mirror of https://github.com/rclone/rclone.git synced 2026-01-11 21:13:35 +00:00

Compare commits


1 commit

2594 changed files with 748618 additions and 107242 deletions

4
.github/FUNDING.yml vendored

@@ -1,4 +0,0 @@
github: [ncw]
patreon: njcw
liberapay: ncw
custom: ["https://rclone.org/donate/"]


@@ -1,5 +0,0 @@
blank_issues_enabled: false
contact_links:
- name: Rclone Forum Community Support
url: https://forum.rclone.org/
about: Please ask and answer questions here.


@@ -19,23 +19,24 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'go1.11', 'go1.12', 'go1.13', 'go1.14']
job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'modules_race', 'go1.10', 'go1.11', 'go1.12']
include:
- job_name: linux
os: ubuntu-latest
go: '1.15.x'
go: '1.13.x'
modules: 'off'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
quicktest: true
racequicktest: true
deploy: true
- job_name: mac
os: macOS-latest
go: '1.15.x'
gotags: 'cmount'
go: '1.13.x'
modules: 'off'
gotags: '' # cmount doesn't work on osx travis for some reason
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
racequicktest: true
@@ -43,7 +44,8 @@ jobs:
- job_name: windows_amd64
os: windows-latest
go: '1.15.x'
go: '1.13.x'
modules: 'off'
gotags: cmount
build_flags: '-include "^windows/amd64" -cgo'
quicktest: true
@@ -52,7 +54,8 @@ jobs:
- job_name: windows_386
os: windows-latest
go: '1.15.x'
go: '1.13.x'
modules: 'off'
gotags: cmount
goarch: '386'
cgo: '1'
@@ -62,55 +65,62 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '1.15.x'
go: '1.13.x'
modules: 'off'
build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
compile_all: true
deploy: true
- job_name: modules_race
os: ubuntu-latest
go: '1.13.x'
modules: 'on'
quicktest: true
racequicktest: true
- job_name: go1.10
os: ubuntu-latest
go: '1.10.x'
modules: 'off'
quicktest: true
- job_name: go1.11
os: ubuntu-latest
go: '1.11.x'
modules: 'off'
quicktest: true
- job_name: go1.12
os: ubuntu-latest
go: '1.12.x'
modules: 'off'
quicktest: true
- job_name: go1.13
os: ubuntu-latest
go: '1.13.x'
quicktest: true
- job_name: go1.14
os: ubuntu-latest
go: '1.14.x'
quicktest: true
racequicktest: true
name: ${{ matrix.job_name }}
runs-on: ${{ matrix.os }}
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@master
with:
fetch-depth: 0
path: ./src/github.com/${{ github.repository }}
- name: Install Go
uses: actions/setup-go@v2
uses: actions/setup-go@v1
with:
stable: 'false'
go-version: ${{ matrix.go }}
- name: Set environment variables
shell: bash
run: |
echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
echo '::set-env name=GOPATH::${{ runner.workspace }}'
echo '::add-path::${{ runner.workspace }}/bin'
echo '::set-env name=GO111MODULE::${{ matrix.modules }}'
echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
- name: Install Libraries on Linux
shell: bash
@@ -124,8 +134,6 @@ jobs:
- name: Install Libraries on macOS
shell: bash
run: |
brew untap local/homebrew-openssl # workaround for https://github.com/actions/virtual-environments/issues/1811
brew untap local/homebrew-python2 # workaround for https://github.com/actions/virtual-environments/issues/1811
brew update
brew cask install osxfuse
if: matrix.os == 'macOS-latest'
@@ -135,10 +143,10 @@ jobs:
run: |
$ProgressPreference = 'SilentlyContinue'
choco install -y winfsp zip
echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
if ($env:GOARCH -eq "386") {
choco install -y mingw --forcex86 --force
echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
}
# Copy mingw32-make.exe to make.exe so the same command line
# can be used on Windows as on macOS and Linux
@@ -158,22 +166,10 @@ jobs:
printf "\n\nSystem environment:\n\n"
env
- name: Go module cache
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Build rclone
shell: bash
run: |
make
- name: Run tests
shell: bash
run: |
make
make quicktest
if: matrix.quicktest
@@ -200,14 +196,12 @@ jobs:
- name: Deploy built binaries
shell: bash
run: |
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
if [[ "${{ matrix.os }}" == "windows-latest" ]]; then make release_dep_windows ; fi
make ci_beta
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep ; fi
make travis_beta
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# working-directory: '$(modulePath)'
# Deploy binaries if enabled in config && not a PR && not a fork
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
if: matrix.deploy && github.head_ref == ''
xgo:
timeout-minutes: 60
@@ -217,27 +211,26 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v1
uses: actions/checkout@master
with:
# Checkout into a fixed path to avoid import path problems on go < 1.11
path: ./src/github.com/rclone/rclone
path: ./src/github.com/${{ github.repository }}
- name: Set environment variables
shell: bash
run: |
echo 'GOPATH=${{ runner.workspace }}' >> $GITHUB_ENV
echo '${{ runner.workspace }}/bin' >> $GITHUB_PATH
echo '::set-env name=GOPATH::${{ runner.workspace }}'
echo '::add-path::${{ runner.workspace }}/bin'
- name: Cross-compile rclone
run: |
docker pull billziss/xgo-cgofuse
GO111MODULE=off go get -v github.com/karalabe/xgo # don't add to go.mod
# xgo \
# -image=billziss/xgo-cgofuse \
# -targets=darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
# -tags cmount \
# -dest build \
# .
go get -v github.com/karalabe/xgo
xgo \
-image=billziss/xgo-cgofuse \
-targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-tags cmount \
-dest build \
.
xgo \
-image=billziss/xgo-cgofuse \
-targets=android/*,ios/* \
@@ -245,14 +238,13 @@ jobs:
.
- name: Build rclone
shell: bash
run: |
make
docker pull golang
docker run --rm -v "$PWD":/usr/src/rclone -w /usr/src/rclone golang go build -mod=vendor -v
- name: Upload artifacts
run: |
make ci_upload
make circleci_upload
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: github.head_ref == '' && github.repository == 'rclone/rclone'
if: github.head_ref == ''


@@ -1,25 +0,0 @@
name: Docker beta build
on:
push:
branches:
- master
jobs:
build:
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Build and publish image
uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
with:
tag: beta
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}


@@ -1,33 +0,0 @@
name: Docker release build
on:
release:
types: [published]
jobs:
build:
runs-on: ubuntu-latest
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Get actual patch version
id: actual_patch_version
run: echo ::set-output name=ACTUAL_PATCH_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g')
- name: Get actual minor version
id: actual_minor_version
run: echo ::set-output name=ACTUAL_MINOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2)
- name: Get actual major version
id: actual_major_version
run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
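The three steps above derive the Docker tags from `GITHUB_REF`. As a quick worked example (assuming a hypothetical release tag `v1.49.1`), the same pipelines evaluate to:

```
GITHUB_REF=refs/tags/v1.49.1
echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g'                      # 1.49.1  (patch)
echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1,2  # 1.49    (minor)
echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1    # 1       (major)
```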
- name: Build and publish image
uses: ilteoood/docker_buildx@439099796bfc03dd9cedeb72a0c7cb92be5cc92c
with:
tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
imageName: rclone/rclone
platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7
publish: true
dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}

4
.gitignore vendored

@@ -7,6 +7,4 @@ rclone.iml
.idea
.history
*.test
*.log
*.iml
fuzz-build.zip
*.log


@@ -79,19 +79,16 @@ request](https://help.github.com/articles/creating-a-pull-request/).
You patch will get reviewed and you might get asked to fix some stuff.
If so, then make the changes in the same branch, squash the commits (make multiple commits one commit) by running:
```
git log # See how many commits you want to squash
git reset --soft HEAD~2 # This squashes the 2 latest commits together.
git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
git commit # Add a new commit message.
git push --force # Push the squashed commit to your GitHub repo.
# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also reccommends wizardzines.com
```
If so, then make the changes in the same branch, squash the commits,
rebase it to master then push it to GitHub with `--force`.
## CI for your fork ##
## Enabling CI for your fork ##
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
The CI config files for rclone have taken care of forks of the project, so you can enable CI for your fork repo easily.
rclone currently uses [Travis CI](https://travis-ci.org/), [AppVeyor](https://ci.appveyor.com/), and
[Circle CI](https://circleci.com/) to build the project. To enable them for your fork, simply go into their
websites, find your fork of rclone, and enable building there.
## Testing ##
@@ -155,7 +152,6 @@ with modules beneath.
* ...commands
* docs - the documentation and website
* content - adjust these docs only - everything else is autogenerated
* command - these are auto generated - edit the corresponding .go file
* fs - main rclone definitions - minimal amount of code
* accounting - bandwidth limiting and statistics
* asyncreader - an io.Reader which reads ahead
@@ -165,7 +161,7 @@ with modules beneath.
* fserrors - rclone specific error handling
* fshttp - http handling for rclone
* fspath - path handling for rclone
* hash - defines rclone's hash types and functions
* hash - defines rclones hash types and functions
* list - list a remote
* log - logging facilities
* march - iterates directories in lock step
@@ -186,6 +182,7 @@ with modules beneath.
* pacer - retries with backoff and paces operations
* readers - a selection of useful io.Readers
* rest - a thin abstraction over net/http for REST
* vendor - 3rd party code managed by `go mod`
* vfs - Virtual FileSystem layer for implementing rclone mount and similar
## Writing Documentation ##
@@ -210,9 +207,6 @@ don't need to run these when adding a feature.
Documentation for rclone sub commands is with their code, eg
`cmd/ls/ls.go`.
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy.
## Making a release ##
There are separate instructions for making a release in the RELEASE.md
@@ -265,27 +259,43 @@ rclone uses the [go
modules](https://tip.golang.org/cmd/go/#hdr-Modules__module_versions__and_more)
support in go1.11 and later to manage its dependencies.
rclone can be built with modules outside of the GOPATH
**NB** you must be using go1.11 or above to add a dependency to
rclone. Rclone will still build with older versions of go, but we use
the `go mod` command for dependencies which is only in go1.11 and
above.
rclone can be built with modules outside of the GOPATH, but for
backwards compatibility with older go versions, rclone also maintains
a `vendor` directory with all the external code rclone needs for
building.
The `vendor` directory is entirely managed by the `go mod` tool, do
not add things manually.
To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency and add it to
`go.mod` and `go.sum`.
instructions below. These will fetch the dependency, add it to
`go.mod` and `go.sum` and vendor it for older go versions.
GO111MODULE=on go get github.com/ncw/new_dependency
GO111MODULE=on go mod vendor
You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.
Please check in the changes generated by `go mod` including `go.mod`
and `go.sum` in the same commit as your other changes.
Please check in the changes generated by `go mod` including the
`vendor` directory and `go.mod` and `go.sum` in a single commit
separate from any other code changes with the title "vendor: add
github.com/ncw/new_dependency". Remember to `git add` any new files
in `vendor`.
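Putting the instructions above together, a minimal sketch of the whole add-a-dependency flow; the `git add`/`git commit` lines are inferred from the prose rather than copied from a repo script:

```
GO111MODULE=on go get github.com/ncw/new_dependency
GO111MODULE=on go mod vendor
git add go.mod go.sum vendor
git commit -m "vendor: add github.com/ncw/new_dependency"
```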
## Updating a dependency ##
If you need to update a dependency then run
GO111MODULE=on go get -u github.com/pkg/errors
GO111MODULE=on go mod vendor
Check in a single commit as above.
Check in in a single commit as above.
## Updating all the dependencies ##
@@ -331,9 +341,10 @@ Getting going
* Add your remote to the imports in `backend/all/all.go`
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable.
* Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
* Use fs/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
* `go install -tags noencode`
* `rclone purge -v TestRemote:rclone-info`
* `rclone info --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
* `rclone info -vv --write-json remote.json TestRemote:rclone-info`
* `go run cmd/info/internal/build_csv/main.go -o remote.csv remote.json`
* open `remote.csv` in a spreadsheet and examine
@@ -348,7 +359,7 @@ Integration tests
* Add your backend to `fstest/test_all/config.yaml`
* Once you've done that then you can use the integration test framework from the project root:
* go install ./...
* test_all -backends remote
* test_all -backend remote
Or if you want to run the integration tests manually:
@@ -373,7 +384,7 @@ alphabetical order of full name of remote (eg `drive` is ordered as
* update them with `make backenddocs` - revert any changes in other backends
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/_index.md` - front page of rclone.org
* `docs/content/about.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `bin/make_manual.py` - add the page to the `docs` constant


@@ -3,16 +3,16 @@ FROM golang AS builder
COPY . /go/src/github.com/rclone/rclone/
WORKDIR /go/src/github.com/rclone/rclone/
RUN make quicktest
RUN \
CGO_ENABLED=0 \
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
make
RUN ./rclone version
# Begin final image
FROM alpine:latest
RUN apk --no-cache add ca-certificates fuse tzdata && \
echo "user_allow_other" >> /etc/fuse.conf
RUN apk --no-cache add ca-certificates fuse
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/


@@ -2,20 +2,16 @@
Current active maintainers of rclone are:
| Name | GitHub ID | Specific Responsibilities |
| :--------------- | :---------------- | :-------------------------- |
| Nick Craig-Wood | @ncw | overall project health |
| Stefan Breunig | @breunigs | |
| Ishuah Kariuki | @ishuah | |
| Remus Bunduc | @remusb | cache backend |
| Fabian Möller | @B4dM4n | |
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud & yandex backends |
| Ivan Andreev | @ivandeex | chunker & mailru backends |
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | tardigrade backend |
| Name | GitHub ID | Specific Responsibilities |
| :--------------- | :---------- | :-------------------------- |
| Nick Craig-Wood | @ncw | overall project health |
| Stefan Breunig | @breunigs | |
| Ishuah Kariuki | @ishuah | |
| Remus Bunduc | @remusb | cache backend |
| Fabian Möller | @B4dM4n | |
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud & yandex backends |
**This is a work in progress Draft**
@@ -33,7 +29,7 @@ Rclone uses the labels like this:
* `duplicate` - normally close these and ask the user to subscribe to the original
* `enhancement: new remote` - a new rclone backend
* `enhancement` - a new feature
* `FUSE` - to do with `rclone mount` command
* `FUSE` - do do with `rclone mount` command
* `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
* `help` wanted - mark these if you find a self contained issue - these get shown to new visitors to the project
* `IMPORTANT` - note to maintainers not to forget to fix this for the release

14095
MANUAL.html generated

File diff suppressed because it is too large

11881
MANUAL.md generated

File diff suppressed because it is too large

15017
MANUAL.txt generated

File diff suppressed because it is too large

124
Makefile

@@ -1,35 +1,33 @@
SHELL = bash
# Branch we are working on
BRANCH := $(or $(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD))
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD))
# Tag of the current commit, if any. If this is not "" then we are building a release
RELEASE_TAG := $(shell git tag -l --points-at HEAD)
# Version of last release (may not be on this branch)
VERSION := $(shell cat VERSION)
# Last tag on this branch
LAST_TAG := $(shell git describe --tags --abbrev=0)
# Next version
NEXT_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2+1,0}')
NEXT_PATCH_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2,$$3+1}')
# If we are working on a release, override branch to master
ifdef RELEASE_TAG
BRANCH := master
LAST_TAG := $(shell git describe --abbrev=0 --tags $(VERSION)^)
endif
TAG_BRANCH := .$(BRANCH)
BRANCH_PATH := branch/$(BRANCH)/
TAG_BRANCH := -$(BRANCH)
BRANCH_PATH := branch/
# If building HEAD or master then unset TAG_BRANCH and BRANCH_PATH
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
TAG_BRANCH :=
BRANCH_PATH :=
endif
# Make version suffix -beta.NNNN.CCCCCCCC (N=Commit number, C=Commit)
VERSION_SUFFIX := -beta.$(shell git rev-list --count HEAD).$(shell git show --no-patch --no-notes --pretty='%h' HEAD)
# TAG is current version + commit number + commit + branch
# Make version suffix -DDD-gCCCCCCCC (D=commits since last relase, C=Commit) or blank
VERSION_SUFFIX := $(shell git describe --abbrev=8 --tags | perl -lpe 's/^v\d+\.\d+\.\d+//; s/^-(\d+)/"-".sprintf("%03d",$$1)/e;')
# TAG is current version + number of commits since last release + branch
TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)
ifdef RELEASE_TAG
TAG := $(RELEASE_TAG)
NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
ifndef RELEASE_TAG
TAG := $(TAG)-beta
endif
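As a rough illustration of the `git describe` based suffix above, with made-up example values:

```
$ git describe --abbrev=8 --tags
v1.49.5-42-g1a2b3c4d
# the perl filter strips the leading "v1.49.5" and zero-pads the commit count, so
#   VERSION_SUFFIX = -042-g1a2b3c4d
#   TAG            = v1.49.5-042-g1a2b3c4d  (plus the branch and -beta suffixes handled above)
```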
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
ifdef BETA_SUBDIR
BETA_SUBDIR := /$(BETA_SUBDIR)
endif
@@ -48,8 +46,7 @@ endif
rclone:
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
mkdir -p `go env GOPATH`/bin/
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/
test_all:
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
@@ -59,13 +56,10 @@ vars:
@echo BRANCH="'$(BRANCH)'"
@echo TAG="'$(TAG)'"
@echo VERSION="'$(VERSION)'"
@echo NEXT_VERSION="'$(NEXT_VERSION)'"
@echo GO_VERSION="'$(GO_VERSION)'"
@echo BETA_URL="'$(BETA_URL)'"
btest:
@echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
@echo "Copied markdown of beta release to clip board"
version:
@echo '$(TAG)'
@@ -76,10 +70,10 @@ test: rclone test_all
# Quick test
quicktest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) ./...
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
racequicktest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
# Do source code quality checks
check: rclone
@@ -91,43 +85,30 @@ check: rclone
build_dep:
go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
# Get the release dependencies we only install on linux
release_dep_linux:
# Get the release dependencies
release_dep:
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
go run bin/get-github-release.go -extract github-release aktau/github-release 'linux-amd64-github-release.tar.bz2'
# Get the release dependencies we only install on Windows
release_dep_windows:
GO111MODULE=off GOOS="" GOARCH="" go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo
# Update dependencies
showupdates:
@echo "*** Direct dependencies that could be updated ***"
@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
# Update direct and indirect dependencies and test dependencies
update:
GO111MODULE=on go get -u -t ./...
-#GO111MODULE=on go get -d $(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
GO111MODULE=on go mod tidy
# Tidy the module dependencies
tidy:
GO111MODULE=on go get -u ./...
GO111MODULE=on go mod tidy
GO111MODULE=on go mod vendor
doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
rclone.1: MANUAL.md
pandoc -s --from markdown-smart --to man MANUAL.md -o rclone.1
pandoc -s --from markdown --to man MANUAL.md -o rclone.1
MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs
./bin/make_manual.py
MANUAL.html: MANUAL.md
pandoc -s --from markdown-smart --to html MANUAL.md -o MANUAL.html
pandoc -s --from markdown --to html MANUAL.md -o MANUAL.html
MANUAL.txt: MANUAL.md
pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt
pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
commanddocs: rclone
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
@@ -149,27 +130,14 @@ clean:
rm -f rclone fs/operations/operations.test fs/sync/sync.test fs/test_all.log test.log
website:
rm -rf docs/public
cd docs && hugo
@if grep -R "raw HTML omitted" docs/public ; then echo "ERROR: found unescaped HTML - fix the markdown source" ; fi
upload_website: website
rclone -v sync docs/public memstore:www-rclone-org
upload_test_website: website
rclone -P sync docs/public test-rclone-org:
validate_website: website
find docs/public -type f -name "*.html" | xargs tidy --mute-id yes -errors --gnu-emacs yes --drop-empty-elements no --warn-proprietary-attributes no --mute MISMATCHED_ATTRIBUTE_WARN
tarball:
git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)
vendorball:
go mod vendor
tar -zcf build/rclone-$(TAG)-vendor.tar.gz vendor
rm -rf vendor
sign_upload:
cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS
@@ -201,17 +169,24 @@ log_since_last_release:
compile_all:
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
ci_upload:
sudo chown -R $$USER build
find build -type l -delete
gzip -r9v build
appveyor_upload:
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
@echo Beta release ready at $(BETA_URL)
circleci_upload:
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifndef BRANCH_PATH
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
endif
@echo Beta release ready at $(BETA_URL)/testbuilds
ci_beta:
travis_beta:
ifeq (linux,$(filter linux,$(subst Linux,linux,$(TRAVIS_OS_NAME) $(AGENT_OS))))
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
endif
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
@@ -220,40 +195,33 @@ ifndef BRANCH_PATH
endif
@echo Beta release ready at $(BETA_URL)
# Fetch the binary builds from GitHub actions
# Fetch the binary builds from travis and appveyor
fetch_binaries:
rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
serve: website
cd docs && hugo server -v -w --disableFastRender
cd docs && hugo server -v -w
tag: retag doc
bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new
tag: doc
@echo "Old tag is $(VERSION)"
@echo "New tag is $(NEXT_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_VERSION)" > VERSION
git tag -s -m "Version $(NEXT_VERSION)" $(NEXT_VERSION)
bin/make_changelog.py $(LAST_TAG) $(NEXT_VERSION) > docs/content/changelog.md.new
mv docs/content/changelog.md.new docs/content/changelog.md
@echo "Edit the new changelog in docs/content/changelog.md"
@echo "Then commit all the changes"
@echo git commit -m \"Version $(VERSION)\" -a -v
@echo git commit -m \"Version $(NEXT_VERSION)\" -a -v
@echo "And finally run make retag before make cross etc"
retag:
@echo "Version is $(VERSION)"
git tag -f -s -m "Version $(VERSION)" $(VERSION)
startdev:
@echo "Version is $(VERSION)"
@echo "Next version is $(NEXT_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_VERSION)" > VERSION
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
startstable:
@echo "Version is $(VERSION)"
@echo "Next stable version is $(NEXT_PATCH_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_PATCH_VERSION)" > VERSION
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(VERSION)-DEV\"\n" | gofmt > fs/version.go
git commit -m "Start $(VERSION)-DEV development" fs/version.go
winzip:
zip -9 rclone-$(TAG).zip rclone.exe


@@ -8,7 +8,10 @@
[Installation](https://rclone.org/install/) |
[Forum](https://forum.rclone.org/)
[![Build Status](https://github.com/rclone/rclone/workflows/build/badge.svg)](https://github.com/rclone/rclone/actions?query=workflow%3Abuild)
[![Build Status](https://travis-ci.org/rclone/rclone.svg?branch=master)](https://travis-ci.org/rclone/rclone)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/rclone/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/rclone/rclone)
[![Build Status](https://dev.azure.com/rclone/rclone/_apis/build/status/rclone.rclone?branchName=master)](https://dev.azure.com/rclone/rclone/_build/latest?definitionId=2&branchName=master)
[![CircleCI](https://circleci.com/gh/rclone/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/rclone/rclone/tree/master)
[![Go Report Card](https://goreportcard.com/badge/github.com/rclone/rclone)](https://goreportcard.com/report/github.com/rclone/rclone)
[![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone)
[![Docker Pulls](https://img.shields.io/docker/pulls/rclone/rclone)](https://hub.docker.com/r/rclone/rclone)
@@ -31,7 +34,6 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
@@ -43,7 +45,6 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Memory [:page_facing_up:](https://rclone.org/memory/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
@@ -59,12 +60,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)


@@ -9,32 +9,29 @@ This file describes how to make the various kinds of releases
## Making a release
* git checkout master # see below for stable branch
* git pull
* git status - make sure everything is checked in
* Check GitHub actions build for master is Green
* Check travis & appveyor builds are green
* make check
* make test # see integration test server or run locally
* make tag
* edit docs/content/changelog.md # make sure to remove duplicate logs from point releases
* make tidy
* edit docs/content/changelog.md
* make doc
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
* make retag
* git push --tags origin master
* # Wait for the GitHub builds to complete then...
* # Wait for the appveyor and travis builds to complete then...
* make fetch_binaries
* make tarball
* make vendorball
* make sign_upload
* make check_sign
* make upload
* make upload_website
* make upload_github
* make startdev # make startstable for stable branch
* # announce with forum post, twitter post, patreon post
* make startdev
* # announce with forum post, twitter post, G+ post
Early in the next release cycle update the dependencies
Early in the next release cycle update the vendored dependencies
* Review any pinned packages in go.mod and remove if possible
* make update
@@ -42,46 +39,71 @@ Early in the next release cycle update the dependencies
* git add new files
* git commit -a -v
If `make update` fails with errors like this:
```
# github.com/cpuguy83/go-md2man/md2man
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
```
Can be fixed with
* GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
* GO111MODULE=on go mod tidy
* GO111MODULE=on go mod vendor
## Making a point release
If rclone needs a point release due to some horrendous bug:
Set vars
* BASE_TAG=v1.XX # eg v1.52
* NEW_TAG=${BASE_TAG}.Y # eg v1.52.1
* echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
First make the release branch. If this is a second point release then
this will be done already.
* git branch ${BASE_TAG} ${BASE_TAG}-stable
* git co ${BASE_TAG}-stable
* make startstable
* BASE_TAG=v1.XX # eg v1.49
* NEW_TAG=${BASE_TAG}.Y # eg v1.49.1
* echo $BASE_TAG $NEW_TAG # v1.49 v1.49.1
* git branch ${BASE_TAG} ${BASE_TAG}-fixes
Now
* git co ${BASE_TAG}-stable
* git co ${BASE_TAG}-fixes
* git cherry-pick any fixes
* Do the steps as above
* make startstable
* NB this overwrites the current beta so we need to do this - FIXME is this true any more?
* Test (see above)
* make NEXT_VERSION=${NEW_TAG} tag
* edit docs/content/changelog.md
* make TAG=${NEW_TAG} doc
* git commit -a -v -m "Version ${NEW_TAG}"
* git tag -d ${NEW_TAG}
* git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
* git push --tags -u origin ${BASE_TAG}-fixes
* Wait for builds to complete
* make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
* make TAG=${NEW_TAG} tarball
* make TAG=${NEW_TAG} sign_upload
* make TAG=${NEW_TAG} check_sign
* make TAG=${NEW_TAG} upload
* make TAG=${NEW_TAG} upload_website
* make TAG=${NEW_TAG} upload_github
* NB this overwrites the current beta so we need to do this
* git co master
* # cherry pick the changes to the changelog
* git checkout ${BASE_TAG}-stable docs/content/changelog.md
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
* make LAST_TAG=${NEW_TAG} startdev
* # cherry pick the changes to the changelog and VERSION
* git checkout ${BASE_TAG}-fixes VERSION docs/content/changelog.md
* git commit --amend
* git push
* Announce!
## Making a manual build of docker
The rclone docker image should autobuild on via GitHub actions. If it doesn't
The rclone docker image should autobuild on docker hub. If it doesn't
or needs to be updated then rebuild like this.
```
docker pull golang
docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .
docker push rclone/rclone:1.52.0
docker push rclone/rclone:1.52
docker build -t rclone/rclone:1.49.1 -t rclone/rclone:1.49 -t rclone/rclone:1 -t rclone/rclone:latest .
docker push rclone/rclone:1.49.1
docker push rclone/rclone:1.49
docker push rclone/rclone:1
docker push rclone/rclone:latest
```


@@ -1 +1 @@
v1.53.3
v1.49.5


@@ -5,7 +5,6 @@ import (
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
@@ -47,5 +46,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if strings.HasPrefix(opt.Remote, name+":") {
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
}
return cache.Get(fspath.JoinRootPath(opt.Remote, root))
fsInfo, configName, fsPath, config, err := fs.ConfigFs(opt.Remote)
if err != nil {
return nil, err
}
return fsInfo.NewFs(configName, fspath.JoinRootPath(fsPath, root), config)
}


@@ -23,7 +23,6 @@ import (
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/mailru"
_ "github.com/rclone/rclone/backend/mega"
_ "github.com/rclone/rclone/backend/memory"
_ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/pcloud"
@@ -31,12 +30,9 @@ import (
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/tardigrade"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/webdav"
_ "github.com/rclone/rclone/backend/yandex"


@@ -28,17 +28,18 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"golang.org/x/oauth2"
)
const (
enc = encodings.AmazonCloudDrive
folderKind = "FOLDER"
fileKind = "FILE"
statusAvailable = "AVAILABLE"
@@ -71,12 +72,28 @@ func init() {
Description: "Amazon Drive",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
err := oauthutil.Config("amazon cloud drive", name, m, acdConfig, nil)
err := oauthutil.Config("amazon cloud drive", name, m, acdConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Amazon Application Client ID.",
Required: true,
}, {
Name: config.ConfigClientSecret,
Help: "Amazon Application Client Secret.",
Required: true,
}, {
Name: config.ConfigAuthURL,
Help: "Auth server URL.\nLeave blank to use Amazon's.",
Advanced: true,
}, {
Name: config.ConfigTokenURL,
Help: "Token server url.\nleave blank to use Amazon's.",
Advanced: true,
}, {
Name: "checkpoint",
Help: "Checkpoint for internal polling (debug).",
Hide: fs.OptionHideBoth,
@@ -120,23 +137,15 @@ which downloads the file through a temporary URL directly from the
underlying S3 storage.`,
Default: defaultTempLinkThreshold,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: (encoder.Base |
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Checkpoint string `config:"checkpoint"`
UploadWaitPerGB fs.Duration `config:"upload_wait_per_gb"`
TempLinkThreshold fs.SizeSuffix `config:"templink_threshold"`
Enc encoder.MultiEncoder `config:"encoding"`
Checkpoint string `config:"checkpoint"`
UploadWaitPerGB fs.Duration `config:"upload_wait_per_gb"`
TempLinkThreshold fs.SizeSuffix `config:"templink_threshold"`
}
// Fs represents a remote acd server
@@ -153,7 +162,7 @@ type Fs struct {
tokenRenewer *oauthutil.Renew // renew the token on expiry
}
// Object describes an acd object
// Object describes a acd object
//
// Will definitely have info but maybe not meta
type Object struct {
@@ -213,7 +222,7 @@ func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
}
// Work around receiving this error sporadically on authentication
//
// HTTP code 403: "403 Forbidden", response body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"}
// HTTP code 403: "403 Forbidden", reponse body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"}
if resp.StatusCode == 403 && strings.Contains(err.Error(), "Authorization header requires") {
fs.Debugf(f, "403 \"Authorization header requires...\" error received - retry")
return true, err
@@ -377,7 +386,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
var resp *http.Response
var subFolder *acd.Folder
err = f.pacer.Call(func() (bool, error) {
subFolder, resp, err = folder.GetFolder(f.opt.Enc.FromStandardName(leaf))
subFolder, resp, err = folder.GetFolder(enc.FromStandardName(leaf))
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -404,7 +413,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
var resp *http.Response
var info *acd.Folder
err = f.pacer.Call(func() (bool, error) {
info, resp, err = folder.CreateFolder(f.opt.Enc.FromStandardName(leaf))
info, resp, err = folder.CreateFolder(enc.FromStandardName(leaf))
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -472,7 +481,7 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
if !hasValidParent {
continue
}
*node.Name = f.opt.Enc.ToStandardName(*node.Name)
*node.Name = enc.ToStandardName(*node.Name)
// Store the nodes up in case we have to retry the listing
out = append(out, node)
}
@@ -498,6 +507,10 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
@@ -645,7 +658,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
return nil, err
}
// If not create it
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, err
}
@@ -658,7 +671,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
err = f.pacer.CallNoRetry(func() (bool, error) {
start := time.Now()
f.tokenRenewer.Start()
info, resp, err = folder.Put(in, f.opt.Enc.FromStandardName(leaf))
info, resp, err = folder.Put(in, enc.FromStandardName(leaf))
f.tokenRenewer.Stop()
var ok bool
ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
@@ -676,7 +689,13 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
_, err := f.dirCache.FindDir(ctx, dir, true)
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
return err
}
@@ -698,6 +717,10 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// create the destination directory if necessary
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return nil, err
}
srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
if err != nil {
return nil, err
@@ -767,24 +790,54 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return errors.New("can't move root directory")
}
// find the root src directory
err = srcFs.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
// find the root dst directory
if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
} else {
if f.dirCache.FoundRoot() {
return fs.ErrorDirExists
}
}
// Find ID of dst parent, creating subdirs if necessary
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, dstRemote, true)
findPath := dstRemote
if dstRemote == "" {
findPath = f.root
}
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, findPath, true)
if err != nil {
return err
}
// Check destination does not exist
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return err
} else {
return fs.ErrorDirExists
if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return err
} else {
return fs.ErrorDirExists
}
}
// Find ID of src parent
_, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcRemote, false)
findPath = srcRemote
var srcDirectoryID string
if srcRemote == "" {
srcDirectoryID, err = srcFs.dirCache.RootParentID()
} else {
_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false)
}
if err != nil {
return err
}
@@ -830,6 +883,10 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return errors.New("can't purge root directory")
}
dc := f.dirCache
err := dc.FindRoot(ctx, false)
if err != nil {
return err
}
rootID, err := dc.FindDir(ctx, dir, false)
if err != nil {
return err
@@ -921,8 +978,8 @@ func (f *Fs) Hashes() hash.Set {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}
// ------------------------------------------------------------
@@ -973,7 +1030,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
if o.info != nil {
return nil
}
leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, o.remote, false)
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return fs.ErrorObjectNotFound
@@ -984,7 +1041,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
var resp *http.Response
var info *acd.File
err = o.fs.pacer.Call(func() (bool, error) {
info, resp, err = folder.GetFile(o.fs.opt.Enc.FromStandardName(leaf))
info, resp, err = folder.GetFile(enc.FromStandardName(leaf))
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -1104,7 +1161,7 @@ func (f *Fs) restoreNode(info *acd.Node) (newInfo *acd.Node, err error) {
func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
newInfo, resp, err = info.Rename(f.opt.Enc.FromStandardName(newName))
newInfo, resp, err = info.Rename(enc.FromStandardName(newName))
return f.shouldRetry(resp, err)
})
return newInfo, err
@@ -1300,7 +1357,7 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoin
if len(node.Parents) > 0 {
if path, ok := f.dirCache.GetInv(node.Parents[0]); ok {
// and append the drive file name to compute the full file name
name := f.opt.Enc.ToStandardName(*node.Name)
name := enc.ToStandardName(*node.Name)
if len(path) > 0 {
path = path + "/" + name
} else {


@@ -1,6 +1,6 @@
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
// +build !plan9,!solaris,!js,go1.13
// +build !plan9,!solaris
package azureblob
@@ -9,12 +9,14 @@ import (
"context"
"crypto/md5"
"encoding/base64"
"encoding/binary"
"encoding/hex"
"fmt"
"io"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"sync"
"time"
@@ -24,19 +26,15 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/sync/errgroup"
)
const (
@@ -61,10 +59,10 @@ const (
emulatorAccount = "devstoreaccount1"
emulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
)
const enc = encodings.AzureBlob
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -129,57 +127,21 @@ If blobs are in "archive tier" at remote, trying to perform data transfer
operations from remote will not be allowed. User should first restore by
tiering blob to "Hot" or "Cool".`,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Don't store MD5 checksum with object metadata.
Normally rclone will calculate the MD5 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
Default: false,
Advanced: true,
}, {
Name: "memory_pool_flush_time",
Default: memoryPoolFlushTime,
Advanced: true,
Help: `How often internal memory buffer pools will be flushed.
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
}, {
Name: "memory_pool_use_mmap",
Default: memoryPoolUseMmap,
Advanced: true,
Help: `Whether to use mmap buffers in internal memory pool.`,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.EncodeInvalidUtf8 |
encoder.EncodeSlash |
encoder.EncodeCtl |
encoder.EncodeDel |
encoder.EncodeBackSlash |
encoder.EncodeRightPeriod),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Account string `config:"account"`
Key string `config:"key"`
Endpoint string `config:"endpoint"`
SASURL string `config:"sas_url"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
ListChunkSize uint `config:"list_chunk"`
AccessTier string `config:"access_tier"`
UseEmulator bool `config:"use_emulator"`
DisableCheckSum bool `config:"disable_checksum"`
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
Account string `config:"account"`
Key string `config:"key"`
Endpoint string `config:"endpoint"`
SASURL string `config:"sas_url"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
ListChunkSize uint `config:"list_chunk"`
AccessTier string `config:"access_tier"`
UseEmulator bool `config:"use_emulator"`
}
// Fs represents a remote azure server
@@ -198,10 +160,9 @@ type Fs struct {
cache *bucket.Cache // cache for container creation status
pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency
pool *pool.Pool // memory pool
}
// Object describes an azure object
// Object describes a azure object
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
@@ -228,7 +189,7 @@ func (f *Fs) Root() string {
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootContainer == "" {
return "Azure root"
return fmt.Sprintf("Azure root")
}
if f.rootDirectory == "" {
return fmt.Sprintf("Azure container %s", f.rootContainer)
@@ -251,7 +212,7 @@ func parsePath(path string) (root string) {
// relative to f.root
func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) {
containerName, containerPath = bucket.Split(path.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(containerName), f.opt.Enc.FromStandardPath(containerPath)
return enc.FromStandardName(containerName), enc.FromStandardPath(containerPath)
}
// split returns container and containerPath from the object
@@ -287,12 +248,6 @@ var retryErrorCodes = []int{
func (f *Fs) shouldRetry(err error) (bool, error) {
// FIXME interpret special errors - more to do here
if storageErr, ok := err.(azblob.StorageError); ok {
switch storageErr.ServiceCode() {
case "InvalidBlobOrBlock":
// These errors happen sometimes in multipart uploads
// because of block concurrency issues
return true, err
}
statusCode := storageErr.Response().StatusCode
for _, e := range retryErrorCodes {
if statusCode == e {
@@ -338,7 +293,7 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
}
// httpClientFactory creates a Factory object that sends HTTP requests
// to an rclone's http.Client.
// to a rclone's http.Client.
//
// copied from azblob.newDefaultHTTPClientFactory
func httpClientFactory(client *http.Client) pipeline.Factory {
@@ -357,9 +312,6 @@ func httpClientFactory(client *http.Client) pipeline.Factory {
//
// this code was copied from azblob.NewPipeline
func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline.Pipeline {
// Don't log stuff to syslog/Windows Event log
pipeline.SetForceLogEnabled(false)
// Closest to API goes first; closest to the wire goes last
factories := []pipeline.Factory{
azblob.NewTelemetryPolicyFactory(o.Telemetry),
@@ -418,12 +370,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
client: fshttp.NewClient(fs.Config),
cache: bucket.NewCache(),
cntURLcache: make(map[string]*azblob.ContainerURL, 1),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
fs.Config.Transfers,
opt.MemoryPoolUseMmap,
),
}
f.setRoot(root)
f.features = (&fs.Features{
@@ -639,7 +585,7 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
// if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
// return nil
// }
remote := f.opt.Enc.ToStandardPath(file.Name)
remote := enc.ToStandardPath(file.Name)
if !strings.HasPrefix(remote, prefix) {
fs.Debugf(f, "Odd name received %q", remote)
continue
@@ -660,7 +606,7 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
// Send the subdirectories
for _, remote := range response.Segment.BlobPrefixes {
remote := strings.TrimRight(remote.Name, "/")
remote = f.opt.Enc.ToStandardPath(remote)
remote = enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
fs.Debugf(f, "Odd directory name received %q", remote)
continue
@@ -724,7 +670,7 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
return entries, nil
}
err = f.listContainersToFn(func(container *azblob.ContainerItem) error {
d := fs.NewDir(f.opt.Enc.ToStandardName(container.Name), container.Properties.LastModified)
d := fs.NewDir(enc.ToStandardName(container.Name), container.Properties.LastModified)
f.cache.MarkOK(container.Name)
entries = append(entries, d)
return nil
@@ -858,11 +804,6 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
return fs, fs.Update(ctx, in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
container, _ := f.split(dir)
@@ -872,10 +813,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// makeContainer creates the container if it doesn't exist
func (f *Fs) makeContainer(ctx context.Context, container string) error {
return f.cache.Create(container, func() error {
// If this is a SAS URL limited to a container then assume it is already created
if f.isLimited {
return nil
}
// now try to create the container
return f.pacer.Call(func() (bool, error) {
_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
@@ -967,7 +904,8 @@ func (f *Fs) Hashes() hash.Set {
}
// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context, dir string) error {
func (f *Fs) Purge(ctx context.Context) error {
dir := "" // forward compat!
container, directory := f.split(dir)
if container == "" || directory != "" {
// Delegate to caller if not root of a container
@@ -1028,19 +966,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return f.NewObject(ctx, remote)
}
func (f *Fs) getMemoryPool(size int64) *pool.Pool {
if size == int64(f.opt.ChunkSize) {
return f.pool
}
return pool.New(
time.Duration(f.opt.MemoryPoolFlushTime),
int(size),
fs.Config.Transfers,
f.opt.MemoryPoolUseMmap,
)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -1183,6 +1108,22 @@ func (o *Object) readMetaData() (err error) {
return o.decodeMetaDataFromPropertiesResponse(blobProperties)
}
// parseTimeString converts a decimal string number of milliseconds
// elapsed since January 1, 1970 UTC into a time.Time and stores it in
// the modTime variable.
func (o *Object) parseTimeString(timeString string) (err error) {
if timeString == "" {
return nil
}
unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
if err != nil {
fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
return err
}
o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
return nil
}
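Aside: the mod time handled by parseTimeString is stored as a decimal count of milliseconds since the Unix epoch. A minimal standalone sketch of the round trip, assuming the value is written with strconv.FormatInt in that same format (illustration only, not backend code):

package main

import (
    "fmt"
    "strconv"
    "time"
)

func main() {
    modTime := time.Date(2019, 12, 31, 23, 59, 59, 123e6, time.UTC)
    // encode as decimal milliseconds since the Unix epoch
    timeString := strconv.FormatInt(modTime.UnixNano()/1e6, 10)
    // decode it the same way parseTimeString does
    ms, err := strconv.ParseInt(timeString, 10, 64)
    if err != nil {
        panic(err)
    }
    decoded := time.Unix(ms/1e3, (ms%1e3)*1e6).UTC()
    fmt.Println(timeString, decoded.Equal(modTime)) // 1577836799123 true
}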
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
@@ -1284,116 +1225,103 @@ type readSeeker struct {
io.Seeker
}
// increment the slice passed in as LSB binary
func increment(xs []byte) {
for i, digit := range xs {
newDigit := digit + 1
xs[i] = newDigit
if newDigit >= digit {
// exit if no carry
break
}
}
}
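Aside: increment treats the block ID bytes as a little-endian counter, and each counter value is then base64 encoded so that every block ID for a blob has the same length. A minimal standalone sketch (the helper is copied from the function above; the rest is illustration only):

package main

import (
    "encoding/base64"
    "fmt"
)

// increment treats the slice as a little-endian binary counter
func increment(xs []byte) {
    for i, digit := range xs {
        newDigit := digit + 1
        xs[i] = newDigit
        if newDigit >= digit {
            break // exit if no carry
        }
    }
}

func main() {
    id := make([]byte, 8) // all block IDs for a blob must be the same length
    for i := 0; i < 3; i++ {
        increment(id)
        fmt.Println(base64.StdEncoding.EncodeToString(id))
    }
    // AQAAAAAAAAA=
    // AgAAAAAAAAA=
    // AwAAAAAAAAA=
}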
var warnStreamUpload sync.Once
// uploadMultipart uploads a file using multipart upload
//
// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, blob *azblob.BlobURL, httpHeaders *azblob.BlobHTTPHeaders) (err error) {
func (o *Object) uploadMultipart(in io.Reader, size int64, blob *azblob.BlobURL, httpHeaders *azblob.BlobHTTPHeaders) (err error) {
// Calculate correct chunkSize
chunkSize := int64(o.fs.opt.ChunkSize)
totalParts := -1
// Note that the max size of file is 4.75 TB (100 MB X 50,000
// blocks) and this is smaller than the max uncommitted block
// size (9.52 TB) so we do not need to part commit block lists
// or garbage collect uncommitted blocks.
//
// See: https://docs.microsoft.com/en-gb/rest/api/storageservices/put-block
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
// buffers here (default 4MB). With a maximum number of parts (50,000) this will be a file of
// 195GB which seems like a not too unreasonable limit.
if size == -1 {
warnStreamUpload.Do(func() {
fs.Logf(o, "Streaming uploads using chunk size %v will have maximum file size of %v",
o.fs.opt.ChunkSize, fs.SizeSuffix(chunkSize*maxTotalParts))
})
} else {
// Adjust partSize until the number of parts is small enough.
if size/chunkSize >= maxTotalParts {
// Calculate partition size rounded up to the nearest MB
chunkSize = (((size / maxTotalParts) >> 20) + 1) << 20
var totalParts int64
for {
// Calculate number of parts
var remainder int64
totalParts, remainder = size/chunkSize, size%chunkSize
if remainder != 0 {
totalParts++
}
if totalParts < maxTotalParts {
break
}
// Double chunk size if the number of parts is too big
chunkSize *= 2
if chunkSize > int64(maxChunkSize) {
return errors.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), totalParts, fs.SizeSuffix(chunkSize/2))
}
totalParts = int(size / chunkSize)
if size%chunkSize != 0 {
totalParts++
}
}
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, fs.SizeSuffix(chunkSize))
// https://godoc.org/github.com/Azure/azure-storage-blob-go/2017-07-29/azblob#example-BlockBlobURL
// Utilities are cloned from above example
// These helper functions convert a binary block ID to a base-64 string and vice versa
// NOTE: The blockID must be <= 64 bytes and ALL blockIDs for the block must be the same length
blockIDBinaryToBase64 := func(blockID []byte) string { return base64.StdEncoding.EncodeToString(blockID) }
// These helper functions convert an int block ID to a base-64 string and vice versa
blockIDIntToBase64 := func(blockID uint64) string {
binaryBlockID := (&[8]byte{})[:] // All block IDs are 8 bytes long
binary.LittleEndian.PutUint64(binaryBlockID, blockID)
return blockIDBinaryToBase64(binaryBlockID)
}
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, fs.SizeSuffix(chunkSize))
// block ID variables
var (
rawID uint64
blockID = "" // id in base64 encoded form
blocks []string
)
// increment the blockID
nextID := func() {
rawID++
blockID = blockIDIntToBase64(rawID)
blocks = append(blocks, blockID)
}
// Get BlockBlobURL, we will use default pipeline here
blockBlobURL := blob.ToBlockBlobURL()
ctx := context.Background()
ac := azblob.LeaseAccessConditions{} // Use default lease access conditions
// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
in, wrap := accounting.UnWrap(in)
// Upload the chunks
var (
g, gCtx = errgroup.WithContext(ctx)
remaining = size // remaining size in file for logging only, -1 if size < 0
position = int64(0) // position in file
memPool = o.fs.getMemoryPool(chunkSize) // pool to get memory from
finished = false // set when we have read EOF
blocks []string // list of blocks for finalize
blockBlobURL = blob.ToBlockBlobURL() // Get BlockBlobURL, we will use default pipeline here
ac = azblob.LeaseAccessConditions{} // Use default lease access conditions
binaryBlockID = make([]byte, 8) // block counter as LSB first 8 bytes
)
for part := 0; !finished; part++ {
// Get a block of memory from the pool and a token which limits concurrency
o.fs.uploadToken.Get()
buf := memPool.Get()
free := func() {
memPool.Put(buf) // return the buf
o.fs.uploadToken.Put() // return the token
remaining := size
position := int64(0)
errs := make(chan error, 1)
var wg sync.WaitGroup
outer:
for part := 0; part < int(totalParts); part++ {
// Check any errors
select {
case err = <-errs:
break outer
default:
}
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
free()
break
reqSize := remaining
if reqSize >= chunkSize {
reqSize = chunkSize
}
// Make a block of memory
buf := make([]byte, reqSize)
// Read the chunk
n, err := readers.ReadFill(in, buf) // this can never return 0, nil
if err == io.EOF {
if n == 0 { // end if no data
free()
break
}
finished = true
} else if err != nil {
free()
return errors.Wrap(err, "multipart upload failed to read source")
_, err = io.ReadFull(in, buf)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to read source")
break outer
}
buf = buf[:n]
// increment the blockID and save the blocks for finalize
increment(binaryBlockID)
blockID := base64.StdEncoding.EncodeToString(binaryBlockID)
blocks = append(blocks, blockID)
// Transfer the chunk
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, totalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
g.Go(func() (err error) {
defer free()
nextID()
wg.Add(1)
o.fs.uploadToken.Get()
go func(part int, position int64, blockID string) {
defer wg.Done()
defer o.fs.uploadToken.Put()
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, totalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
// Upload the block, with MD5 for check
md5sum := md5.Sum(buf)
@@ -1405,19 +1333,28 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
_, err = blockBlobURL.StageBlock(ctx, blockID, &rs, ac, transactionalMD5)
return o.fs.shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "multipart upload failed to upload part")
err = errors.Wrap(err, "multipart upload failed to upload part")
select {
case errs <- err:
default:
}
return
}
return nil
})
}(part, position, blockID)
// ready for next block
if size >= 0 {
remaining -= chunkSize
}
remaining -= chunkSize
position += chunkSize
}
err = g.Wait()
wg.Wait()
if err == nil {
select {
case err = <-errs:
default:
}
}
if err != nil {
return err
}
@@ -1454,16 +1391,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
httpHeaders.ContentType = fs.MimeType(ctx, o)
// Compute the Content-MD5 of the file, for multiparts uploads it
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
// Note: If multipart, an MD5 checksum will also be computed for each uploaded block
// Note: If multipart, a MD5 checksum will also be computed for each uploaded block
// in order to validate its integrity during transport
if !o.fs.opt.DisableCheckSum {
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
if err == nil {
httpHeaders.ContentMD5 = sourceMD5bytes
} else {
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
}
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
if err == nil {
httpHeaders.ContentMD5 = sourceMD5bytes
} else {
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
}
}
@@ -1477,7 +1412,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// is merged the SDK can't upload a single blob of exactly the chunk
// size, so upload with a multipart upload to work around.
// See: https://github.com/rclone/rclone/issues/2653
multipartUpload := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
multipartUpload := size >= int64(o.fs.opt.UploadCutoff)
if size == int64(o.fs.opt.ChunkSize) {
multipartUpload = true
fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
@@ -1487,7 +1422,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
if multipartUpload {
// If a large file upload in chunks
err = o.uploadMultipart(ctx, in, size, &blob, &httpHeaders)
err = o.uploadMultipart(in, size, &blob, &httpHeaders)
} else {
// Write a small blob in one transaction
blockBlobURL := blob.ToBlockBlobURL()
@@ -1572,13 +1507,12 @@ func (o *Object) GetTier() string {
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Purger = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.GetTierer = &Object{}
_ fs.SetTierer = &Object{}
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.Purger = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.GetTierer = &Object{}
_ fs.SetTierer = &Object{}
)
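Aside: a standalone sketch of the part-count arithmetic described in uploadMultipart above, assuming the limits quoted in its comments (at most 50,000 blocks per blob, blocks up to 100 MiB) and mirroring the chunk-size doubling loop on one side of the diff; it also reproduces the roughly 195 GiB streaming limit for 4 MiB chunks (illustration only, not backend code):

package main

import "fmt"

// Assumed limits, taken from the comments in uploadMultipart above.
const (
    maxTotalParts = 50000
    mib           = 1024 * 1024
    maxChunkSize  = 100 * mib
)

// chunkSizeFor doubles the chunk size until the file fits in maxTotalParts parts.
func chunkSizeFor(size, chunkSize int64) (int64, error) {
    for {
        parts := size / chunkSize
        if size%chunkSize != 0 {
            parts++
        }
        if parts < maxTotalParts {
            return chunkSize, nil
        }
        chunkSize *= 2
        if chunkSize > maxChunkSize {
            return 0, fmt.Errorf("%d bytes needs more than %d chunks", size, maxTotalParts)
        }
    }
}

func main() {
    // streaming uploads keep the configured chunk size, so the limit is
    // 4 MiB * 50,000 parts, i.e. the ~195 GiB quoted in the comment
    fmt.Println(int64(4*mib) * maxTotalParts / (1024 * mib)) // 195 (GiB)

    cs, _ := chunkSizeFor(1<<40, 4*mib) // a 1 TiB file needs bigger chunks
    fmt.Println(cs / mib)               // 32 (MiB)
}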

View File

@@ -1,4 +1,4 @@
// +build !plan9,!solaris,!js,go1.13
// +build !plan9,!solaris
package azureblob
@@ -16,20 +16,3 @@ func (f *Fs) InternalTest(t *testing.T) {
enabled = f.Features().GetTier
assert.True(t, enabled)
}
func TestIncrement(t *testing.T) {
for _, test := range []struct {
in []byte
want []byte
}{
{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
} {
increment(test.in)
assert.Equal(t, test.want, test.in)
}
}

View File

@@ -1,6 +1,6 @@
// Test AzureBlob filesystem interface
// +build !plan9,!solaris,!js,go1.13
// +build !plan9,!solaris
package azureblob

View File

@@ -1,6 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build plan9 solaris js !go1.13
// +build plan9 solaris
package azureblob

View File

@@ -337,11 +337,3 @@ type CopyFileRequest struct {
Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only)
DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
}
// CopyPartRequest is the request for b2_copy_part - the response is UploadPartResponse
type CopyPartRequest struct {
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
LargeFileID string `json:"largeFileId"` // The ID of the large file the part will belong to, as returned by b2_start_large_file.
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
}
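Aside: a hypothetical helper (not in this package) showing how a CopyPartRequest could be filled in for part n of a server side copy, using the same inclusive "bytes=start-end" Range format that copyChunk builds further down; it assumes it sits alongside the struct above with fmt imported:

// copyPartRequest is a hypothetical helper showing how the struct above maps
// onto one part of a server side copy: the offset is (part-1)*chunkSize and
// the Range header uses an inclusive end byte, as copyChunk does.
func copyPartRequest(sourceID, largeFileID string, part, chunkSize, partSize int64) CopyPartRequest {
    offset := (part - 1) * chunkSize
    return CopyPartRequest{
        SourceID:    sourceID,
        LargeFileID: largeFileID,
        PartNumber:  part,
        Range:       fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
    }
}

// e.g. part 2 of a copy with 100 MiB chunks:
//   copyPartRequest(srcID, largeID, 2, 100<<20, 100<<20).Range == "bytes=104857600-209715199"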

View File

@@ -23,20 +23,20 @@ import (
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/rest"
)
const enc = encodings.B2
const (
defaultEndpoint = "https://api.backblazeb2.com"
headerPrefix = "x-bz-info-" // lower case as that is what the server returns
@@ -55,9 +55,6 @@ const (
minChunkSize = 5 * fs.MebiByte
defaultChunkSize = 96 * fs.MebiByte
defaultUploadCutoff = 200 * fs.MebiByte
largeFileCopyCutoff = 4 * fs.GibiByte // 5E9 is the max
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
)
// Globals
@@ -117,16 +114,6 @@ Files above this size will be uploaded in chunks of "--b2-chunk-size".
This value should be set no larger than 4.657GiB (== 5GB).`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy
Any files larger than this that need to be server side copied will be
copied in chunks of this size.
The minimum is 0 and the maximum is 4.6GB.`,
Default: largeFileCopyCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Upload chunk size. Must fit in memory.
@@ -138,13 +125,8 @@ minimum size.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files
Normally rclone will calculate the SHA1 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files`,
Default: false,
Advanced: true,
}, {
@@ -164,49 +146,23 @@ The duration before the download authorization token will expire.
The minimum value is 1 second. The maximum value is one week.`,
Default: fs.Duration(7 * 24 * time.Hour),
Advanced: true,
}, {
Name: "memory_pool_flush_time",
Default: memoryPoolFlushTime,
Advanced: true,
Help: `How often internal memory buffer pools will be flushed.
Uploads which require additional buffers (e.g. multipart) will use the memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
}, {
Name: "memory_pool_use_mmap",
Default: memoryPoolUseMmap,
Advanced: true,
Help: `Whether to use mmap buffers in internal memory pool.`,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// See: https://www.backblaze.com/b2/docs/files.html
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
// FIXME: allow /, but not leading, trailing or double
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Account string `config:"account"`
Key string `config:"key"`
Endpoint string `config:"endpoint"`
TestMode string `config:"test_mode"`
Versions bool `config:"versions"`
HardDelete bool `config:"hard_delete"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DisableCheckSum bool `config:"disable_checksum"`
DownloadURL string `config:"download_url"`
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
Account string `config:"account"`
Key string `config:"key"`
Endpoint string `config:"endpoint"`
TestMode string `config:"test_mode"`
Versions bool `config:"versions"`
HardDelete bool `config:"hard_delete"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DisableCheckSum bool `config:"disable_checksum"`
DownloadURL string `config:"download_url"`
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
}
// Fs represents a remote b2 server
@@ -228,8 +184,7 @@ type Fs struct {
uploads map[string][]*api.GetUploadURLResponse // Upload URLs by buckedID
authMu sync.Mutex // lock for authorizing the account
pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency
pool *pool.Pool // memory pool
bufferTokens chan []byte // control concurrency of multipart uploads
}
// Object describes a b2 object
@@ -365,6 +320,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
f.fillBufferTokens() // reset the buffer tokens
}
return
}
@@ -425,13 +381,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
_bucketType: make(map[string]string, 1),
uploads: make(map[string][]*api.GetUploadURLResponse),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
fs.Config.Transfers,
opt.MemoryPoolUseMmap,
),
}
f.setRoot(root)
f.features = (&fs.Features{
@@ -446,13 +395,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.srv.SetHeader(testModeHeader, testMode)
fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
}
f.fillBufferTokens()
err = f.authorizeAccount(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to authorize account")
}
// If this is a key limited to a single bucket, it must exist already
if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
allowedBucket := f.opt.Enc.ToStandardName(f.info.Allowed.BucketName)
allowedBucket := enc.ToStandardName(f.info.Allowed.BucketName)
if allowedBucket == "" {
return nil, errors.New("bucket that application key is restricted to no longer exists")
}
@@ -568,25 +518,32 @@ func (f *Fs) clearUploadURL(bucketID string) {
f.uploadMu.Unlock()
}
// getBuf gets a buffer of f.opt.ChunkSize and an upload token
//
// If noBuf is set then it just gets an upload token
func (f *Fs) getBuf(noBuf bool) (buf []byte) {
f.uploadToken.Get()
if !noBuf {
buf = f.pool.Get()
// Fill up (or reset) the buffer tokens
func (f *Fs) fillBufferTokens() {
f.bufferTokens = make(chan []byte, fs.Config.Transfers)
for i := 0; i < fs.Config.Transfers; i++ {
f.bufferTokens <- nil
}
}
// getUploadBlock gets a block from the pool of size chunkSize
func (f *Fs) getUploadBlock() []byte {
buf := <-f.bufferTokens
if buf == nil {
buf = make([]byte, f.opt.ChunkSize)
}
// fs.Debugf(f, "Getting upload block %p", buf)
return buf
}
// putBuf returns a buffer to the memory pool and an upload token
//
// If noBuf is set then it just returns the upload token
func (f *Fs) putBuf(buf []byte, noBuf bool) {
if !noBuf {
f.pool.Put(buf)
// putUploadBlock returns a block to the pool of size chunkSize
func (f *Fs) putUploadBlock(buf []byte) {
buf = buf[:cap(buf)]
if len(buf) != int(f.opt.ChunkSize) {
panic("bad blocksize returned to pool")
}
f.uploadToken.Put()
// fs.Debugf(f, "Returning upload block %p", buf)
f.bufferTokens <- buf
}
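Aside: a standalone sketch of the token-channel pattern used by getUploadBlock/putUploadBlock above: a channel with one slot per transfer acts both as a concurrency limit and as a free list, and buffers are allocated lazily on first use (illustration only, the names are made up):

type blockPool struct {
    tokens chan []byte
    size   int
}

func newBlockPool(slots, size int) *blockPool {
    p := &blockPool{tokens: make(chan []byte, slots), size: size}
    for i := 0; i < slots; i++ {
        p.tokens <- nil // nil means "slot free, buffer not allocated yet"
    }
    return p
}

func (p *blockPool) get() []byte {
    buf := <-p.tokens // blocks while all slots are in use
    if buf == nil {
        buf = make([]byte, p.size)
    }
    return buf
}

func (p *blockPool) put(buf []byte) {
    p.tokens <- buf[:cap(buf)] // restore full length before reuse
}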
// Return an Object from a path
@@ -666,11 +623,11 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
var request = api.ListFileNamesRequest{
BucketID: bucketID,
MaxFileCount: chunkSize,
Prefix: f.opt.Enc.FromStandardPath(directory),
Prefix: enc.FromStandardPath(directory),
Delimiter: delimiter,
}
if directory != "" {
request.StartFileName = f.opt.Enc.FromStandardPath(directory)
request.StartFileName = enc.FromStandardPath(directory)
}
opts := rest.Opts{
Method: "POST",
@@ -690,7 +647,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
for i := range response.Files {
file := &response.Files[i]
file.Name = f.opt.Enc.ToStandardPath(file.Name)
file.Name = enc.ToStandardPath(file.Name)
// Finish if file name no longer has prefix
if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
return nil
@@ -701,7 +658,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
remote := file.Name[len(prefix):]
// Check for directory
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
isDirectory := strings.HasSuffix(remote, "/")
if isDirectory {
remote = remote[:len(remote)-1]
}
@@ -891,7 +848,7 @@ func (f *Fs) listBucketsToFn(ctx context.Context, fn listBucketFn) error {
f._bucketType = make(map[string]string, 1)
for i := range response.Buckets {
bucket := &response.Buckets[i]
bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
bucket.Name = enc.ToStandardName(bucket.Name)
f.cache.MarkOK(bucket.Name)
f._bucketID[bucket.Name] = bucket.ID
f._bucketType[bucket.Name] = bucket.Type
@@ -1013,7 +970,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
}
var request = api.CreateBucketRequest{
AccountID: f.info.AccountID,
Name: f.opt.Enc.FromStandardName(bucket),
Name: enc.FromStandardName(bucket),
Type: "allPrivate",
}
var response api.Bucket
@@ -1097,7 +1054,7 @@ func (f *Fs) hide(ctx context.Context, bucket, bucketPath string) error {
}
var request = api.HideFileRequest{
BucketID: bucketID,
Name: f.opt.Enc.FromStandardPath(bucketPath),
Name: enc.FromStandardPath(bucketPath),
}
var response api.File
err = f.pacer.Call(func() (bool, error) {
@@ -1125,7 +1082,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
}
var request = api.DeleteFileRequest{
ID: ID,
Name: f.opt.Enc.FromStandardPath(Name),
Name: enc.FromStandardPath(Name),
}
var response api.File
err := f.pacer.Call(func() (bool, error) {
@@ -1143,8 +1100,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
// if oldOnly is true then it deletes only non current files.
//
// Implemented here so we can make sure we delete old versions.
func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
bucket, directory := f.split(dir)
func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool) error {
if bucket == "" {
return errors.New("can't purge from root")
}
@@ -1219,76 +1175,19 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
wg.Wait()
if !oldOnly {
checkErr(f.Rmdir(ctx, dir))
checkErr(f.Rmdir(ctx, ""))
}
return errReturn
}
// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purge(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purge(ctx, f.rootBucket, f.rootDirectory, false)
}
// CleanUp deletes all the hidden files.
func (f *Fs) CleanUp(ctx context.Context) error {
return f.purge(ctx, "", true)
}
// copy does a server side copy from dstObj <- srcObj
//
// If newInfo is nil then the metadata will be copied otherwise it
// will be replaced with newInfo
func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *api.File) (err error) {
if srcObj.size >= int64(f.opt.CopyCutoff) {
if newInfo == nil {
newInfo, err = srcObj.getMetaData(ctx)
if err != nil {
return err
}
}
up, err := f.newLargeUpload(ctx, dstObj, nil, srcObj, f.opt.CopyCutoff, true, newInfo)
if err != nil {
return err
}
return up.Upload(ctx)
}
dstBucket, dstPath := dstObj.split()
err = f.makeBucket(ctx, dstBucket)
if err != nil {
return err
}
destBucketID, err := f.getBucketID(ctx, dstBucket)
if err != nil {
return err
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_file",
}
var request = api.CopyFileRequest{
SourceID: srcObj.id,
Name: f.opt.Enc.FromStandardPath(dstPath),
DestBucketID: destBucketID,
}
if newInfo == nil {
request.MetadataDirective = "COPY"
} else {
request.MetadataDirective = "REPLACE"
request.ContentType = newInfo.ContentType
request.Info = newInfo.Info
}
var response api.FileInfo
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return err
}
return dstObj.decodeMetaDataFileInfo(&response)
return f.purge(ctx, f.rootBucket, f.rootDirectory, true)
}
// Copy src to this remote using server side copy operations.
@@ -1301,21 +1200,47 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
err := f.makeBucket(ctx, dstBucket)
if err != nil {
return nil, err
}
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
// Temporary Object under construction
dstObj := &Object{
fs: f,
remote: remote,
}
err := f.copy(ctx, dstObj, srcObj, nil)
destBucketID, err := f.getBucketID(ctx, dstBucket)
if err != nil {
return nil, err
}
return dstObj, nil
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_file",
}
var request = api.CopyFileRequest{
SourceID: srcObj.id,
Name: enc.FromStandardPath(dstPath),
MetadataDirective: "COPY",
DestBucketID: destBucketID,
}
var response api.FileInfo
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
o := &Object{
fs: f,
remote: remote,
}
err = o.decodeMetaDataFileInfo(&response)
if err != nil {
return nil, err
}
return o, nil
}
// Hashes returns the supported hash sets.
@@ -1343,7 +1268,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
}
var request = api.GetDownloadAuthorizationRequest{
BucketID: bucketID,
FileNamePrefix: f.opt.Enc.FromStandardPath(path.Join(f.root, remote)),
FileNamePrefix: enc.FromStandardPath(path.Join(f.root, remote)),
ValidDurationInSeconds: validDurationInSeconds,
}
var response api.GetDownloadAuthorizationResponse
@@ -1358,7 +1283,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
}
// PublicLink returns a link for downloading without account
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
bucket, bucketPath := f.split(remote)
var RootURL string
if f.opt.DownloadURL == "" {
@@ -1435,21 +1360,6 @@ func (o *Object) Size() int64 {
return o.size
}
// Clean the SHA1
//
// Make sure it is lower case
//
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (eg Cyberduck) use this
func cleanSHA1(sha1 string) (out string) {
out = strings.ToLower(sha1)
const unverified = "unverified:"
if strings.HasPrefix(out, unverified) {
out = out[len(unverified):]
}
return out
}
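Aside: a usage sketch of cleanSHA1 (the hash value is the well-known SHA1 of the empty string):

fmt.Println(cleanSHA1("UNVERIFIED:DA39A3EE5E6B4B0D3255BFEF95601890AFD80709"))
// da39a3ee5e6b4b0d3255bfef95601890afd80709
fmt.Println(cleanSHA1("da39a3ee5e6b4b0d3255bfef95601890afd80709")) // unchanged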
// decodeMetaDataRaw sets the metadata from the data passed in
//
// Sets
@@ -1465,7 +1375,6 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
if o.sha1 == "" || o.sha1 == "none" {
o.sha1 = Info[sha1Key]
}
o.sha1 = cleanSHA1(o.sha1)
o.size = Size
// Use the UploadTimestamp if can't get file info
o.modTime = time.Time(UploadTimestamp)
@@ -1586,10 +1495,28 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if err != nil {
return err
}
_, bucketPath := o.split()
info.Info[timeKey] = timeString(modTime)
// Copy to the same name, overwriting the metadata only
return o.fs.copy(ctx, o, o, info)
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_file",
}
var request = api.CopyFileRequest{
SourceID: o.id,
Name: enc.FromStandardPath(bucketPath), // copy to same name
MetadataDirective: "REPLACE",
ContentType: info.ContentType,
Info: info.Info,
}
var response api.FileInfo
err = o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, &request, &response)
return o.fs.shouldRetry(ctx, resp, err)
})
if err != nil {
return err
}
return o.decodeMetaDataFileInfo(&response)
}
// Storable returns if this object is storable
@@ -1673,12 +1600,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
opts.RootURL = o.fs.opt.DownloadURL
}
// Download by id if set and not using DownloadURL otherwise by name
if o.id != "" && o.fs.opt.DownloadURL == "" {
// Download by id if set otherwise by name
if o.id != "" {
opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
} else {
bucket, bucketPath := o.split()
opts.Path += "/file/" + urlEncode(o.fs.opt.Enc.FromStandardName(bucket)) + "/" + urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath))
opts.Path += "/file/" + urlEncode(enc.FromStandardName(bucket)) + "/" + urlEncode(enc.FromStandardPath(bucketPath))
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1705,7 +1632,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
o.sha1 = resp.Header.Get(sha1InfoHeader)
fs.Debugf(o, "Reading sha1 from info - %q", o.sha1)
}
o.sha1 = cleanSHA1(o.sha1)
}
// Don't check length or hash on partial content
if resp.StatusCode == http.StatusPartialContent {
@@ -1765,8 +1691,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
if size == -1 {
// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
buf := o.fs.getBuf(false)
buf := o.fs.getUploadBlock()
n, err := io.ReadFull(in, buf)
if err == nil {
bufReader := bufio.NewReader(in)
@@ -1776,24 +1701,22 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err == nil {
fs.Debugf(o, "File is big enough for chunked streaming")
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
up, err := o.fs.newLargeUpload(ctx, o, in, src)
if err != nil {
o.fs.putBuf(buf, false)
o.fs.putUploadBlock(buf)
return err
}
// NB Stream returns the buffer and token
return up.Stream(ctx, buf)
} else if err == io.EOF || err == io.ErrUnexpectedEOF {
fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
defer o.fs.putBuf(buf, false)
defer o.fs.putUploadBlock(buf)
size = int64(n)
in = bytes.NewReader(buf[:n])
} else {
o.fs.putBuf(buf, false)
return err
}
} else if size > int64(o.fs.opt.UploadCutoff) {
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
up, err := o.fs.newLargeUpload(ctx, o, in, src)
if err != nil {
return err
}
@@ -1877,10 +1800,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Method: "POST",
RootURL: upload.UploadURL,
Body: in,
Options: options,
ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken,
"X-Bz-File-Name": urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath)),
"X-Bz-File-Name": urlEncode(enc.FromStandardPath(bucketPath)),
"Content-Type": fs.MimeType(ctx, src),
sha1Header: calculatedSha1,
timeHeader: timeString(modTime),

View File

@@ -20,9 +20,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/sync/errgroup"
)
type hashAppendingReader struct {
@@ -70,26 +68,20 @@ func newHashAppendingReader(in io.Reader, h gohash.Hash) *hashAppendingReader {
// largeUpload is used to control the upload of large files which need chunking
type largeUpload struct {
f *Fs // parent Fs
o *Object // object being uploaded
doCopy bool // doing copy rather than upload
what string // text name of operation for logs
in io.Reader // read the data from here
wrap accounting.WrapFn // account parts being transferred
id string // ID of the file being uploaded
size int64 // total size
parts int64 // calculated number of parts, if known
sha1s []string // slice of SHA1s for each part
uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
chunkSize int64 // chunk size to use
src *Object // if copying, object we are reading from
f *Fs // parent Fs
o *Object // object being uploaded
in io.Reader // read the data from here
wrap accounting.WrapFn // account parts being transferred
id string // ID of the file being uploaded
size int64 // total size
parts int64 // calculated number of parts, if known
sha1s []string // slice of SHA1s for each part
uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
}
// newLargeUpload starts an upload of object o from in with metadata in src
//
// If newInfo is set then metadata from that will be used instead of reading it from src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, chunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
remote := o.remote
size := src.Size()
parts := int64(0)
@@ -97,8 +89,8 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts = size / int64(o.fs.opt.ChunkSize)
if size%int64(o.fs.opt.ChunkSize) != 0 {
parts++
}
if parts > maxParts {
@@ -107,6 +99,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
sha1SliceSize = parts
}
modTime := src.ModTime(ctx)
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
@@ -117,24 +110,18 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
return nil, err
}
var request = api.StartLargeFileRequest{
BucketID: bucketID,
Name: f.opt.Enc.FromStandardPath(bucketPath),
}
if newInfo == nil {
modTime := src.ModTime(ctx)
request.ContentType = fs.MimeType(ctx, src)
request.Info = map[string]string{
BucketID: bucketID,
Name: enc.FromStandardPath(bucketPath),
ContentType: fs.MimeType(ctx, src),
Info: map[string]string{
timeKey: timeString(modTime),
},
}
// Set the SHA1 if known
if !o.fs.opt.DisableCheckSum {
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
}
// Set the SHA1 if known
if !o.fs.opt.DisableCheckSum || doCopy {
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
}
}
} else {
request.ContentType = newInfo.ContentType
request.Info = newInfo.Info
}
var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) {
@@ -144,24 +131,18 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
if err != nil {
return nil, err
}
up = &largeUpload{
f: f,
o: o,
doCopy: doCopy,
what: "upload",
id: response.ID,
size: size,
parts: parts,
sha1s: make([]string, sha1SliceSize),
chunkSize: int64(chunkSize),
}
// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
if doCopy {
up.what = "copy"
up.src = src.(*Object)
} else {
up.in, up.wrap = accounting.UnWrap(in)
in, wrap := accounting.UnWrap(in)
up = &largeUpload{
f: f,
o: o,
in: in,
wrap: wrap,
id: response.ID,
size: size,
parts: parts,
sha1s: make([]string, sha1SliceSize),
}
return up, nil
}
@@ -203,6 +184,13 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
up.uploadMu.Unlock()
}
// clearUploadURL clears the current UploadURL and the AuthorizationToken
func (up *largeUpload) clearUploadURL() {
up.uploadMu.Lock()
up.uploads = nil
up.uploadMu.Unlock()
}
// Transfer a chunk
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
err := up.f.pacer.Call(func() (bool, error) {
@@ -275,41 +263,9 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
return err
}
// Copy a chunk
func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_part",
}
offset := (part - 1) * up.chunkSize // where we are in the source file
var request = api.CopyPartRequest{
SourceID: up.src.id,
LargeFileID: up.id,
PartNumber: part,
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
}
var response api.UploadPartResponse
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
retry, err := up.f.shouldRetry(ctx, resp, err)
if err != nil {
fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
}
up.sha1s[part-1] = response.SHA1
return retry, err
})
if err != nil {
fs.Debugf(up.o, "Error copying chunk %d: %v", part, err)
} else {
fs.Debugf(up.o, "Done copying chunk %d", part)
}
return err
}
// finish closes off the large upload
func (up *largeUpload) finish(ctx context.Context) error {
fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
opts := rest.Opts{
Method: "POST",
Path: "/b2_finish_large_file",
@@ -331,7 +287,6 @@ func (up *largeUpload) finish(ctx context.Context) error {
// cancel aborts the large upload
func (up *largeUpload) cancel(ctx context.Context) error {
fs.Debugf(up.o, "Cancelling large file %s", up.what)
opts := rest.Opts{
Method: "POST",
Path: "/b2_cancel_large_file",
@@ -344,139 +299,139 @@ func (up *largeUpload) cancel(ctx context.Context) error {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
fs.Errorf(up.o, "Failed to cancel large file %s: %v", up.what, err)
}
return err
}
func (up *largeUpload) managedTransferChunk(ctx context.Context, wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
wg.Add(1)
go func(part int64, buf []byte) {
defer wg.Done()
defer up.f.putUploadBlock(buf)
err := up.transferChunk(ctx, part, buf)
if err != nil {
select {
case errs <- err:
default:
}
}
}(part, buf)
}
func (up *largeUpload) finishOrCancelOnError(ctx context.Context, err error, errs chan error) error {
if err == nil {
select {
case err = <-errs:
default:
}
}
if err != nil {
fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
cancelErr := up.cancel(ctx)
if cancelErr != nil {
fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
}
return err
}
return up.finish(ctx)
}
// Stream uploads the chunks from the input, starting with a required initial
// chunk. Assumes the file size is unknown and will upload until the input
// reaches EOF.
//
// Note that initialUploadBlock must be returned to f.putBuf()
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
var (
g, gCtx = errgroup.WithContext(ctx)
hasMoreParts = true
)
errs := make(chan error, 1)
hasMoreParts := true
var wg sync.WaitGroup
// Transfer initial chunk
up.size = int64(len(initialUploadBlock))
g.Go(func() error {
for part := int64(1); hasMoreParts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
var buf []byte
if part == 1 {
buf = initialUploadBlock
} else {
buf = up.f.getBuf(false)
}
up.managedTransferChunk(ctx, &wg, errs, 1, initialUploadBlock)
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putBuf(buf, false)
return nil
}
// Read the chunk
var n int
if part == 1 {
n = len(buf)
} else {
n, err = io.ReadFull(up.in, buf)
if err == io.ErrUnexpectedEOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
buf = buf[:n]
hasMoreParts = false
} else if err == io.EOF {
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
up.f.putBuf(buf, false)
return nil
} else if err != nil {
// other kinds of errors indicate failure
up.f.putBuf(buf, false)
return err
}
}
// Keep stats up to date
up.parts = part
up.size += int64(n)
if part > maxParts {
up.f.putBuf(buf, false)
return errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putBuf(buf, false)
return up.transferChunk(gCtx, part, buf)
})
outer:
for part := int64(2); hasMoreParts; part++ {
// Check any errors
select {
case err = <-errs:
break outer
default:
}
return nil
})
err = g.Wait()
if err != nil {
return err
// Get a block of memory
buf := up.f.getUploadBlock()
// Read the chunk
var n int
n, err = io.ReadFull(up.in, buf)
if err == io.ErrUnexpectedEOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
buf = buf[:n]
hasMoreParts = false
err = nil
} else if err == io.EOF {
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
up.f.putUploadBlock(buf)
err = nil
break outer
} else if err != nil {
// other kinds of errors indicate failure
up.f.putUploadBlock(buf)
break outer
}
// Keep stats up to date
up.parts = part
up.size += int64(n)
if part > maxParts {
err = errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
break outer
}
// Transfer the chunk
up.managedTransferChunk(ctx, &wg, errs, part, buf)
}
wg.Wait()
up.sha1s = up.sha1s[:up.parts]
return up.finish(ctx)
return up.finishOrCancelOnError(ctx, err, errs)
}
// Upload uploads the chunks from the input
func (up *largeUpload) Upload(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
var (
g, gCtx = errgroup.WithContext(ctx)
remaining = up.size
)
g.Go(func() error {
for part := int64(1); part <= up.parts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
buf := up.f.getBuf(up.doCopy)
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putBuf(buf, up.doCopy)
return nil
}
reqSize := remaining
if reqSize >= up.chunkSize {
reqSize = up.chunkSize
}
if !up.doCopy {
// Read the chunk
buf = buf[:reqSize]
_, err = io.ReadFull(up.in, buf)
if err != nil {
up.f.putBuf(buf, up.doCopy)
return err
}
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putBuf(buf, up.doCopy)
if !up.doCopy {
err = up.transferChunk(gCtx, part, buf)
} else {
err = up.copyChunk(gCtx, part, reqSize)
}
return err
})
remaining -= reqSize
func (up *largeUpload) Upload(ctx context.Context) error {
fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
remaining := up.size
errs := make(chan error, 1)
var wg sync.WaitGroup
var err error
outer:
for part := int64(1); part <= up.parts; part++ {
// Check any errors
select {
case err = <-errs:
break outer
default:
}
return nil
})
err = g.Wait()
if err != nil {
return err
reqSize := remaining
if reqSize >= int64(up.f.opt.ChunkSize) {
reqSize = int64(up.f.opt.ChunkSize)
}
// Get a block of memory
buf := up.f.getUploadBlock()[:reqSize]
// Read the chunk
_, err = io.ReadFull(up.in, buf)
if err != nil {
up.f.putUploadBlock(buf)
break outer
}
// Transfer the chunk
up.managedTransferChunk(ctx, &wg, errs, part, buf)
remaining -= reqSize
}
return up.finish(ctx)
wg.Wait()
return up.finishOrCancelOnError(ctx, err, errs)
}
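Aside: a standalone sketch of the buffered error channel pattern used by Stream and Upload above: parts are transferred concurrently, only the first error is remembered, and no new parts are scheduled once an error has been recorded (illustration only, not backend code):

package main

import (
    "errors"
    "fmt"
    "sync"
)

func uploadParts(parts int, doPart func(int) error) error {
    errs := make(chan error, 1)
    var wg sync.WaitGroup
    var err error
outer:
    for part := 1; part <= parts; part++ {
        select {
        case err = <-errs: // a part already failed - stop scheduling
            break outer
        default:
        }
        wg.Add(1)
        go func(part int) {
            defer wg.Done()
            if e := doPart(part); e != nil {
                select {
                case errs <- e: // record only the first error
                default:
                }
            }
        }(part)
    }
    wg.Wait()
    if err == nil { // pick up an error from the last scheduled parts
        select {
        case err = <-errs:
        default:
        }
    }
    return err
}

func main() {
    err := uploadParts(4, func(part int) error {
        if part == 3 {
            return errors.New("part 3 failed")
        }
        return nil
    })
    fmt.Println(err) // part 3 failed
}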

View File

@@ -222,23 +222,3 @@ type AppAuth struct {
PrivateKey string `json:"privateKey"`
Passphrase string `json:"passphrase"`
}
// User is returned from /users/me
type User struct {
Type string `json:"type"`
ID string `json:"id"`
Name string `json:"name"`
Login string `json:"login"`
CreatedAt time.Time `json:"created_at"`
ModifiedAt time.Time `json:"modified_at"`
Language string `json:"language"`
Timezone string `json:"timezone"`
SpaceAmount int64 `json:"space_amount"`
SpaceUsed int64 `json:"space_used"`
MaxUploadSize int64 `json:"max_upload_size"`
Status string `json:"status"`
JobTitle string `json:"job_title"`
Phone string `json:"phone"`
Address string `json:"address"`
AvatarURL string `json:"avatar_url"`
}

View File

@@ -25,8 +25,6 @@ import (
"strings"
"time"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/jwtutil"
"github.com/youmark/pkcs8"
@@ -38,6 +36,7 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
@@ -49,12 +48,15 @@ import (
"golang.org/x/oauth2/jws"
)
const enc = encodings.Box
const (
rcloneClientID = "d0374ba6pgmaguie02ge15sv1mllndho"
rcloneEncryptedClientSecret = "sYbJYm99WB8jzeaLPU0OPDMJKIkZvD2qOn3SyEMfiJr03RdtDt3xcZEIudRhbIDL"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
decayConstant = 2 // bigger for slower decay, exponential
rootID = "0" // ID of root folder is always this
rootURL = "https://api.box.com/2.0"
uploadURL = "https://upload.box.com/api/2.0"
listChunks = 1000 // chunk size to read directory listings
@@ -87,33 +89,43 @@ func init() {
Config: func(name string, m configmap.Mapper) {
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
var err error
// If using box config.json, use JWT auth
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
err = refreshJWTToken(jsonFile, boxSubType, name, m)
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
privateKey, err := getDecryptedPrivateKey(boxConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
claims, err := getClaims(boxConfig, boxSubType)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(fs.Config)
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
if err != nil {
log.Fatalf("Failed to configure token with jwt authentication: %v", err)
}
// Else, if not using an access token, use oauth2
} else if boxAccessToken == "" || !boxAccessTokenOk {
err = oauthutil.Config("box", name, m, oauthConfig, nil)
} else {
err = oauthutil.Config("box", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token with oauth authentication: %v", err)
}
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "0",
Advanced: true,
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Box App Client Id.\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Box App Client Secret\nLeave blank normally.",
}, {
Name: "box_config_file",
Help: "Box App config.json location\nLeave blank normally." + env.ShellExpandHelp,
}, {
Name: "access_token",
Help: "Box App Primary Access Token\nLeave blank normally.",
Help: "Box App config.json location\nLeave blank normally.",
}, {
Name: "box_sub_type",
Default: "user",
@@ -134,46 +146,10 @@ func init() {
Help: "Max number of times to try committing a multipart file.",
Default: 100,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// From https://developer.box.com/docs/error-codes#section-400-bad-request :
// > Box only supports file or folder names that are 255 characters or less.
// > File names containing non-printable ascii, "/" or "\", names with leading
// > or trailing spaces, and the special names “.” and “..” are also unsupported.
//
// Testing revealed names with leading spaces work fine.
// Also encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}
func refreshJWTToken(jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
jsonFile = env.ShellExpand(jsonFile)
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
privateKey, err := getDecryptedPrivateKey(boxConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
claims, err := getClaims(boxConfig, boxSubType)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(fs.Config)
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
return err
}
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := ioutil.ReadFile(configFile)
if err != nil {
@@ -196,6 +172,7 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimS
Iss: boxConfig.BoxAppSettings.ClientID,
Sub: boxConfig.EnterpriseID,
Aud: tokenURL,
Iat: time.Now().Unix(),
Exp: time.Now().Add(time.Second * 45).Unix(),
PrivateClaims: map[string]interface{}{
"box_sub_type": boxSubType,
@@ -243,11 +220,8 @@ func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err
// Options defines the configuration for this backend
type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CommitRetries int `config:"commit_retries"`
Enc encoder.MultiEncoder `config:"encoding"`
RootFolderID string `config:"root_folder_id"`
AccessToken string `config:"access_token"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CommitRetries int `config:"commit_retries"`
}
// Fs represents a remote box
@@ -299,7 +273,7 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// parsePath parses a box 'url'
// parsePath parses an box 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
@@ -330,7 +304,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
@@ -386,22 +360,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
root = parsePath(root)
client := fshttp.NewClient(fs.Config)
var ts *oauthutil.TokenSource
// If not using an accessToken, create an oauth client and tokensource
if opt.AccessToken == "" {
client, ts, err = oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
}
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
}
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL),
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
}
@@ -411,34 +379,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}).Fill(f)
f.srv.SetErrorHandler(errorHandler)
// If using an accessToken, set the Authorization header
if f.opt.AccessToken != "" {
f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
}
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
return err
})
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
if ts != nil {
// If using box config.json and JWT, renewing should just refresh the token and
// should do so whether there are uploads pending or not.
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
err := refreshJWTToken(jsonFile, boxSubType, name, m)
return err
})
f.tokenRenewer.Start()
} else {
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
return err
})
}
}
// Get rootFolderID
rootID := f.opt.RootFolderID
// Get rootID
f.dirCache = dircache.New(root, rootID, f)
// Find the current root
@@ -541,7 +488,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
Parameters: fieldsValue(),
}
mkdir := api.CreateFolder{
Name: f.opt.Enc.FromStandardName(leaf),
Name: enc.FromStandardName(leaf),
Parent: api.Parent{
ID: pathID,
},
@@ -607,7 +554,7 @@ OUTER:
if item.ItemStatus != api.ItemStatusActive {
continue
}
item.Name = f.opt.Enc.ToStandardName(item.Name)
item.Name = enc.ToStandardName(item.Name)
if fn(item) {
found = true
break OUTER
@@ -631,6 +578,10 @@ OUTER:
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
@@ -671,7 +622,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
// Create the directory for the object if it doesn't exist
leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return
}
@@ -727,7 +678,13 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
_, err := f.dirCache.FindDir(ctx, dir, true)
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
return err
}
@@ -752,6 +709,10 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return errors.New("can't purge root directory")
}
dc := f.dirCache
err := dc.FindRoot(ctx, false)
if err != nil {
return err
}
rootID, err := dc.FindDir(ctx, dir, false)
if err != nil {
return err
@@ -830,7 +791,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
Parameters: fieldsValue(),
}
copyFile := api.CopyFile{
Name: f.opt.Enc.FromStandardName(leaf),
Name: enc.FromStandardName(leaf),
Parent: api.Parent{
ID: directoryID,
},
@@ -856,8 +817,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}
// move a file or folder
@@ -869,7 +830,7 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
Parameters: fieldsValue(),
}
move := api.UpdateFileMove{
Name: f.opt.Enc.FromStandardName(leaf),
Name: enc.FromStandardName(leaf),
Parent: api.Parent{
ID: directoryID,
},
@@ -885,30 +846,6 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
return info, nil
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/users/me",
}
var user api.User
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &user)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to read user info")
}
// FIXME max upload size would be useful to use in Update
usage = &fs.Usage{
Used: fs.NewUsageValue(user.SpaceUsed), // bytes in use
Total: fs.NewUsageValue(user.SpaceAmount), // bytes total
Free: fs.NewUsageValue(user.SpaceAmount - user.SpaceUsed), // bytes free
}
return usage, nil
}
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
@@ -958,14 +895,64 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := path.Join(srcFs.root, srcRemote)
dstPath := path.Join(f.root, dstRemote)
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
// Refuse to move to or from the root
if srcPath == "" || dstPath == "" {
fs.Debugf(src, "DirMove error: Can't move root")
return errors.New("can't move root directory")
}
// find the root src directory
err := srcFs.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
// find the root dst directory
if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
} else {
if f.dirCache.FoundRoot() {
return fs.ErrorDirExists
}
}
// Find ID of dst parent, creating subdirs if necessary
var leaf, directoryID string
findPath := dstRemote
if dstRemote == "" {
findPath = f.root
}
leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
if err != nil {
return err
}
// Check destination does not exist
if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return err
} else {
return fs.ErrorDirExists
}
}
// Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
if err != nil {
return err
}
// Do the move
_, err = f.move(ctx, "/folders/", srcID, dstLeaf, dstDirectoryID)
_, err = f.move(ctx, "/folders/", srcID, leaf, directoryID)
if err != nil {
return err
}
@@ -974,7 +961,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
id, err := f.dirCache.FindDir(ctx, remote, false)
var opts rest.Opts
if err == nil {
@@ -1013,66 +1000,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return info.SharedLink.URL, err
}
// deletePermanently permanently deletes a trashed file
func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
opts := rest.Opts{
Method: "DELETE",
NoResponse: true,
}
if itemType == api.ItemTypeFile {
opts.Path = "/files/" + id + "/trash"
} else {
opts.Path = "/folders/" + id + "/trash"
}
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
})
}
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) (err error) {
opts := rest.Opts{
Method: "GET",
Path: "/folders/trash/items",
Parameters: url.Values{
"fields": []string{"type", "id"},
},
}
opts.Parameters.Set("limit", strconv.Itoa(listChunks))
offset := 0
for {
opts.Parameters.Set("offset", strconv.Itoa(offset))
var result api.FolderItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't list trash")
}
for i := range result.Entries {
item := &result.Entries[i]
if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
err := f.deletePermanently(ctx, item.Type, item.ID)
if err != nil {
return errors.Wrap(err, "failed to delete file")
}
} else {
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
continue
}
}
offset += result.Limit
if offset >= result.TotalCount {
break
}
}
return
}
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
@@ -1226,9 +1153,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// upload does a single non-multipart upload
//
// This is recommended for less than 50 MB of content
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
upload := api.UploadFile{
Name: o.fs.opt.Enc.FromStandardName(leaf),
Name: enc.FromStandardName(leaf),
ContentModifiedAt: api.Time(modTime),
ContentCreatedAt: api.Time(modTime),
Parent: api.Parent{
@@ -1245,7 +1172,6 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
MultipartContentName: "contents",
MultipartFileName: upload.Name,
RootURL: uploadURL,
Options: options,
}
// If object has an ID then it is existing so create a new version
if o.id != "" {
@@ -1272,26 +1198,24 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
if o.fs.tokenRenewer != nil {
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
}
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
size := src.Size()
modTime := src.ModTime(ctx)
remote := o.Remote()
// Create the directory for the object if it doesn't exist
leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true)
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return err
}
// Upload with simple or multipart
if size <= int64(o.fs.opt.UploadCutoff) {
err = o.upload(ctx, in, leaf, directoryID, modTime, options...)
err = o.upload(ctx, in, leaf, directoryID, modTime)
} else {
err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime, options...)
err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime)
}
return err
}
@@ -1312,12 +1236,10 @@ var (
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)

View File

@@ -19,7 +19,6 @@ import (
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/rest"
)
@@ -39,7 +38,7 @@ func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID stri
} else {
opts.Path = "/files/upload_sessions"
request.FolderID = directoryID
request.FileName = o.fs.opt.Enc.FromStandardName(leaf)
request.FileName = enc.FromStandardName(leaf)
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
@@ -55,7 +54,7 @@ func sha1Digest(digest []byte) string {
}
// uploadPart uploads a part in an upload session
func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn, options ...fs.OpenOption) (response *api.UploadPartResponse, err error) {
func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
chunkSize := int64(len(chunk))
sha1sum := sha1.Sum(chunk)
opts := rest.Opts{
@@ -65,7 +64,6 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
ContentType: "application/octet-stream",
ContentLength: &chunkSize,
ContentRange: fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, totalSize),
Options: options,
ExtraHeaders: map[string]string{
"Digest": sha1Digest(sha1sum[:]),
},
@@ -173,7 +171,7 @@ func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error)
}
// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time, options ...fs.OpenOption) (err error) {
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
// Create upload session
session, err := o.createUploadSession(ctx, leaf, directoryID, size)
if err != nil {
@@ -183,13 +181,15 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))
// Cancel the session if something went wrong
defer atexit.OnError(&err, func() {
fs.Debugf(o, "Cancelling multipart upload: %v", err)
cancelErr := o.abortUpload(ctx, session.ID)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
defer func() {
if err != nil {
fs.Debugf(o, "Cancelling multipart upload: %v", err)
cancelErr := o.abortUpload(ctx, session.ID)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", err)
}
}
})()
}()
// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
@@ -236,7 +236,7 @@ outer:
defer wg.Done()
defer o.fs.uploadToken.Put()
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap, options...)
partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to upload part")
select {

View File

@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
package cache
@@ -65,7 +65,6 @@ func init() {
Name: "cache",
Description: "Cache a remote",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
@@ -87,7 +86,7 @@ func init() {
Advanced: true,
}, {
Name: "plex_insecure",
Help: "Skip all certificate verification when connecting to the Plex server",
Help: "Skip all certificate verifications when connecting to the Plex server",
Advanced: true,
}, {
Name: "chunk_size",
@@ -339,7 +338,7 @@ func parseRootPath(path string) (string, error) {
return strings.Trim(path, "/"), nil
}
// NewFs constructs an Fs from the path, container:path
// NewFs constructs a Fs from the path, container:path
func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
@@ -361,10 +360,15 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
}
remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
wrappedFs, wrapErr := cache.Get(remotePath)
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(opt.Remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", opt.Remote)
}
remotePath := fspath.JoinRootPath(wPath, rootPath)
wrappedFs, wrapErr := wInfo.NewFs(wName, remotePath, wConfig)
if wrapErr != nil && wrapErr != fs.ErrorIsFile {
return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
return nil, errors.Wrapf(wrapErr, "failed to make remote %s:%s to wrap", wName, remotePath)
}
var fsErr error
fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
@@ -385,7 +389,6 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
cleanupChan: make(chan bool, 1),
notifiedRemotes: make(map[string]bool),
}
cache.PinUntilFinalized(f.Fs, f)
f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
f.plexConnector = &plexConnector{}
@@ -516,6 +519,9 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
// override only those features that use a temp fs and it doesn't support them
//f.features.ChangeNotify = f.ChangeNotify
if f.opt.TempWritePath != "" {
if f.tempFs.Features().Copy == nil {
f.features.Copy = nil
}
if f.tempFs.Features().Move == nil {
f.features.Move = nil
}
@@ -1526,9 +1532,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Errorf(src, "source remote (%v) doesn't support Copy", src.Fs())
return nil, fs.ErrorCantCopy
}
if f.opt.TempWritePath != "" && src.Fs() == f.tempFs {
return nil, fs.ErrorCantCopy
}
// the source must be a cached object or we abort
srcObj, ok := src.(*Object)
if !ok {
@@ -1698,20 +1701,17 @@ func (f *Fs) Hashes() hash.Set {
return f.Fs.Hashes()
}
// Purge all files in the directory
func (f *Fs) Purge(ctx context.Context, dir string) error {
if dir == "" {
// FIXME this isn't quite right as it should purge the dir prefix
fs.Infof(f, "purging cache")
f.cache.Purge()
}
// Purge all files in the root and the root directory
func (f *Fs) Purge(ctx context.Context) error {
fs.Infof(f, "purging cache")
f.cache.Purge()
do := f.Fs.Features().Purge
if do == nil {
return fs.ErrorCantPurge
return nil
}
err := do(ctx, dir)
err := do(ctx)
if err != nil {
return err
}
@@ -1828,19 +1828,6 @@ func (f *Fs) isRootInPath(p string) bool {
return strings.HasPrefix(p, f.Root()+"/")
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
do := f.Fs.Features().MergeDirs
if do == nil {
return errors.New("MergeDirs not supported")
}
for _, dir := range dirs {
_ = f.cache.RemoveDir(dir.Remote())
}
return do(ctx, dirs)
}
// DirCacheFlush flushes the dir cache
func (f *Fs) DirCacheFlush() {
_ = f.cache.RemoveDir("")
@@ -1895,31 +1882,6 @@ func (f *Fs) Disconnect(ctx context.Context) error {
return do(ctx)
}
var commandHelp = []fs.CommandHelp{
{
Name: "stats",
Short: "Print stats on the cache backend in JSON format.",
},
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
switch name {
case "stats":
return f.Stats()
default:
return nil, fs.ErrorCommandNotFound
}
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -1937,6 +1899,4 @@ var (
_ fs.Abouter = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
)

View File

@@ -1,5 +1,4 @@
// +build !plan9,!js
// +build !race
// +build !plan9
package cache_test
@@ -16,7 +15,9 @@ import (
"os"
"path"
"path/filepath"
"runtime"
"runtime/debug"
"strconv"
"strings"
"testing"
"time"
@@ -30,10 +31,12 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/testy"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -49,7 +52,9 @@ const (
var (
remoteName string
mountDir string
uploadDir string
useMount bool
runInstance *run
errNotSupported = errors.New("not supported")
decryptedToEncryptedRemotes = map[string]string{
@@ -85,7 +90,9 @@ var (
func init() {
goflag.StringVar(&remoteName, "remote-internal", "TestInternalCache", "Remote to test with, defaults to local filesystem")
goflag.StringVar(&mountDir, "mount-dir-internal", "", "")
goflag.StringVar(&uploadDir, "upload-dir-internal", "", "")
goflag.BoolVar(&useMount, "cache-use-mount", false, "Test only with mount")
}
// TestMain drives the tests
@@ -93,7 +100,7 @@ func TestMain(m *testing.M) {
goflag.Parse()
var rc int
log.Printf("Running with the following params: \n remote: %v", remoteName)
log.Printf("Running with the following params: \n remote: %v, \n mount: %v", remoteName, useMount)
runInstance = newRun()
rc = m.Run()
os.Exit(rc)
@@ -266,8 +273,32 @@ func TestInternalObjNotFound(t *testing.T) {
require.Nil(t, obj)
}
func TestInternalRemoteWrittenFileFoundInMount(t *testing.T) {
if !runInstance.useMount {
t.Skip("test needs mount mode")
}
id := fmt.Sprintf("tirwffim%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
var testData []byte
if runInstance.rootIsCrypt {
testData, err = base64.StdEncoding.DecodeString(cryptedTextBase64)
require.NoError(t, err)
} else {
testData = []byte("test content")
}
runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test"), testData)
data, err := runInstance.readDataFromRemote(t, rootFs, "test", 0, int64(len([]byte("test content"))), false)
require.NoError(t, err)
require.Equal(t, "test content", string(data))
}
func TestInternalCachedWrittenContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -311,7 +342,6 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
}
func TestInternalCachedUpdatedContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticucm%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
@@ -661,6 +691,79 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
}
func TestInternalChangeSeenAfterRc(t *testing.T) {
cacheExpire := rc.Calls.Get("cache/expire")
assert.NotNil(t, cacheExpire)
id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if !runInstance.useMount {
t.Skipf("needs mount")
}
if !runInstance.wrappedIsExternal {
t.Skipf("needs drive")
}
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
// create some rand test data
testData := randStringBytes(int(chunkSize*4 + chunkSize/2))
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
// update in the wrapped fs
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
wrappedTime := time.Now().Add(-1 * time.Hour)
err = o.SetModTime(context.Background(), wrappedTime)
require.NoError(t, err)
// get a new instance from the cache
co, err := rootFs.NewObject(context.Background(), "data.bin")
require.NoError(t, err)
require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
// Call the rc function
m, err := cacheExpire.Fn(context.Background(), rc.Params{"remote": "data.bin"})
require.NoError(t, err)
require.Contains(t, m, "status")
require.Contains(t, m, "message")
require.Equal(t, "ok", m["status"])
require.Contains(t, m["message"], "cached file cleared")
// get a new instance from the cache
co, err = rootFs.NewObject(context.Background(), "data.bin")
require.NoError(t, err)
require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
_, err = runInstance.list(t, rootFs, "")
require.NoError(t, err)
// create some rand test data
testData2 := randStringBytes(int(chunkSize))
runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test2"), testData2)
// list should have 1 item only
li1, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
require.Len(t, li1, 1)
// Call the rc function
m, err = cacheExpire.Fn(context.Background(), rc.Params{"remote": "/"})
require.NoError(t, err)
require.Contains(t, m, "status")
require.Contains(t, m, "message")
require.Equal(t, "ok", m["status"])
require.Contains(t, m["message"], "cached directory cleared")
// list should have 2 items now
li2, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
require.Len(t, li2, 2)
}
func TestInternalCacheWrites(t *testing.T) {
id := "ticw"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
@@ -808,9 +911,15 @@ func TestInternalBug2117(t *testing.T) {
type run struct {
okDiff time.Duration
runDefaultCfgMap configmap.Simple
mntDir string
tmpUploadDir string
useMount bool
isMounted bool
rootIsCrypt bool
wrappedIsExternal bool
unmountFn func() error
unmountRes chan error
vfs *vfs.VFS
tempFiles []*os.File
dbPath string
chunkPath string
@@ -820,7 +929,9 @@ type run struct {
func newRun() *run {
var err error
r := &run{
okDiff: time.Second * 9, // really big diff here but the build machines seem to be slow. need a different way for this
okDiff: time.Second * 9, // really big diff here but the build machines seem to be slow. need a different way for this
useMount: useMount,
isMounted: false,
}
// Read in all the defaults for all the options
@@ -833,6 +944,32 @@ func newRun() *run {
r.runDefaultCfgMap.Set(option.Name, fmt.Sprint(option.Default))
}
if mountDir == "" {
if runtime.GOOS != "windows" {
r.mntDir, err = ioutil.TempDir("", "rclonecache-mount")
if err != nil {
log.Fatalf("Failed to create mount dir: %v", err)
return nil
}
} else {
// Find a free drive letter
drive := ""
for letter := 'E'; letter <= 'Z'; letter++ {
drive = string(letter) + ":"
_, err := os.Stat(drive + "\\")
if os.IsNotExist(err) {
goto found
}
}
log.Print("Couldn't find free drive letter for test")
found:
r.mntDir = drive
}
} else {
r.mntDir = mountDir
}
log.Printf("Mount Dir: %v", r.mntDir)
if uploadDir == "" {
r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
if err != nil {
@@ -872,15 +1009,6 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
return nil, nil
}
// Config to pass to NewFs
m := configmap.Simple{}
for k, v := range r.runDefaultCfgMap {
m.Set(k, v)
}
for k, v := range flags {
m.Set(k, v)
}
// if the remote doesn't exist, create a new one with a local one for it
// identify which is the cache remote (it can be wrapped by a crypt too)
rootIsCrypt := false
@@ -889,8 +1017,8 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
localRemote := remote + "-local"
config.FileSet(localRemote, "type", "local")
config.FileSet(localRemote, "nounc", "true")
m.Set("type", "cache")
m.Set("remote", localRemote+":"+filepath.Join(os.TempDir(), localRemote))
config.FileSet(remote, "type", "cache")
config.FileSet(remote, "remote", localRemote+":/var/tmp/"+localRemote)
} else {
remoteType := config.FileGet(remote, "type", "")
if remoteType == "" {
@@ -900,8 +1028,8 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
if remoteType != "cache" {
if remoteType == "crypt" {
rootIsCrypt = true
m.Set("password", cryptPassword1)
m.Set("password2", cryptPassword2)
config.FileSet(remote, "password", cryptPassword1)
config.FileSet(remote, "password2", cryptPassword2)
}
remoteRemote := config.FileGet(remote, "remote", "")
if remoteRemote == "" {
@@ -927,6 +1055,14 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
fs.Config.LowLevelRetries = 1
m := configmap.Simple{}
for k, v := range r.runDefaultCfgMap {
m.Set(k, v)
}
for k, v := range flags {
m.Set(k, v)
}
// Instantiate root
if purge {
boltDb.PurgeTempUploads()
@@ -946,21 +1082,33 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
if purge {
_ = f.Features().Purge(context.Background(), "")
_ = f.Features().Purge(context.Background())
require.NoError(t, err)
}
err = f.Mkdir(context.Background(), "")
require.NoError(t, err)
if r.useMount && !r.isMounted {
r.mountFs(t, f)
}
return f, boltDb
}
func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
err := f.Features().Purge(context.Background(), "")
if r.useMount && r.isMounted {
r.unmountFs(t, f)
}
err := f.Features().Purge(context.Background())
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
require.NoError(t, err)
cfs.StopBackgroundRunners()
if r.useMount && runtime.GOOS != "windows" {
err = os.RemoveAll(r.mntDir)
require.NoError(t, err)
}
err = os.RemoveAll(r.tmpUploadDir)
require.NoError(t, err)
@@ -991,6 +1139,23 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
return f
}
func (r *run) writeRemoteRandomBytes(t *testing.T, f fs.Fs, p string, size int64) string {
remote := path.Join(p, strconv.Itoa(rand.Int())+".bin")
// create some rand test data
testData := randStringBytes(int(size))
r.writeRemoteBytes(t, f, remote, testData)
return remote
}
func (r *run) writeObjectRandomBytes(t *testing.T, f fs.Fs, p string, size int64) fs.Object {
remote := path.Join(p, strconv.Itoa(rand.Int())+".bin")
// create some rand test data
testData := randStringBytes(int(size))
return r.writeObjectBytes(t, f, remote, testData)
}
func (r *run) writeRemoteString(t *testing.T, f fs.Fs, remote, content string) {
r.writeRemoteBytes(t, f, remote, []byte(content))
}
@@ -1000,11 +1165,37 @@ func (r *run) writeObjectString(t *testing.T, f fs.Fs, remote, content string) f
}
func (r *run) writeRemoteBytes(t *testing.T, f fs.Fs, remote string, data []byte) {
r.writeObjectBytes(t, f, remote, data)
var err error
if r.useMount {
err = r.retryBlock(func() error {
return ioutil.WriteFile(path.Join(r.mntDir, remote), data, 0600)
}, 3, time.Second*3)
require.NoError(t, err)
r.vfs.WaitForWriters(10 * time.Second)
} else {
r.writeObjectBytes(t, f, remote, data)
}
}
func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.ReadCloser) {
r.writeObjectReader(t, f, remote, in)
defer func() {
_ = in.Close()
}()
if r.useMount {
out, err := os.Create(path.Join(r.mntDir, remote))
require.NoError(t, err)
defer func() {
_ = out.Close()
}()
_, err = io.Copy(out, in)
require.NoError(t, err)
r.vfs.WaitForWriters(10 * time.Second)
} else {
r.writeObjectReader(t, f, remote, in)
}
}
func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
@@ -1021,6 +1212,10 @@ func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Read
objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
obj, err := f.Put(context.Background(), in, objInfo)
require.NoError(t, err)
if r.useMount {
r.vfs.WaitForWriters(10 * time.Second)
}
return obj
}
@@ -1028,16 +1223,26 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []b
var err error
var obj fs.Object
in1 := bytes.NewReader(data1)
in2 := bytes.NewReader(data2)
objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
if r.useMount {
err = ioutil.WriteFile(path.Join(r.mntDir, remote), data1, 0600)
require.NoError(t, err)
r.vfs.WaitForWriters(10 * time.Second)
err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600)
require.NoError(t, err)
r.vfs.WaitForWriters(10 * time.Second)
obj, err = f.NewObject(context.Background(), remote)
} else {
in1 := bytes.NewReader(data1)
in2 := bytes.NewReader(data2)
objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
obj, err = f.Put(context.Background(), in1, objInfo1)
require.NoError(t, err)
obj, err = f.NewObject(context.Background(), remote)
require.NoError(t, err)
err = obj.Update(context.Background(), in2, objInfo2)
obj, err = f.Put(context.Background(), in1, objInfo1)
require.NoError(t, err)
obj, err = f.NewObject(context.Background(), remote)
require.NoError(t, err)
err = obj.Update(context.Background(), in2, objInfo2)
}
require.NoError(t, err)
return obj
@@ -1047,12 +1252,30 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
size := end - offset
checkSample := make([]byte, size)
co, err := f.NewObject(context.Background(), remote)
if err != nil {
return checkSample, err
if r.useMount {
f, err := os.Open(path.Join(r.mntDir, remote))
defer func() {
_ = f.Close()
}()
if err != nil {
return checkSample, err
}
_, _ = f.Seek(offset, io.SeekStart)
totalRead, err := io.ReadFull(f, checkSample)
checkSample = checkSample[:totalRead]
if err == io.EOF || err == io.ErrUnexpectedEOF {
err = nil
}
if err != nil {
return checkSample, err
}
} else {
co, err := f.NewObject(context.Background(), remote)
if err != nil {
return checkSample, err
}
checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
}
checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
if !noLengthCheck && size != int64(len(checkSample)) {
return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
}
@@ -1075,19 +1298,28 @@ func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLe
}
func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) {
err := f.Mkdir(context.Background(), remote)
var err error
if r.useMount {
err = os.Mkdir(path.Join(r.mntDir, remote), 0700)
} else {
err = f.Mkdir(context.Background(), remote)
}
require.NoError(t, err)
}
func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
var err error
var obj fs.Object
obj, err = f.NewObject(context.Background(), remote)
if err != nil {
err = f.Rmdir(context.Background(), remote)
if r.useMount {
err = os.Remove(path.Join(r.mntDir, remote))
} else {
err = obj.Remove(context.Background())
var obj fs.Object
obj, err = f.NewObject(context.Background(), remote)
if err != nil {
err = f.Rmdir(context.Background(), remote)
} else {
err = obj.Remove(context.Background())
}
}
return err
@@ -1096,14 +1328,42 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
var err error
var l []interface{}
var list fs.DirEntries
list, err = f.List(context.Background(), remote)
for _, ll := range list {
l = append(l, ll)
if r.useMount {
var list []os.FileInfo
list, err = ioutil.ReadDir(path.Join(r.mntDir, remote))
for _, ll := range list {
l = append(l, ll)
}
} else {
var list fs.DirEntries
list, err = f.List(context.Background(), remote)
for _, ll := range list {
l = append(l, ll)
}
}
return l, err
}
func (r *run) listPath(t *testing.T, f fs.Fs, remote string) []string {
var err error
var l []string
if r.useMount {
var list []os.FileInfo
list, err = ioutil.ReadDir(path.Join(r.mntDir, remote))
for _, ll := range list {
l = append(l, ll.Name())
}
} else {
var list fs.DirEntries
list, err = f.List(context.Background(), remote)
for _, ll := range list {
l = append(l, ll.Remote())
}
}
require.NoError(t, err)
return l
}
func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
in, err := os.Open(src)
if err != nil {
@@ -1128,7 +1388,13 @@ func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error
if rootFs.Features().DirMove != nil {
if runInstance.useMount {
err = os.Rename(path.Join(runInstance.mntDir, src), path.Join(runInstance.mntDir, dst))
if err != nil {
return err
}
r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().DirMove != nil {
err = rootFs.Features().DirMove(context.Background(), rootFs, src, dst)
if err != nil {
return err
@@ -1144,7 +1410,13 @@ func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error
if rootFs.Features().Move != nil {
if runInstance.useMount {
err = os.Rename(path.Join(runInstance.mntDir, src), path.Join(runInstance.mntDir, dst))
if err != nil {
return err
}
r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().Move != nil {
obj1, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return err
@@ -1164,7 +1436,13 @@ func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
var err error
if rootFs.Features().Copy != nil {
if r.useMount {
err = r.copyFile(t, rootFs, path.Join(r.mntDir, src), path.Join(r.mntDir, dst))
if err != nil {
return err
}
r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().Copy != nil {
obj, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return err
@@ -1184,6 +1462,13 @@ func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error) {
var err error
if r.useMount {
fi, err := os.Stat(path.Join(runInstance.mntDir, src))
if err != nil {
return time.Time{}, err
}
return fi.ModTime(), nil
}
obj1, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return time.Time{}, err
@@ -1194,6 +1479,13 @@ func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error)
func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
var err error
if r.useMount {
fi, err := os.Stat(path.Join(runInstance.mntDir, src))
if err != nil {
return int64(0), err
}
return fi.Size(), nil
}
obj1, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return int64(0), err
@@ -1204,15 +1496,28 @@ func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) error {
var err error
var obj1 fs.Object
obj1, err = rootFs.NewObject(context.Background(), src)
if err != nil {
return err
if r.useMount {
var f *os.File
f, err = os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return err
}
defer func() {
_ = f.Close()
r.vfs.WaitForWriters(10 * time.Second)
}()
_, err = f.WriteString(data + append)
} else {
var obj1 fs.Object
obj1, err = rootFs.NewObject(context.Background(), src)
if err != nil {
return err
}
data1 := []byte(data + append)
r := bytes.NewReader(data1)
objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
err = obj1.Update(context.Background(), r, objInfo1)
}
data1 := []byte(data + append)
reader := bytes.NewReader(data1)
objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
err = obj1.Update(context.Background(), reader, objInfo1)
return err
}

78
backend/cache/cache_mount_unix_test.go vendored Normal file
View File

@@ -0,0 +1,78 @@
// +build !plan9,!windows
package cache_test
import (
"os"
"testing"
"time"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/rclone/rclone/cmd/mount"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/require"
)
func (r *run) mountFs(t *testing.T, f fs.Fs) {
device := f.Name() + ":" + f.Root()
var options = []fuse.MountOption{
fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
fuse.Subtype("rclone"),
fuse.FSName(device), fuse.VolumeName(device),
fuse.NoAppleDouble(),
fuse.NoAppleXattr(),
//fuse.AllowOther(),
}
err := os.MkdirAll(r.mntDir, os.ModePerm)
require.NoError(t, err)
c, err := fuse.Mount(r.mntDir, options...)
require.NoError(t, err)
filesys := mount.NewFS(f)
server := fusefs.New(c, nil)
// Serve the mount point in the background returning error to errChan
r.unmountRes = make(chan error, 1)
go func() {
err := server.Serve(filesys)
closeErr := c.Close()
if err == nil {
err = closeErr
}
r.unmountRes <- err
}()
// check if the mount process has an error to report
<-c.Ready
require.NoError(t, c.MountError)
r.unmountFn = func() error {
// Shutdown the VFS
filesys.VFS.Shutdown()
return fuse.Unmount(r.mntDir)
}
r.vfs = filesys.VFS
r.isMounted = true
}
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
var err error
for i := 0; i < 4; i++ {
err = r.unmountFn()
if err != nil {
//log.Printf("signal to umount failed - retrying: %v", err)
time.Sleep(3 * time.Second)
continue
}
break
}
require.NoError(t, err)
err = <-r.unmountRes
require.NoError(t, err)
err = r.vfs.CleanUp()
require.NoError(t, err)
r.isMounted = false
}

View File

@@ -0,0 +1,124 @@
// +build windows
package cache_test
import (
"fmt"
"os"
"testing"
"time"
"github.com/billziss-gh/cgofuse/fuse"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/cmount"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/require"
)
// waitFor runs fn() until it returns true or the timeout expires
func waitFor(fn func() bool) (ok bool) {
const totalWait = 10 * time.Second
const individualWait = 10 * time.Millisecond
for i := 0; i < int(totalWait/individualWait); i++ {
ok = fn()
if ok {
return ok
}
time.Sleep(individualWait)
}
return false
}
func (r *run) mountFs(t *testing.T, f fs.Fs) {
// FIXME implement cmount
t.Skip("windows not supported yet")
device := f.Name() + ":" + f.Root()
options := []string{
"-o", "fsname=" + device,
"-o", "subtype=rclone",
"-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
"-o", "uid=-1",
"-o", "gid=-1",
"-o", "allow_other",
// This causes FUSE to supply O_TRUNC with the Open
// call which is more efficient for cmount. However
// it does not work with cgofuse on Windows with
// WinFSP so cmount must work with or without it.
"-o", "atomic_o_trunc",
"--FileSystemName=rclone",
}
fsys := cmount.NewFS(f)
host := fuse.NewFileSystemHost(fsys)
// Serve the mount point in the background returning error to errChan
r.unmountRes = make(chan error, 1)
go func() {
var err error
ok := host.Mount(r.mntDir, options)
if !ok {
err = errors.New("mount failed")
}
r.unmountRes <- err
}()
// unmount
r.unmountFn = func() error {
// Shutdown the VFS
fsys.VFS.Shutdown()
if host.Unmount() {
if !waitFor(func() bool {
_, err := os.Stat(r.mntDir)
return err != nil
}) {
t.Fatalf("mountpoint %q didn't disappear after unmount - continuing anyway", r.mntDir)
}
return nil
}
return errors.New("host unmount failed")
}
// Wait for the filesystem to become ready, checking the file
// system didn't blow up before starting
select {
case err := <-r.unmountRes:
require.NoError(t, err)
case <-time.After(time.Second * 3):
}
// Wait for the mount point to be available on Windows
// On Windows the Init signal comes slightly before the mount is ready
if !waitFor(func() bool {
_, err := os.Stat(r.mntDir)
return err == nil
}) {
t.Errorf("mountpoint %q didn't became available on mount", r.mntDir)
}
r.vfs = fsys.VFS
r.isMounted = true
}
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
// FIXME implement cmount
t.Skip("windows not supported yet")
var err error
for i := 0; i < 4; i++ {
err = r.unmountFn()
if err != nil {
//log.Printf("signal to umount failed - retrying: %v", err)
time.Sleep(3 * time.Second)
continue
}
break
}
require.NoError(t, err)
err = <-r.unmountRes
require.NoError(t, err)
err = r.vfs.CleanUp()
require.NoError(t, err)
r.isMounted = false
}

View File

@@ -1,7 +1,6 @@
// Test Cache filesystem interface
// +build !plan9,!js
// +build !race
// +build !plan9
package cache_test
@@ -18,7 +17,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
UnimplementableFsMethods: []string{"PublicLink", "MergeDirs", "OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
})

View File

@@ -1,6 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build plan9 js
// +build plan9
package cache

View File

@@ -1,5 +1,4 @@
// +build !plan9,!js
// +build !race
// +build !plan9
package cache_test

View File

@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
package cache
@@ -101,6 +101,15 @@ func (d *Directory) abs() string {
return cleanPath(path.Join(d.Dir, d.Name))
}
// parentRemote returns the absolute path parent remote
func (d *Directory) parentRemote() string {
absPath := d.abs()
if absPath == "" {
return ""
}
return cleanPath(path.Dir(absPath))
}
// ModTime returns the cached ModTime
func (d *Directory) ModTime(ctx context.Context) time.Time {
return time.Unix(0, d.CacheModTime)

View File

@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
package cache

View File

@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
package cache
@@ -24,16 +24,15 @@ const (
type Object struct {
fs.Object `json:"-"`
ParentFs fs.Fs `json:"-"` // parent fs
CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory
Dir string `json:"dir"` // abs path of the object
CacheModTime int64 `json:"modTime"` // modification or creation time - IsZero for unknown
CacheSize int64 `json:"size"` // size of directory and contents or -1 if unknown
CacheStorable bool `json:"storable"` // says whether this object can be stored
CacheType string `json:"cacheType"`
CacheTs time.Time `json:"cacheTs"`
cacheHashesMu sync.Mutex
ParentFs fs.Fs `json:"-"` // parent fs
CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory
Dir string `json:"dir"` // abs path of the object
CacheModTime int64 `json:"modTime"` // modification or creation time - IsZero for unknown
CacheSize int64 `json:"size"` // size of directory and contents or -1 if unknown
CacheStorable bool `json:"storable"` // says whether this object can be stored
CacheType string `json:"cacheType"`
CacheTs time.Time `json:"cacheTs"`
CacheHashes map[hash.Type]string // all supported hashes cached
refreshMutex sync.Mutex
@@ -104,9 +103,7 @@ func (o *Object) updateData(ctx context.Context, source fs.Object) {
o.CacheSize = source.Size()
o.CacheStorable = source.Storable()
o.CacheTs = time.Now()
o.cacheHashesMu.Lock()
o.CacheHashes = make(map[hash.Type]string)
o.cacheHashesMu.Unlock()
}
// Fs returns its FS info
@@ -271,9 +268,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
o.CacheModTime = src.ModTime(ctx).UnixNano()
o.CacheSize = src.Size()
o.cacheHashesMu.Lock()
o.CacheHashes = make(map[hash.Type]string)
o.cacheHashesMu.Unlock()
o.CacheTs = time.Now()
o.persist()
@@ -314,12 +309,11 @@ func (o *Object) Remove(ctx context.Context) error {
// since it might or might not be called, this is lazy loaded
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
_ = o.refresh(ctx)
o.cacheHashesMu.Lock()
if o.CacheHashes == nil {
o.CacheHashes = make(map[hash.Type]string)
}
cachedHash, found := o.CacheHashes[ht]
o.cacheHashesMu.Unlock()
if found {
return cachedHash, nil
}
@@ -330,9 +324,7 @@ func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
if err != nil {
return "", err
}
o.cacheHashesMu.Lock()
o.CacheHashes[ht] = liveHash
o.cacheHashesMu.Unlock()
o.persist()
fs.Debugf(o, "object hash cached: %v", liveHash)

View File

@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
package cache

View File

@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
package cache

View File

@@ -1,4 +1,4 @@
// +build !plan9,!js
// +build !plan9
package cache
@@ -16,10 +16,10 @@ import (
"sync"
"time"
bolt "github.com/coreos/bbolt"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
bolt "go.etcd.io/bbolt"
)
// Constants
@@ -767,6 +767,31 @@ func (b *Persistent) iterateBuckets(buk *bolt.Bucket, bucketFn func(name string)
return err
}
func (b *Persistent) dumpRoot() string {
var itBuckets func(buk *bolt.Bucket) map[string]interface{}
itBuckets = func(buk *bolt.Bucket) map[string]interface{} {
m := make(map[string]interface{})
c := buk.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
if v == nil {
buk2 := buk.Bucket(k)
m[string(k)] = itBuckets(buk2)
} else {
m[string(k)] = "-"
}
}
return m
}
var mm map[string]interface{}
_ = b.db.View(func(tx *bolt.Tx) error {
mm = itBuckets(tx.Bucket([]byte(RootBucket)))
return nil
})
raw, _ := json.MarshalIndent(mm, "", " ")
return string(raw)
}
// addPendingUpload adds a new file to the pending queue of uploads
func (b *Persistent) addPendingUpload(destPath string, started bool) error {
return b.db.Update(func(tx *bolt.Tx) error {
@@ -980,6 +1005,15 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
})
}
// SetPendingUploadToStarted is a way to mark an entry as started (even if it's not already)
// TO BE USED IN TESTING ONLY
func (b *Persistent) SetPendingUploadToStarted(remote string) error {
return b.updatePendingUpload(remote, func(item *tempUploadInfo) error {
item.Started = true
return nil
})
}
// ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue
func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error {
return b.db.Update(func(tx *bolt.Tx) error {
@@ -1027,6 +1061,19 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
})
}
// PurgeTempUploads will remove all the pending uploads from the queue
// TO BE USED IN TESTING ONLY
func (b *Persistent) PurgeTempUploads() {
b.tempQueueMux.Lock()
defer b.tempQueueMux.Unlock()
_ = b.db.Update(func(tx *bolt.Tx) error {
_ = tx.DeleteBucket([]byte(tempBucket))
_, _ = tx.CreateBucketIfNotExists([]byte(tempBucket))
return nil
})
}
// Close should be called when the program ends gracefully
func (b *Persistent) Close() {
b.cleanupMux.Lock()

View File

@@ -1,23 +0,0 @@
package cache
import bolt "go.etcd.io/bbolt"
// PurgeTempUploads will remove all the pending uploads from the queue
func (b *Persistent) PurgeTempUploads() {
b.tempQueueMux.Lock()
defer b.tempQueueMux.Unlock()
_ = b.db.Update(func(tx *bolt.Tx) error {
_ = tx.DeleteBucket([]byte(tempBucket))
_, _ = tx.CreateBucketIfNotExists([]byte(tempBucket))
return nil
})
}
// SetPendingUploadToStarted is a way to mark an entry as started (even if it's not already)
func (b *Persistent) SetPendingUploadToStarted(remote string) error {
return b.updatePendingUpload(remote, func(item *tempUploadInfo) error {
item.Started = true
return nil
})
}

View File

@@ -12,19 +12,16 @@ import (
gohash "hash"
"io"
"io/ioutil"
"math/rand"
"path"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
@@ -37,57 +34,46 @@ import (
// and optional metadata object. If it's present,
// meta object is named after the original file.
//
// The only supported metadata format is simplejson atm.
// It supports only per-file meta objects that are rudimentary,
// used mostly for consistency checks (lazily for performance reasons).
// Other formats can be developed that use an external meta store
// free of these limitations, but this needs some support from
// rclone core (eg. metadata store interfaces).
//
// The following types of chunks are supported:
// data and control, active and temporary.
// Chunk type is identified by matching chunk file name
// based on the chunk name format configured by user.
//
// Both data and control chunks can be either temporary (aka hidden)
// or active (non-temporary aka normal aka permanent).
// Both data and control chunks can be either temporary or
// active (non-temporary).
// An operation creates temporary chunks while it runs.
// By completion it removes temporary and leaves active chunks.
// By completion it removes temporary and leaves active
// (aka normal aka permanent) chunks.
//
// Temporary chunks have a special hardcoded suffix in addition
// to the configured name pattern.
// Temporary suffix includes so called transaction identifier
// (abbreviated as `xactID` below), a generic non-negative base-36 "number"
// Temporary (aka hidden) chunks have a special hardcoded suffix
// in addition to the configured name pattern. The suffix comes last
// to prevent name collisions with non-temporary chunks.
// Temporary suffix includes so called transaction number usually
// abbreviated as `xactNo` below, a generic non-negative integer
// used by parallel operations to share a composite object.
// Chunker also accepts the longer decimal temporary suffix (obsolete),
// which is transparently converted to the new format. In its maximum
// length of 13 decimals it makes a 7-digit base-36 number.
//
// Chunker can tell data chunks from control chunks by the characters
// located in the "hash placeholder" position of configured format.
// Data chunks have decimal digits there.
// Control chunks have in that position a short lowercase alphanumeric
// string (starting with a letter) prepended by underscore.
// Control chunks have a short lowercase literal prepended by underscore
// in that position.
//
// Metadata format v1 does not define any control chunk types,
// they are currently ignored aka reserved.
// In future they can be used to implement resumable uploads etc.
//
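// As a purely illustrative sketch (assuming the default name format
// `*.rclone_chunk.###` with start_from=1; the control type "meta" and the
// suffix values below are hypothetical), a composite file "video.avi"
// might be stored as:
//
//	video.avi.rclone_chunk.001    - first active data chunk
//	video.avi.rclone_chunk.002    - second active data chunk
//	video.avi.rclone_chunk._meta  - a control chunk of type "meta"
//
// with a temporary data chunk carrying the transaction suffix on top of the
// active name, eg. "video.avi.rclone_chunk.001..tmp_0000012345" in the
// decimal style or "video.avi.rclone_chunk.001_0q2n" in the base-36 style,
// depending on which suffix format is in effect.
//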
const (
ctrlTypeRegStr = `[a-z][a-z0-9]{2,6}`
tempSuffixFormat = `_%04s`
tempSuffixRegStr = `_([0-9a-z]{4,9})`
tempSuffixRegOld = `\.\.tmp_([0-9]{10,13})`
ctrlTypeRegStr = `[a-z]{3,9}`
tempChunkFormat = `%s..tmp_%010d`
tempChunkRegStr = `\.\.tmp_([0-9]{10,19})`
)
var (
// regular expressions to validate control type and temporary suffix
ctrlTypeRegexp = regexp.MustCompile(`^` + ctrlTypeRegStr + `$`)
tempSuffixRegexp = regexp.MustCompile(`^` + tempSuffixRegStr + `$`)
ctrlTypeRegexp = regexp.MustCompile(`^` + ctrlTypeRegStr + `$`)
)
// Normally metadata is a small piece of JSON (about 100-300 bytes).
// The size of valid metadata must never exceed this limit.
// The size of valid metadata size must never exceed this limit.
// Current maximum provides a reasonable room for future extensions.
//
// Please refrain from increasing it, this can cause old rclone versions
@@ -115,9 +101,6 @@ const revealHidden = false
// Prevent memory overflow due to specially crafted chunk name
const maxSafeChunkNumber = 10000000
// Number of attempts to find unique transaction identifier
const maxTransactionProbes = 100
// standard chunker errors
var (
ErrChunkOverflow = errors.New("chunk number overflow")
@@ -130,6 +113,13 @@ const (
delFailed = 2 // move, then delete and try again if failed
)
// Note: metadata logic is tightly coupled with chunker code in many
// places, eg. in checks whether a file should have meta object or is
// eligible for chunking.
// If more metadata formats (or versions of a format) are added in future,
// it may be advisable to factor it into a "metadata strategy" interface
// similar to chunkingReader or linearReader below.
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -239,18 +229,15 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.New("can't point remote at itself - check the value of the remote setting")
}
baseName, basePath, err := fspath.Parse(remote)
baseInfo, baseName, basePath, baseConfig, err := fs.ConfigFs(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
}
if baseName != "" {
baseName += ":"
}
// Look for a file first
remotePath := fspath.JoinRootPath(basePath, rpath)
baseFs, err := cache.Get(baseName + remotePath)
baseFs, err := baseInfo.NewFs(baseName, remotePath, baseConfig)
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", baseName+remotePath)
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", baseName, remotePath)
}
if !operations.CanServerSideMove(baseFs) {
return nil, errors.New("can't use chunker on a backend which doesn't support server side move or copy")
@@ -262,7 +249,6 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
root: rpath,
opt: *opt,
}
cache.PinUntilFinalized(f.base, f)
f.dirSort = true // processEntries requires that meta Objects prerun data chunks atm.
if err := f.configure(opt.NameFormat, opt.MetaFormat, opt.HashType); err != nil {
@@ -275,8 +261,8 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// detects a composite file because it finds the first chunk!
// (yet can't satisfy fstest.CheckListing, will ignore)
if err == nil && !f.useMeta && strings.Contains(rpath, "/") {
firstChunkPath := f.makeChunkName(remotePath, 0, "", "")
_, testErr := cache.Get(baseName + firstChunkPath)
firstChunkPath := f.makeChunkName(remotePath, 0, "", -1)
_, testErr := baseInfo.NewFs(baseName, firstChunkPath, baseConfig)
if testErr == fs.ErrorIsFile {
err = testErr
}
@@ -296,8 +282,6 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
ServerSideAcrossConfigs: true,
}).Fill(f).Mask(baseFs).WrapsFs(f, baseFs)
f.features.Disable("ListR") // Recursive listing may cause chunker skip files
return f, err
}
@@ -326,16 +310,12 @@ type Fs struct {
dataNameFmt string // name format of data chunks
ctrlNameFmt string // name format of control chunks
nameRegexp *regexp.Regexp // regular expression to match chunk names
xactIDRand *rand.Rand // generator of random transaction identifiers
xactIDMutex sync.Mutex // mutex for the source of randomness
opt Options // copy of Options
features *fs.Features // optional features
dirSort bool // reserved for future, ignored
}
// configure sets up chunker for given name format, meta format and hash type.
// It also seeds the source of random transaction identifiers.
// configure must be called only from NewFs or by unit tests.
// configure must be called only from NewFs or by unit tests
func (f *Fs) configure(nameFormat, metaFormat, hashType string) error {
if err := f.setChunkNameFormat(nameFormat); err != nil {
return errors.Wrapf(err, "invalid name format '%s'", nameFormat)
@@ -346,10 +326,6 @@ func (f *Fs) configure(nameFormat, metaFormat, hashType string) error {
if err := f.setHashType(hashType); err != nil {
return err
}
randomSeed := time.Now().UnixNano()
f.xactIDRand = rand.New(rand.NewSource(randomSeed))
return nil
}
@@ -438,13 +414,13 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
}
reDataOrCtrl := fmt.Sprintf("(?:(%s)|_(%s))", reDigits, ctrlTypeRegStr)
// this must be non-greedy or else it could eat up temporary suffix
// this must be non-greedy or else it can eat up temporary suffix
const mainNameRegStr = "(.+?)"
strRegex := regexp.QuoteMeta(pattern)
strRegex = reHashes.ReplaceAllLiteralString(strRegex, reDataOrCtrl)
strRegex = strings.Replace(strRegex, "\\*", mainNameRegStr, -1)
strRegex = fmt.Sprintf("^%s(?:%s|%s)?$", strRegex, tempSuffixRegStr, tempSuffixRegOld)
strRegex = fmt.Sprintf("^%s(?:%s)?$", strRegex, tempChunkRegStr)
f.nameRegexp = regexp.MustCompile(strRegex)
// craft printf formats for active data/control chunks
@@ -459,36 +435,34 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
return nil
}
// makeChunkName produces chunk name (or path) for a given file.
// makeChunkName produces chunk name (or path) for given file.
//
// filePath can be name, relative or absolute path of main file.
// mainPath can be name, relative or absolute path of main file.
//
// chunkNo must be a zero based index of data chunk.
// Negative chunkNo eg. -1 indicates a control chunk.
// ctrlType is type of control chunk (must be valid).
// ctrlType must be "" for data chunks.
//
// xactID is a transaction identifier. Empty xactID denotes active chunk,
// otherwise temporary chunk name is produced.
// xactNo is a transaction number.
// Negative xactNo eg. -1 indicates an active chunk,
// otherwise produce temporary chunk name.
//
func (f *Fs) makeChunkName(filePath string, chunkNo int, ctrlType, xactID string) string {
dir, parentName := path.Split(filePath)
var name, tempSuffix string
func (f *Fs) makeChunkName(mainPath string, chunkNo int, ctrlType string, xactNo int64) string {
dir, mainName := path.Split(mainPath)
var name string
switch {
case chunkNo >= 0 && ctrlType == "":
name = fmt.Sprintf(f.dataNameFmt, parentName, chunkNo+f.opt.StartFrom)
name = fmt.Sprintf(f.dataNameFmt, mainName, chunkNo+f.opt.StartFrom)
case chunkNo < 0 && ctrlTypeRegexp.MatchString(ctrlType):
name = fmt.Sprintf(f.ctrlNameFmt, parentName, ctrlType)
name = fmt.Sprintf(f.ctrlNameFmt, mainName, ctrlType)
default:
panic("makeChunkName: invalid argument") // must not produce something we can't consume
}
if xactID != "" {
tempSuffix = fmt.Sprintf(tempSuffixFormat, xactID)
if !tempSuffixRegexp.MatchString(tempSuffix) {
panic("makeChunkName: invalid argument")
}
if xactNo >= 0 {
name = fmt.Sprintf(tempChunkFormat, name, xactNo)
}
return dir + name + tempSuffix
return dir + name
}
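As an illustration only (not part of the diff): assuming the default `*.rclone_chunk.###` pattern with start_from=1, both taken from the unit tests further down, the naming scheme above expands to the printf formats in this standalone sketch. The transaction suffix value is made up.

package main

import "fmt"

// Standalone sketch of the default chunk naming scheme; the formats below are the
// expansions of `*.rclone_chunk.###` seen in the unit tests, not the backend code.
const (
	dataNameFmt = "%s.rclone_chunk.%03d" // active data chunks
	ctrlNameFmt = "%s.rclone_chunk._%s"  // control chunks
)

func main() {
	fmt.Printf(dataNameFmt+"\n", "video.avi", 0+1)    // video.avi.rclone_chunk.001
	fmt.Printf(ctrlNameFmt+"\n", "video.avi", "info") // video.avi.rclone_chunk._info
	// A temporary chunk is the active name plus a transaction suffix:
	// "_<xactID>" in the newer scheme, "..tmp_<number>" in the older one.
	fmt.Printf(dataNameFmt+"_0v1b2z\n", "video.avi", 1+1) // video.avi.rclone_chunk.002_0v1b2z
}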
// parseChunkName checks whether given file path belongs to
@@ -496,21 +470,20 @@ func (f *Fs) makeChunkName(filePath string, chunkNo int, ctrlType, xactID string
//
// filePath can be name, relative or absolute path of a file.
//
// Returned parentPath is path of the composite file owning the chunk.
// It's a non-empty string if valid chunk name is detected
// or "" if it's not a chunk.
// Returned mainPath is a non-empty string if valid chunk name
// is detected or "" if it's not a chunk.
// Other returned values depend on detected chunk type:
// data or control, active or temporary:
//
// data chunk - the returned chunkNo is non-negative and ctrlType is ""
// control chunk - the chunkNo is -1 and ctrlType is a non-empty string
// active chunk - the returned xactID is ""
// temporary chunk - the xactID is a non-empty string
func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ctrlType, xactID string) {
// control chunk - the chunkNo is -1 and ctrlType is non-empty string
// active chunk - the returned xactNo is -1
// temporary chunk - the xactNo is non-negative integer
func (f *Fs) parseChunkName(filePath string) (mainPath string, chunkNo int, ctrlType string, xactNo int64) {
dir, name := path.Split(filePath)
match := f.nameRegexp.FindStringSubmatch(name)
if match == nil || match[1] == "" {
return "", -1, "", ""
return "", -1, "", -1
}
var err error
@@ -521,26 +494,19 @@ func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ct
}
if chunkNo -= f.opt.StartFrom; chunkNo < 0 {
fs.Infof(f, "invalid data chunk number in file %q", name)
return "", -1, "", ""
return "", -1, "", -1
}
}
xactNo = -1
if match[4] != "" {
xactID = match[4]
}
if match[5] != "" {
// old-style temporary suffix
number, err := strconv.ParseInt(match[5], 10, 64)
if err != nil || number < 0 {
fs.Infof(f, "invalid old-style transaction number in file %q", name)
return "", -1, "", ""
if xactNo, err = strconv.ParseInt(match[4], 10, 64); err != nil || xactNo < 0 {
fs.Infof(f, "invalid transaction number in file %q", name)
return "", -1, "", -1
}
// convert old-style transaction number to base-36 transaction ID
xactID = fmt.Sprintf(tempSuffixFormat, strconv.FormatInt(number, 36))
xactID = xactID[1:] // strip leading underscore
}
parentPath = dir + match[1]
mainPath = dir + match[1]
ctrlType = match[3]
return
}
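For reference, a small standalone sketch showing which regexp capture groups feed the return values above. The pattern is copied verbatim from the `*.chunk.###` case in the tests below: group 1 is the main name, 2 the chunk number, 3 the control type, 4 the new-style transaction ID and 5 the old-style temporary suffix.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Pattern copied from the unit tests for the `*.chunk.###` format (sketch only).
	re := regexp.MustCompile(`^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
	for _, name := range []string{"fish.chunk.004", "fish.chunk._info", "fish.chunk.004_0v1b2z"} {
		fmt.Printf("%q -> %q\n", name, re.FindStringSubmatch(name))
	}
}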
@@ -548,74 +514,17 @@ func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ct
// forbidChunk prints error message or raises error if file is chunk.
// First argument sets log prefix, use `false` to suppress message.
func (f *Fs) forbidChunk(o interface{}, filePath string) error {
if parentPath, _, _, _ := f.parseChunkName(filePath); parentPath != "" {
if mainPath, _, _, _ := f.parseChunkName(filePath); mainPath != "" {
if f.opt.FailHard {
return fmt.Errorf("chunk overlap with %q", parentPath)
return fmt.Errorf("chunk overlap with %q", mainPath)
}
if boolVal, isBool := o.(bool); !isBool || boolVal {
fs.Errorf(o, "chunk overlap with %q", parentPath)
fs.Errorf(o, "chunk overlap with %q", mainPath)
}
}
return nil
}
// newXactID produces a sufficiently random transaction identifier.
//
// The temporary suffix mask allows identifiers consisting of 4-9
// base-36 digits (ie. digits 0-9 or lowercase letters a-z).
// The identifiers must be unique between transactions running on
// the single file in parallel.
//
// Currently the function produces 6-character identifiers.
// Together with underscore this makes a 7-character temporary suffix.
//
// The first 4 characters isolate groups of transactions by time intervals.
// The maximum length of interval is base-36 "zzzz" ie. 1,679,615 seconds.
// The function instead takes the largest prime below this number
// (see https://primes.utm.edu) as the interval length to better safeguard
// against repeating pseudo-random sequences in cases when rclone is
// invoked from a periodic scheduler like unix cron.
// Thus, the interval is slightly more than 19 days 10 hours 33 minutes.
//
// The remaining 2 base-36 digits (in the range from 0 to 1295 inclusive)
// are taken from the local random source.
// This provides about 0.1% collision probability for two parallel
// operations started at the same second and working on the same file.
//
// A non-empty filePath argument enables probing for an existing temporary chunk
// to further reduce the chance of a collision.
func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err error) {
const closestPrimeZzzzSeconds = 1679609
const maxTwoBase36Digits = 1295
unixSec := time.Now().Unix()
if unixSec < 0 {
unixSec = -unixSec // unlikely but the number must be positive
}
circleSec := unixSec % closestPrimeZzzzSeconds
first4chars := strconv.FormatInt(circleSec, 36)
for tries := 0; tries < maxTransactionProbes; tries++ {
f.xactIDMutex.Lock()
randomness := f.xactIDRand.Int63n(maxTwoBase36Digits + 1)
f.xactIDMutex.Unlock()
last2chars := strconv.FormatInt(randomness, 36)
xactID = fmt.Sprintf("%04s%02s", first4chars, last2chars)
if filePath == "" {
return
}
probeChunk := f.makeChunkName(filePath, 0, "", xactID)
_, probeErr := f.base.NewObject(ctx, probeChunk)
if probeErr != nil {
return
}
}
return "", fmt.Errorf("can't setup transaction for %s", filePath)
}
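A standalone sketch of the arithmetic described in the comment above; it mirrors the doc comment rather than calling the backend code, and the printed value only illustrates the shape of an identifier.

package main

import (
	"fmt"
	"math/rand"
	"strconv"
	"time"
)

func main() {
	const closestPrimeZzzzSeconds = 1679609 // largest prime below base-36 "zzzz" seconds
	const maxTwoBase36Digits = 1295         // base-36 "zz"

	circleSec := time.Now().Unix() % closestPrimeZzzzSeconds
	first4 := strconv.FormatInt(circleSec, 36)                        // time-derived part
	last2 := strconv.FormatInt(rand.Int63n(maxTwoBase36Digits+1), 36) // random part
	fmt.Printf("xactID: %04s%02s\n", first4, last2)                   // eg. "k3tq0b"
}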
// List the objects and directories in dir into entries.
// The entries can be returned in any order but should be
// for a complete directory.
@@ -693,8 +602,8 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
switch entry := dirOrObject.(type) {
case fs.Object:
remote := entry.Remote()
if mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(remote); mainRemote != "" {
if xactID != "" {
if mainRemote, chunkNo, ctrlType, xactNo := f.parseChunkName(remote); mainRemote != "" {
if xactNo != -1 {
if revealHidden {
fs.Infof(f, "ignore temporary chunk %q", remote)
}
@@ -777,7 +686,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
//
// Please note that every NewObject invocation will scan the whole directory.
// Using here something like fs.DirCache might improve performance
// (yet making the logic more complex).
// (but will make logic more complex, though).
//
// Note that chunker prefers analyzing file names rather than reading
// the content of the meta object, assuming that directory scans are fast
@@ -843,8 +752,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if !strings.Contains(entryRemote, remote) {
continue // bypass regexp to save cpu
}
mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(entryRemote)
if mainRemote == "" || mainRemote != remote || ctrlType != "" || xactID != "" {
mainRemote, chunkNo, ctrlType, xactNo := f.parseChunkName(entryRemote)
if mainRemote == "" || mainRemote != remote || ctrlType != "" || xactNo != -1 {
continue // skip non-conforming, temporary and control chunks
}
//fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
@@ -877,7 +786,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// This is either a composite object with metadata or a non-chunked
// file without metadata. Validate it and update the total data size.
// As an optimization, skip metadata reading here - we will call
// readMetadata lazily when needed (reading can be expensive).
// readMetadata lazily when needed.
if err := o.validate(); err != nil {
return nil, err
}
@@ -934,11 +843,14 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
}
}()
baseRemote := remote
xactID, errXact := f.newXactID(ctx, baseRemote)
if errXact != nil {
return nil, errXact
// Use system timer as a trivial source of transaction numbers,
// don't try hard to safeguard against chunk collisions between
// parallel transactions.
xactNo := time.Now().Unix()
if xactNo < 0 {
xactNo = -xactNo // unlikely but transaction number must be positive
}
baseRemote := remote
// Transfer chunks data
for c.chunkNo = 0; !c.done; c.chunkNo++ {
@@ -946,7 +858,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
return nil, ErrChunkOverflow
}
tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactID)
tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactNo)
size := c.sizeLeft
if size > c.chunkSize {
size = c.chunkSize
@@ -960,8 +872,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
}
info := f.wrapInfo(src, chunkRemote, size)
// Refill chunkLimit and let basePut repeatedly call chunkingReader.Read()
c.chunkLimit = c.chunkSize
// TODO: handle range/limit options
chunk, errChunk := basePut(ctx, wrapIn, info, options...)
if errChunk != nil {
@@ -1052,7 +962,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
// Rename data chunks from temporary to final names
for chunkNo, chunk := range c.chunks {
chunkRemote := f.makeChunkName(baseRemote, chunkNo, "", "")
chunkRemote := f.makeChunkName(baseRemote, chunkNo, "", -1)
chunkMoved, errMove := f.baseMove(ctx, chunk, chunkRemote, delFailed)
if errMove != nil {
return nil, errMove
@@ -1170,14 +1080,10 @@ func (c *chunkingReader) updateHashes() {
func (c *chunkingReader) Read(buf []byte) (bytesRead int, err error) {
if c.chunkLimit <= 0 {
// Chunk complete - switch to next one.
// Note #1:
// We might not get here because some remotes (eg. box multi-uploader)
// read the specified size exactly and skip the concluding EOF Read.
// Then a check in the put loop will kick in.
// Note #2:
// The crypt backend after receiving EOF here will call Read again
// and we must insist on returning EOF, so we postpone refilling
// chunkLimit to the main loop.
c.chunkLimit = c.chunkSize
return 0, io.EOF
}
if int64(len(buf)) > c.chunkLimit {
@@ -1315,6 +1221,11 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
return f.newObject("", o, nil), nil
}
// Precision returns the precision of this Fs
func (f *Fs) Precision() time.Duration {
return f.base.Precision()
}
// Hashes returns the supported hash sets.
// Chunker advertises a hash type if and only if it can be calculated
// for files of any size, non-chunked or composite.
@@ -1346,7 +1257,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.base.Rmdir(ctx, dir)
}
// Purge all files in the directory
// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
@@ -1357,12 +1268,12 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// As a result it removes not only composite chunker files with their
// active chunks but also all hidden temporary chunks in the directory.
//
func (f *Fs) Purge(ctx context.Context, dir string) error {
func (f *Fs) Purge(ctx context.Context) error {
do := f.base.Features().Purge
if do == nil {
return fs.ErrorCantPurge
}
return do(ctx, dir)
return do(ctx)
}
// Remove an object (chunks and metadata, if any)
@@ -1702,8 +1613,8 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
//fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
if entryType == fs.EntryObject {
mainPath, _, _, xactID := f.parseChunkName(path)
if mainPath != "" && xactID == "" {
mainPath, _, _, xactNo := f.parseChunkName(path)
if mainPath != "" && xactNo == -1 {
path = mainPath
}
}
@@ -2152,7 +2063,7 @@ type metaSimpleJSON struct {
// Current implementation creates metadata in three cases:
// - for files larger than chunk size
// - if file contents can be mistaken as meta object
// - if consistent hashing is On but wrapped remote can't provide given hash
// - if consistent hashing is on but wrapped remote can't provide given hash
//
func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1 string) ([]byte, error) {
version := metadataVersion
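For orientation, a hedged sketch of what the simplejson metadata object produced here looks like; the field names below are taken from the chunker documentation and are assumptions, not read from this diff.

package main

import (
	"encoding/json"
	"fmt"
)

// Assumed layout of the simplejson metadata (field names may differ from the
// actual struct tags in metaSimpleJSON).
type metaSketch struct {
	Version int    `json:"ver"`
	Size    int64  `json:"size"`
	NChunks int    `json:"nchunks"`
	MD5     string `json:"md5,omitempty"`
	SHA1    string `json:"sha1,omitempty"`
}

func main() {
	out, _ := json.Marshal(metaSketch{Version: 1, Size: 1234567, NChunks: 2, MD5: "0123456789abcdef0123456789abcdef"})
	fmt.Println(string(out)) // {"ver":1,"size":1234567,"nchunks":2,"md5":"0123456789abcdef0123456789abcdef"}
}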
@@ -2266,11 +2177,6 @@ func (f *Fs) String() string {
return fmt.Sprintf("Chunked '%s:%s'", f.name, f.root)
}
// Precision returns the precision of this Fs
func (f *Fs) Precision() time.Duration {
return f.base.Precision()
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)

View File

@@ -64,40 +64,35 @@ func testChunkNameFormat(t *testing.T, f *Fs) {
assert.Error(t, err)
}
assertMakeName := func(wantChunkName, mainName string, chunkNo int, ctrlType, xactID string) {
gotChunkName := ""
assert.NotPanics(t, func() {
gotChunkName = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
}, "makeChunkName(%q,%d,%q,%q) must not panic", mainName, chunkNo, ctrlType, xactID)
if gotChunkName != "" {
assert.Equal(t, wantChunkName, gotChunkName)
}
assertMakeName := func(wantChunkName, mainName string, chunkNo int, ctrlType string, xactNo int64) {
gotChunkName := f.makeChunkName(mainName, chunkNo, ctrlType, xactNo)
assert.Equal(t, wantChunkName, gotChunkName)
}
assertMakeNamePanics := func(mainName string, chunkNo int, ctrlType, xactID string) {
assertMakeNamePanics := func(mainName string, chunkNo int, ctrlType string, xactNo int64) {
assert.Panics(t, func() {
_ = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
}, "makeChunkName(%q,%d,%q,%q) should panic", mainName, chunkNo, ctrlType, xactID)
_ = f.makeChunkName(mainName, chunkNo, ctrlType, xactNo)
}, "makeChunkName(%q,%d,%q,%d) should panic", mainName, chunkNo, ctrlType, xactNo)
}
assertParseName := func(fileName, wantMainName string, wantChunkNo int, wantCtrlType, wantXactID string) {
gotMainName, gotChunkNo, gotCtrlType, gotXactID := f.parseChunkName(fileName)
assertParseName := func(fileName, wantMainName string, wantChunkNo int, wantCtrlType string, wantXactNo int64) {
gotMainName, gotChunkNo, gotCtrlType, gotXactNo := f.parseChunkName(fileName)
assert.Equal(t, wantMainName, gotMainName)
assert.Equal(t, wantChunkNo, gotChunkNo)
assert.Equal(t, wantCtrlType, gotCtrlType)
assert.Equal(t, wantXactID, gotXactID)
assert.Equal(t, wantXactNo, gotXactNo)
}
const newFormatSupported = false // support for patterns not starting with base name (*)
// valid formats
assertFormat(`*.rclone_chunk.###`, `%s.rclone_chunk.%03d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*.rclone_chunk.#`, `%s.rclone_chunk.%d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*_chunk_#####`, `%s_chunk_%05d`, `%s_chunk__%s`, `^(.+?)_chunk_(?:([0-9]{5,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*-chunk-#`, `%s-chunk-%d`, `%s-chunk-_%s`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*-chunk-#-%^$()[]{}.+-!?:\`, `%s-chunk-%d-%%^$()[]{}.+-!?:\`, `%s-chunk-_%s-%%^$()[]{}.+-!?:\`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))-%\^\$\(\)\[\]\{\}\.\+-!\?:\\(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*.rclone_chunk.###`, `%s.rclone_chunk.%03d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
assertFormat(`*.rclone_chunk.#`, `%s.rclone_chunk.%d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
assertFormat(`*_chunk_#####`, `%s_chunk_%05d`, `%s_chunk__%s`, `^(.+?)_chunk_(?:([0-9]{5,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
assertFormat(`*-chunk-#`, `%s-chunk-%d`, `%s-chunk-_%s`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
assertFormat(`*-chunk-#-%^$()[]{}.+-!?:\`, `%s-chunk-%d-%%^$()[]{}.+-!?:\`, `%s-chunk-_%s-%%^$()[]{}.+-!?:\`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z]{3,9}))-%\^\$\(\)\[\]\{\}\.\+-!\?:\\(?:\.\.tmp_([0-9]{10,19}))?$`)
if newFormatSupported {
assertFormat(`_*-chunk-##,`, `_%s-chunk-%02d,`, `_%s-chunk-_%s,`, `^_(.+?)-chunk-(?:([0-9]{2,})|_([a-z][a-z0-9]{2,6})),(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`_*-chunk-##,`, `_%s-chunk-%02d,`, `_%s-chunk-_%s,`, `^_(.+?)-chunk-(?:([0-9]{2,})|_([a-z]{3,9})),(?:\.\.tmp_([0-9]{10,19}))?$`)
}
// invalid formats
@@ -116,223 +111,142 @@ func testChunkNameFormat(t *testing.T, f *Fs) {
// quick tests
if newFormatSupported {
assertFormat(`part_*_#`, `part_%s_%d`, `part_%s__%s`, `^part_(.+?)_(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9][0-9a-z]{3,8})\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`part_*_#`, `part_%s_%d`, `part_%s__%s`, `^part_(.+?)_(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
f.opt.StartFrom = 1
assertMakeName(`part_fish_1`, "fish", 0, "", "")
assertParseName(`part_fish_43`, "fish", 42, "", "")
assertMakeName(`part_fish__locks`, "fish", -2, "locks", "")
assertParseName(`part_fish__locks`, "fish", -1, "locks", "")
assertMakeName(`part_fish__x2y`, "fish", -2, "x2y", "")
assertParseName(`part_fish__x2y`, "fish", -1, "x2y", "")
assertMakeName(`part_fish_3_0004`, "fish", 2, "", "4")
assertParseName(`part_fish_4_0005`, "fish", 3, "", "0005")
assertMakeName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -3, "blkinfo", "jj5fvo3wr")
assertParseName(`part_fish__blkinfo_zz9fvo3wr`, "fish", -1, "blkinfo", "zz9fvo3wr")
// old-style temporary suffix (parse only)
assertParseName(`part_fish_4..tmp_0000000011`, "fish", 3, "", "000b")
assertParseName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -1, "blkinfo", "jj5fvo3wr")
assertMakeName(`part_fish_1`, "fish", 0, "", -1)
assertParseName(`part_fish_43`, "fish", 42, "", -1)
assertMakeName(`part_fish_3..tmp_0000000004`, "fish", 2, "", 4)
assertParseName(`part_fish_4..tmp_0000000005`, "fish", 3, "", 5)
assertMakeName(`part_fish__locks`, "fish", -2, "locks", -3)
assertParseName(`part_fish__locks`, "fish", -1, "locks", -1)
assertMakeName(`part_fish__blockinfo..tmp_1234567890123456789`, "fish", -3, "blockinfo", 1234567890123456789)
assertParseName(`part_fish__blockinfo..tmp_1234567890123456789`, "fish", -1, "blockinfo", 1234567890123456789)
}
// prepare format for long tests
assertFormat(`*.chunk.###`, `%s.chunk.%03d`, `%s.chunk._%s`, `^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*.chunk.###`, `%s.chunk.%03d`, `%s.chunk._%s`, `^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
f.opt.StartFrom = 2
// valid data chunks
assertMakeName(`fish.chunk.003`, "fish", 1, "", "")
assertParseName(`fish.chunk.003`, "fish", 1, "", "")
assertMakeName(`fish.chunk.021`, "fish", 19, "", "")
assertParseName(`fish.chunk.021`, "fish", 19, "", "")
assertMakeName(`fish.chunk.003`, "fish", 1, "", -1)
assertMakeName(`fish.chunk.011..tmp_0000054321`, "fish", 9, "", 54321)
assertMakeName(`fish.chunk.011..tmp_1234567890`, "fish", 9, "", 1234567890)
assertMakeName(`fish.chunk.1916..tmp_123456789012345`, "fish", 1914, "", 123456789012345)
// valid temporary data chunks
assertMakeName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
assertParseName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
assertMakeName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
assertParseName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
assertMakeName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
assertParseName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
assertMakeName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")
assertParseName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")
// valid temporary data chunks (old temporary suffix, only parse)
assertParseName(`fish.chunk.004..tmp_0000000047`, "fish", 2, "", "001b")
assertParseName(`fish.chunk.323..tmp_9994567890123`, "fish", 321, "", "3jjfvo3wr")
assertParseName(`fish.chunk.003`, "fish", 1, "", -1)
assertParseName(`fish.chunk.004..tmp_0000000021`, "fish", 2, "", 21)
assertParseName(`fish.chunk.021`, "fish", 19, "", -1)
assertParseName(`fish.chunk.323..tmp_1234567890123456789`, "fish", 321, "", 1234567890123456789)
// parsing invalid data chunk names
assertParseName(`fish.chunk.3`, "", -1, "", "")
assertParseName(`fish.chunk.001`, "", -1, "", "")
assertParseName(`fish.chunk.21`, "", -1, "", "")
assertParseName(`fish.chunk.-21`, "", -1, "", "")
assertParseName(`fish.chunk.3`, "", -1, "", -1)
assertParseName(`fish.chunk.001`, "", -1, "", -1)
assertParseName(`fish.chunk.21`, "", -1, "", -1)
assertParseName(`fish.chunk.-21`, "", -1, "", -1)
assertParseName(`fish.chunk.004abcd`, "", -1, "", "") // missing underscore delimiter
assertParseName(`fish.chunk.004__1234`, "", -1, "", "") // extra underscore delimiter
assertParseName(`fish.chunk.004_123`, "", -1, "", "") // too short temporary suffix
assertParseName(`fish.chunk.004_1234567890`, "", -1, "", "") // too long temporary suffix
assertParseName(`fish.chunk.004_-1234`, "", -1, "", "") // temporary suffix must be positive
assertParseName(`fish.chunk.004_123E`, "", -1, "", "") // uppercase not allowed
assertParseName(`fish.chunk.004_12.3`, "", -1, "", "") // punctuation not allowed
// parsing invalid data chunk names (old temporary suffix)
assertParseName(`fish.chunk.004.tmp_0000000021`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_123456789`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_012345678901234567890123456789`, "", -1, "", "")
assertParseName(`fish.chunk.323..tmp_12345678901234`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_-1`, "", -1, "", "")
assertParseName(`fish.chunk.004.tmp_0000000021`, "", -1, "", -1)
assertParseName(`fish.chunk.003..tmp_123456789`, "", -1, "", -1)
assertParseName(`fish.chunk.003..tmp_012345678901234567890123456789`, "", -1, "", -1)
assertParseName(`fish.chunk.003..tmp_-1`, "", -1, "", -1)
// valid control chunks
assertMakeName(`fish.chunk._info`, "fish", -1, "info", "")
assertMakeName(`fish.chunk._locks`, "fish", -2, "locks", "")
assertMakeName(`fish.chunk._blkinfo`, "fish", -3, "blkinfo", "")
assertMakeName(`fish.chunk._x2y`, "fish", -4, "x2y", "")
assertMakeName(`fish.chunk._info`, "fish", -1, "info", -1)
assertMakeName(`fish.chunk._locks`, "fish", -2, "locks", -1)
assertMakeName(`fish.chunk._blockinfo`, "fish", -3, "blockinfo", -1)
assertParseName(`fish.chunk._info`, "fish", -1, "info", "")
assertParseName(`fish.chunk._locks`, "fish", -1, "locks", "")
assertParseName(`fish.chunk._blkinfo`, "fish", -1, "blkinfo", "")
assertParseName(`fish.chunk._x2y`, "fish", -1, "x2y", "")
assertParseName(`fish.chunk._info`, "fish", -1, "info", -1)
assertParseName(`fish.chunk._locks`, "fish", -1, "locks", -1)
assertParseName(`fish.chunk._blockinfo`, "fish", -1, "blockinfo", -1)
// valid temporary control chunks
assertMakeName(`fish.chunk._info_0001`, "fish", -1, "info", "1")
assertMakeName(`fish.chunk._locks_4321`, "fish", -2, "locks", "4321")
assertMakeName(`fish.chunk._uploads_abcd`, "fish", -3, "uploads", "abcd")
assertMakeName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -4, "blkinfo", "xyzabcdef")
assertMakeName(`fish.chunk._x2y_1aaa`, "fish", -5, "x2y", "1aaa")
assertMakeName(`fish.chunk._info..tmp_0000000021`, "fish", -1, "info", 21)
assertMakeName(`fish.chunk._locks..tmp_0000054321`, "fish", -2, "locks", 54321)
assertMakeName(`fish.chunk._uploads..tmp_0000000000`, "fish", -3, "uploads", 0)
assertMakeName(`fish.chunk._blockinfo..tmp_1234567890123456789`, "fish", -4, "blockinfo", 1234567890123456789)
assertParseName(`fish.chunk._info_0001`, "fish", -1, "info", "0001")
assertParseName(`fish.chunk._locks_4321`, "fish", -1, "locks", "4321")
assertParseName(`fish.chunk._uploads_9abc`, "fish", -1, "uploads", "9abc")
assertParseName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -1, "blkinfo", "xyzabcdef")
assertParseName(`fish.chunk._x2y_1aaa`, "fish", -1, "x2y", "1aaa")
// valid temporary control chunks (old temporary suffix, parse only)
assertParseName(`fish.chunk._info..tmp_0000000047`, "fish", -1, "info", "001b")
assertParseName(`fish.chunk._locks..tmp_0000054321`, "fish", -1, "locks", "15wx")
assertParseName(`fish.chunk._uploads..tmp_0000000000`, "fish", -1, "uploads", "0000")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123`, "fish", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._x2y..tmp_0000000000`, "fish", -1, "x2y", "0000")
assertParseName(`fish.chunk._info..tmp_0000000021`, "fish", -1, "info", 21)
assertParseName(`fish.chunk._locks..tmp_0000054321`, "fish", -1, "locks", 54321)
assertParseName(`fish.chunk._uploads..tmp_0000000000`, "fish", -1, "uploads", 0)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789`, "fish", -1, "blockinfo", 1234567890123456789)
// parsing invalid control chunk names
assertParseName(`fish.chunk.metadata`, "", -1, "", "") // must be prepended by underscore
assertParseName(`fish.chunk.info`, "", -1, "", "")
assertParseName(`fish.chunk.locks`, "", -1, "", "")
assertParseName(`fish.chunk.uploads`, "", -1, "", "")
assertParseName(`fish.chunk.info`, "", -1, "", -1)
assertParseName(`fish.chunk.locks`, "", -1, "", -1)
assertParseName(`fish.chunk.uploads`, "", -1, "", -1)
assertParseName(`fish.chunk.blockinfo`, "", -1, "", -1)
assertParseName(`fish.chunk._os`, "", -1, "", "") // too short
assertParseName(`fish.chunk._metadata`, "", -1, "", "") // too long
assertParseName(`fish.chunk._blockinfo`, "", -1, "", "") // way too long
assertParseName(`fish.chunk._4me`, "", -1, "", "") // cannot start with digit
assertParseName(`fish.chunk._567`, "", -1, "", "") // cannot be all digits
assertParseName(`fish.chunk._me_ta`, "", -1, "", "") // punctuation not allowed
assertParseName(`fish.chunk._in-fo`, "", -1, "", "")
assertParseName(`fish.chunk._.bin`, "", -1, "", "")
assertParseName(`fish.chunk._.2xy`, "", -1, "", "")
assertParseName(`fish.chunk._os`, "", -1, "", -1)
assertParseName(`fish.chunk._futuredata`, "", -1, "", -1)
assertParseName(`fish.chunk._me_ta`, "", -1, "", -1)
assertParseName(`fish.chunk._in-fo`, "", -1, "", -1)
assertParseName(`fish.chunk._.bin`, "", -1, "", -1)
// parsing invalid temporary control chunks
assertParseName(`fish.chunk._blkinfo1234`, "", -1, "", "") // missing underscore delimiter
assertParseName(`fish.chunk._info__1234`, "", -1, "", "") // extra underscore delimiter
assertParseName(`fish.chunk._info_123`, "", -1, "", "") // too short temporary suffix
assertParseName(`fish.chunk._info_1234567890`, "", -1, "", "") // too long temporary suffix
assertParseName(`fish.chunk._info_-1234`, "", -1, "", "") // temporary suffix must be positive
assertParseName(`fish.chunk._info_123E`, "", -1, "", "") // uppercase not allowed
assertParseName(`fish.chunk._info_12.3`, "", -1, "", "") // punctuation not allowed
assertParseName(`fish.chunk._locks..tmp_123456789`, "", -1, "", "")
assertParseName(`fish.chunk._meta..tmp_-1`, "", -1, "", "")
assertParseName(`fish.chunk._blockinfo..tmp_012345678901234567890123456789`, "", -1, "", "")
assertParseName(`fish.chunk._locks..tmp_123456789`, "", -1, "", -1)
assertParseName(`fish.chunk._meta..tmp_-1`, "", -1, "", -1)
assertParseName(`fish.chunk._blockinfo..tmp_012345678901234567890123456789`, "", -1, "", -1)
// short control chunk names: 3 letters ok, 1-2 letters not allowed
assertMakeName(`fish.chunk._ext`, "fish", -1, "ext", "")
assertParseName(`fish.chunk._int`, "fish", -1, "int", "")
assertMakeNamePanics("fish", -1, "in", "")
assertMakeNamePanics("fish", -1, "up", "4")
assertMakeNamePanics("fish", -1, "x", "")
assertMakeNamePanics("fish", -1, "c", "1z")
assertMakeName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0")
assertMakeName(`fish.chunk._ext_0026`, "fish", -1, "ext", "26")
assertMakeName(`fish.chunk._int_0abc`, "fish", -1, "int", "abc")
assertMakeName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertParseName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0000")
assertParseName(`fish.chunk._ext_0026`, "fish", -1, "ext", "0026")
assertParseName(`fish.chunk._int_0abc`, "fish", -1, "int", "0abc")
assertParseName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertMakeName(`fish.chunk._ext`, "fish", -1, "ext", -1)
assertMakeName(`fish.chunk._ext..tmp_0000000021`, "fish", -1, "ext", 21)
assertParseName(`fish.chunk._int`, "fish", -1, "int", -1)
assertParseName(`fish.chunk._int..tmp_0000000021`, "fish", -1, "int", 21)
assertMakeNamePanics("fish", -1, "in", -1)
assertMakeNamePanics("fish", -1, "up", 4)
assertMakeNamePanics("fish", -1, "x", -1)
assertMakeNamePanics("fish", -1, "c", 4)
// base file name can sometimes look like a valid chunk name
assertParseName(`fish.chunk.003.chunk.004`, "fish.chunk.003", 2, "", "")
assertParseName(`fish.chunk.003.chunk._info`, "fish.chunk.003", -1, "info", "")
assertParseName(`fish.chunk.003.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk.003.chunk.004`, "fish.chunk.003", 2, "", -1)
assertParseName(`fish.chunk.003.chunk.005..tmp_0000000021`, "fish.chunk.003", 3, "", 21)
assertParseName(`fish.chunk.003.chunk._info`, "fish.chunk.003", -1, "info", -1)
assertParseName(`fish.chunk.003.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk.003", -1, "blockinfo", 1234567890123456789)
assertParseName(`fish.chunk.003.chunk._Meta`, "", -1, "", -1)
assertParseName(`fish.chunk.003.chunk._x..tmp_0000054321`, "", -1, "", -1)
assertParseName(`fish.chunk._info.chunk.004`, "fish.chunk._info", 2, "", "")
assertParseName(`fish.chunk._info.chunk._info`, "fish.chunk._info", -1, "info", "")
assertParseName(`fish.chunk._info.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.004`, "fish.chunk.004..tmp_0000000021", 2, "", -1)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.005..tmp_0000000021`, "fish.chunk.004..tmp_0000000021", 3, "", 21)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._info`, "fish.chunk.004..tmp_0000000021", -1, "info", -1)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk.004..tmp_0000000021", -1, "blockinfo", 1234567890123456789)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._Meta`, "", -1, "", -1)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._x..tmp_0000054321`, "", -1, "", -1)
// base file name looking like a valid chunk name (old temporary suffix)
assertParseName(`fish.chunk.003.chunk.005..tmp_0000000022`, "fish.chunk.003", 3, "", "000m")
assertParseName(`fish.chunk.003.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._info.chunk.005..tmp_0000000023`, "fish.chunk._info", 3, "", "000n")
assertParseName(`fish.chunk._info.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._info.chunk.004`, "fish.chunk._info", 2, "", -1)
assertParseName(`fish.chunk._info.chunk.005..tmp_0000000021`, "fish.chunk._info", 3, "", 21)
assertParseName(`fish.chunk._info.chunk._info`, "fish.chunk._info", -1, "info", -1)
assertParseName(`fish.chunk._info.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk._info", -1, "blockinfo", 1234567890123456789)
assertParseName(`fish.chunk._info.chunk._info.chunk._Meta`, "", -1, "", -1)
assertParseName(`fish.chunk._info.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", -1)
assertParseName(`fish.chunk.003.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.003", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._info.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._info", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.004`, "fish.chunk.004..tmp_0000000021", 2, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.005..tmp_0000000025`, "fish.chunk.004..tmp_0000000021", 3, "", "000p")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._info`, "fish.chunk.004..tmp_0000000021", -1, "info", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.004..tmp_0000000021", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.004`, "fish.chunk._blkinfo..tmp_9994567890123", 2, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.005..tmp_0000000026`, "fish.chunk._blkinfo..tmp_9994567890123", 3, "", "000q")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "info", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.004`, "fish.chunk._blkinfo..tmp_1234567890123456789", 2, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.005..tmp_0000000022`, "fish.chunk._blkinfo..tmp_1234567890123456789", 3, "", "000m")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "info", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk.004`, "fish.chunk._blockinfo..tmp_1234567890123456789", 2, "", -1)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk.005..tmp_0000000021`, "fish.chunk._blockinfo..tmp_1234567890123456789", 3, "", 21)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info`, "fish.chunk._blockinfo..tmp_1234567890123456789", -1, "info", -1)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk._blockinfo..tmp_1234567890123456789", -1, "blockinfo", 1234567890123456789)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info.chunk._Meta`, "", -1, "", -1)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", -1)
// attempts to make invalid chunk names
assertMakeNamePanics("fish", -1, "", "") // neither data nor control
assertMakeNamePanics("fish", 0, "info", "") // both data and control
assertMakeNamePanics("fish", -1, "metadata", "") // control type too long
assertMakeNamePanics("fish", -1, "blockinfo", "") // control type way too long
assertMakeNamePanics("fish", -1, "2xy", "") // first digit not allowed
assertMakeNamePanics("fish", -1, "123", "") // all digits not allowed
assertMakeNamePanics("fish", -1, "Meta", "") // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", "") // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", "")
assertMakeNamePanics("fish", -1, "info_", "")
assertMakeNamePanics("fish", -2, ".bind", "")
assertMakeNamePanics("fish", -2, "bind.", "")
assertMakeNamePanics("fish", -1, "", -1) // neither data nor control
assertMakeNamePanics("fish", 0, "info", -1) // both data and control
assertMakeNamePanics("fish", -1, "futuredata", -1) // control type too long
assertMakeNamePanics("fish", -1, "123", -1) // digits not allowed
assertMakeNamePanics("fish", -1, "Meta", -1) // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", -1) // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", -1)
assertMakeNamePanics("fish", -1, "info_", -1)
assertMakeNamePanics("fish", -2, ".bind", -3)
assertMakeNamePanics("fish", -2, "bind.", -3)
assertMakeNamePanics("fish", -1, "", "1") // neither data nor control
assertMakeNamePanics("fish", 0, "info", "23") // both data and control
assertMakeNamePanics("fish", -1, "metadata", "45") // control type too long
assertMakeNamePanics("fish", -1, "blockinfo", "7") // control type way too long
assertMakeNamePanics("fish", -1, "2xy", "abc") // first digit not allowed
assertMakeNamePanics("fish", -1, "123", "def") // all digits not allowed
assertMakeNamePanics("fish", -1, "Meta", "mnk") // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", "xyz") // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", "5678")
assertMakeNamePanics("fish", -1, "info_", "999")
assertMakeNamePanics("fish", -2, ".bind", "0")
assertMakeNamePanics("fish", -2, "bind.", "0")
assertMakeNamePanics("fish", 0, "", "1234567890") // temporary suffix too long
assertMakeNamePanics("fish", 0, "", "123F4") // uppercase not allowed
assertMakeNamePanics("fish", 0, "", "123.") // punctuation not allowed
assertMakeNamePanics("fish", 0, "", "_123")
assertMakeNamePanics("fish", -1, "", 1) // neither data nor control
assertMakeNamePanics("fish", 0, "info", 12) // both data and control
assertMakeNamePanics("fish", -1, "futuredata", 45) // control type too long
assertMakeNamePanics("fish", -1, "123", 123) // digits not allowed
assertMakeNamePanics("fish", -1, "Meta", 456) // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", 321) // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", 15678)
assertMakeNamePanics("fish", -1, "info_", 999)
assertMakeNamePanics("fish", -2, ".bind", 0)
assertMakeNamePanics("fish", -2, "bind.", 0)
}
func testSmallFileInternals(t *testing.T, f *Fs) {
@@ -469,7 +383,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
billyObj := newFile("billy")
billyChunkName := func(chunkNo int) string {
return f.makeChunkName(billyObj.Remote(), chunkNo, "", "")
return f.makeChunkName(billyObj.Remote(), chunkNo, "", -1)
}
err := f.Mkdir(ctx, billyChunkName(1))
@@ -519,7 +433,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
// recreate billy in case it was anyhow corrupted
willyObj := newFile("willy")
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", "")
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", -1)
f.opt.FailHard = false
willyChunk, err := f.NewObject(ctx, willyChunkName)
f.opt.FailHard = true
@@ -570,7 +484,7 @@ func testChunkNumberOverflow(t *testing.T, f *Fs) {
f.opt.FailHard = false
file, fileName := newFile(f, "wreaker")
wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", ""))
wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", -1))
f.opt.FailHard = false
fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
@@ -618,7 +532,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
filename := path.Join(dir, name)
require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")
part := putFile(f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
part := putFile(f.base, f.makeChunkName(filename, 0, "", -1), "oops", "", true)
_ = putFile(f, filename, contents, "upload "+description, false)
obj, err := f.NewObject(ctx, filename)

View File

@@ -71,6 +71,30 @@ type ReadSeekCloser interface {
// OpenRangeSeek opens the file handle at the offset with the limit given
type OpenRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error)
// Cipher is used to swap out the encryption implementations
type Cipher interface {
// EncryptFileName encrypts a file path
EncryptFileName(string) string
// DecryptFileName decrypts a file path, returns error if decrypt was invalid
DecryptFileName(string) (string, error)
// EncryptDirName encrypts a directory path
EncryptDirName(string) string
// DecryptDirName decrypts a directory path, returns error if decrypt was invalid
DecryptDirName(string) (string, error)
// EncryptData encrypts the data stream
EncryptData(io.Reader) (io.Reader, error)
// DecryptData decrypts the data stream
DecryptData(io.ReadCloser) (io.ReadCloser, error)
// DecryptDataSeek decrypt at a given position
DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
// EncryptedSize calculates the size of the data when encrypted
EncryptedSize(int64) int64
// DecryptedSize calculates the size of the data when decrypted
DecryptedSize(int64) (int64, error)
// NameEncryptionMode returns the used mode for name handling
NameEncryptionMode() NameEncryptionMode
}
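A minimal usage sketch of this interface (it would have to live inside the crypt package, since newCipher is unexported); the password is a placeholder and the calls assume only the signatures listed above.

package crypt

import "fmt"

// exampleCipherUsage is a sketch only, not part of the backend.
func exampleCipherUsage() error {
	c, err := newCipher(NameEncryptionStandard, "secret-password", "", true)
	if err != nil {
		return err
	}
	encName := c.EncryptFileName("photos/2020/cat.jpg") // deterministic per path segment
	plain, err := c.DecryptFileName(encName)            // round-trips back to the original
	if err != nil {
		return err
	}
	fmt.Printf("%s -> %s\n", plain, encName)
	fmt.Printf("1 MiB of plaintext becomes %d encrypted bytes\n", c.EncryptedSize(1<<20))
	return nil
}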
// NameEncryptionMode is the type of file name encryption in use
type NameEncryptionMode int
@@ -112,8 +136,7 @@ func (mode NameEncryptionMode) String() (out string) {
return out
}
// Cipher defines an encoding and decoding cipher for the crypt backend
type Cipher struct {
type cipher struct {
dataKey [32]byte // Key for secretbox
nameKey [32]byte // 16,24 or 32 bytes
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
@@ -125,8 +148,8 @@ type Cipher struct {
}
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*Cipher, error) {
c := &Cipher{
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*cipher, error) {
c := &cipher{
mode: mode,
cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt,
@@ -149,7 +172,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
//
// Note that an empty password makes all 0x00 keys, which is used in the
// tests.
func (c *Cipher) Key(password, salt string) (err error) {
func (c *cipher) Key(password, salt string) (err error) {
const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)
var saltBytes = defaultSalt
if salt != "" {
@@ -173,12 +196,12 @@ func (c *Cipher) Key(password, salt string) (err error) {
}
// getBlock gets a block from the pool of size blockSize
func (c *Cipher) getBlock() []byte {
func (c *cipher) getBlock() []byte {
return c.buffers.Get().([]byte)
}
// putBlock returns a block to the pool of size blockSize
func (c *Cipher) putBlock(buf []byte) {
func (c *cipher) putBlock(buf []byte) {
if len(buf) != blockSize {
panic("bad blocksize returned to pool")
}
@@ -217,13 +240,13 @@ func decodeFileName(in string) ([]byte, error) {
// 2003 paper "A Parallelizable Enciphering Mode" by Halevi and
// Rogaway.
//
// This makes for deterministic encryption which is what we want - the
// This makes for determinstic encryption which is what we want - the
// same filename must encrypt to the same thing.
//
// This means that
// * filenames with the same name will encrypt the same
// * filenames which start the same won't have a common prefix
func (c *Cipher) encryptSegment(plaintext string) string {
func (c *cipher) encryptSegment(plaintext string) string {
if plaintext == "" {
return ""
}
@@ -233,7 +256,7 @@ func (c *Cipher) encryptSegment(plaintext string) string {
}
// decryptSegment decrypts a path segment
func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
func (c *cipher) decryptSegment(ciphertext string) (string, error) {
if ciphertext == "" {
return "", nil
}
@@ -260,7 +283,7 @@ func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
}
// Simple obfuscation routines
func (c *Cipher) obfuscateSegment(plaintext string) string {
func (c *cipher) obfuscateSegment(plaintext string) string {
if plaintext == "" {
return ""
}
@@ -347,7 +370,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
return result.String()
}
func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
if ciphertext == "" {
return "", nil
}
@@ -434,7 +457,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
}
// encryptFileName encrypts a file path
func (c *Cipher) encryptFileName(in string) string {
func (c *cipher) encryptFileName(in string) string {
segments := strings.Split(in, "/")
for i := range segments {
// Skip directory name encryption if the user chose to
@@ -452,7 +475,7 @@ func (c *Cipher) encryptFileName(in string) string {
}
// EncryptFileName encrypts a file path
func (c *Cipher) EncryptFileName(in string) string {
func (c *cipher) EncryptFileName(in string) string {
if c.mode == NameEncryptionOff {
return in + encryptedSuffix
}
@@ -460,7 +483,7 @@ func (c *Cipher) EncryptFileName(in string) string {
}
// EncryptDirName encrypts a directory path
func (c *Cipher) EncryptDirName(in string) string {
func (c *cipher) EncryptDirName(in string) string {
if c.mode == NameEncryptionOff || !c.dirNameEncrypt {
return in
}
@@ -468,7 +491,7 @@ func (c *Cipher) EncryptDirName(in string) string {
}
// decryptFileName decrypts a file path
func (c *Cipher) decryptFileName(in string) (string, error) {
func (c *cipher) decryptFileName(in string) (string, error) {
segments := strings.Split(in, "/")
for i := range segments {
var err error
@@ -491,7 +514,7 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
}
// DecryptFileName decrypts a file path
func (c *Cipher) DecryptFileName(in string) (string, error) {
func (c *cipher) DecryptFileName(in string) (string, error) {
if c.mode == NameEncryptionOff {
remainingLength := len(in) - len(encryptedSuffix)
if remainingLength > 0 && strings.HasSuffix(in, encryptedSuffix) {
@@ -503,15 +526,14 @@ func (c *Cipher) DecryptFileName(in string) (string, error) {
}
// DecryptDirName decrypts a directory path
func (c *Cipher) DecryptDirName(in string) (string, error) {
func (c *cipher) DecryptDirName(in string) (string, error) {
if c.mode == NameEncryptionOff || !c.dirNameEncrypt {
return in, nil
}
return c.decryptFileName(in)
}
// NameEncryptionMode returns the encryption mode in use for names
func (c *Cipher) NameEncryptionMode() NameEncryptionMode {
func (c *cipher) NameEncryptionMode() NameEncryptionMode {
return c.mode
}
@@ -559,7 +581,7 @@ func (n *nonce) increment() {
n.carry(0)
}
// add a uint64 to the nonce
// add an uint64 to the nonce
func (n *nonce) add(x uint64) {
carry := uint16(0)
for i := 0; i < 8; i++ {
@@ -579,7 +601,7 @@ func (n *nonce) add(x uint64) {
type encrypter struct {
mu sync.Mutex
in io.Reader
c *Cipher
c *cipher
nonce nonce
buf []byte
readBuf []byte
@@ -589,7 +611,7 @@ type encrypter struct {
}
// newEncrypter creates a new file handle encrypting on the fly
func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
func (c *cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
fh := &encrypter{
in: in,
c: c,
@@ -661,19 +683,13 @@ func (fh *encrypter) finish(err error) (int, error) {
}
// Encrypt data encrypts the data stream
func (c *Cipher) encryptData(in io.Reader) (io.Reader, *encrypter, error) {
func (c *cipher) EncryptData(in io.Reader) (io.Reader, error) {
in, wrap := accounting.UnWrap(in) // unwrap the accounting off the Reader
out, err := c.newEncrypter(in, nil)
if err != nil {
return nil, nil, err
return nil, err
}
return wrap(out), out, nil // and wrap the accounting back on
}
// EncryptData encrypts the data stream
func (c *Cipher) EncryptData(in io.Reader) (io.Reader, error) {
out, _, err := c.encryptData(in)
return out, err
return wrap(out), nil // and wrap the accounting back on
}
// decrypter decrypts an io.ReaderCloser on the fly
@@ -682,7 +698,7 @@ type decrypter struct {
rc io.ReadCloser
nonce nonce
initialNonce nonce
c *Cipher
c *cipher
buf []byte
readBuf []byte
bufIndex int
@@ -693,7 +709,7 @@ type decrypter struct {
}
// newDecrypter creates a new file handle decrypting on the fly
func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
fh := &decrypter{
rc: rc,
c: c,
@@ -721,7 +737,7 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
}
// newDecrypterSeek creates a new file handle decrypting on the fly
func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
var rc io.ReadCloser
doRangeSeek := false
setLimit := false
@@ -996,7 +1012,7 @@ func (fh *decrypter) finishAndClose(err error) error {
}
// DecryptData decrypts the data stream
func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
out, err := c.newDecrypter(rc)
if err != nil {
return nil, err
@@ -1009,7 +1025,7 @@ func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
// The open function must return a ReadCloser opened to the offset supplied
//
// You must use this form of DecryptData if you might want to Seek the file handle
func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
func (c *cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
out, err := c.newDecrypterSeek(ctx, open, offset, limit)
if err != nil {
return nil, err
@@ -1018,7 +1034,7 @@ func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset
}
// EncryptedSize calculates the size of the data when encrypted
func (c *Cipher) EncryptedSize(size int64) int64 {
func (c *cipher) EncryptedSize(size int64) int64 {
blocks, residue := size/blockDataSize, size%blockDataSize
encryptedSize := int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize)
if residue != 0 {
@@ -1028,7 +1044,7 @@ func (c *Cipher) EncryptedSize(size int64) int64 {
}
// DecryptedSize calculates the size of the data when decrypted
func (c *Cipher) DecryptedSize(size int64) (int64, error) {
func (c *cipher) DecryptedSize(size int64) (int64, error) {
size -= int64(fileHeaderSize)
if size < 0 {
return 0, ErrorEncryptedFileTooShort
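A worked example of the size bookkeeping above. The constants are assumptions (the usual rclone crypt layout: 32-byte file header, 16 bytes of overhead per 64 KiB data block) and the residue branch is filled in here by assumption, since the hunk is cut off.

package main

import "fmt"

func main() {
	// Assumed constants, not read from this diff: fileHeaderSize = 32,
	// blockHeaderSize = 16, blockDataSize = 64 KiB.
	const (
		fileHeaderSize  = int64(32)
		blockHeaderSize = int64(16)
		blockDataSize   = int64(64 * 1024)
	)
	size := int64(100 * 1024) // 100 KiB of plaintext
	blocks, residue := size/blockDataSize, size%blockDataSize
	encrypted := fileHeaderSize + blocks*(blockHeaderSize+blockDataSize)
	if residue != 0 {
		encrypted += blockHeaderSize + residue
	}
	fmt.Println(encrypted) // 102464 = 32 + (16+65536) + (16+36864)
}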
@@ -1047,6 +1063,7 @@ func (c *Cipher) DecryptedSize(size int64) (int64, error) {
// check interfaces
var (
_ Cipher = (*cipher)(nil)
_ io.ReadCloser = (*decrypter)(nil)
_ io.Seeker = (*decrypter)(nil)
_ fs.RangeSeeker = (*decrypter)(nil)

View File

@@ -12,7 +12,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -785,7 +784,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
in := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
in := &errorReader{io.ErrUnexpectedEOF}
fh, err := c.newEncrypter(in, nil)
assert.NoError(t, err)
@@ -794,6 +793,14 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
assert.Equal(t, int64(32), n)
}
type errorReader struct {
err error
}
func (er errorReader) Read(p []byte) (n int, err error) {
return 0, er.err
}
type closeDetector struct {
io.Reader
closed int
@@ -831,7 +838,7 @@ func TestNewDecrypter(t *testing.T) {
assert.Equal(t, 1, cd.closed)
}
er := &readers.ErrorReader{Err: errors.New("potato")}
er := &errorReader{errors.New("potato")}
cd = newCloseDetector(er)
fh, err = c.newDecrypter(cd)
assert.Nil(t, fh)
@@ -857,7 +864,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
in2 := &errorReader{io.ErrUnexpectedEOF}
in1 := bytes.NewBuffer(file16)
in := ioutil.NopCloser(io.MultiReader(in1, in2))
@@ -929,7 +936,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
assert.Equal(t, 0, n)
}
// Now try decoding it with an open/seek
// Now try decoding it with a open/seek
for _, offset := range trials {
for _, limit := range limits {
if offset+limit > len(plaintext) {
@@ -1111,7 +1118,7 @@ func TestDecrypterRead(t *testing.T) {
// Test producing an error on the file on Read the underlying file
in1 := bytes.NewBuffer(file1)
in2 := &readers.ErrorReader{Err: errors.New("potato")}
in2 := &errorReader{errors.New("potato")}
in := io.MultiReader(in1, in2)
cd := newCloseDetector(in)
fh, err := c.newDecrypter(cd)

View File

@@ -5,14 +5,12 @@ import (
"context"
"fmt"
"io"
"path"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
@@ -27,7 +25,6 @@ func init() {
Name: "crypt",
Description: "Encrypt/Decrypt a remote",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
@@ -38,21 +35,19 @@ func init() {
Default: "standard",
Examples: []fs.OptionExample{
{
Value: "off",
Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
}, {
Value: "standard",
Help: "Encrypt the filenames see the docs for the details.",
}, {
Value: "obfuscate",
Help: "Very simple filename obfuscation.",
}, {
Value: "off",
Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
},
},
}, {
Name: "directory_name_encryption",
Help: `Option to either encrypt directory names or leave them intact.
NB If filename_encryption is "off" then this option will do nothing.`,
Name: "directory_name_encryption",
Help: "Option to either encrypt directory names or leave them intact.",
Default: true,
Examples: []fs.OptionExample{
{
@@ -68,25 +63,10 @@ NB If filename_encryption is "off" then this option will do nothing.`,
Name: "password",
Help: "Password or pass phrase for encryption.",
IsPassword: true,
Required: true,
}, {
Name: "password2",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
IsPassword: true,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server side operations (eg copy) to work across different crypt configs.
Normally this option is not what you want, but if you have two crypts
pointing to the same backend you can use it.
This can be used, for example, to change file name encryption type
without re-uploading all the data. Just make two crypt backends
pointing to two different directories with the single changed
parameter and use rclone move to move the files between the crypt
remotes.`,
Advanced: true,
}, {
Name: "show_mapping",
Help: `For all files listed show how the names encrypt.
@@ -106,7 +86,7 @@ names, or for debugging purposes.`,
}
// newCipherForConfig constructs a Cipher for the given config name
func newCipherForConfig(opt *Options) (*Cipher, error) {
func newCipherForConfig(opt *Options) (Cipher, error) {
mode, err := NewNameEncryptionMode(opt.FilenameEncryption)
if err != nil {
return nil, err
@@ -133,7 +113,7 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
}
// NewCipher constructs a Cipher for the given config
func NewCipher(m configmap.Mapper) (*Cipher, error) {
func NewCipher(m configmap.Mapper) (Cipher, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -159,25 +139,20 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
}
// Make sure to remove trailing . referring to the current dir
if path.Base(rpath) == "." {
rpath = strings.TrimSuffix(rpath, ".")
wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
}
// Look for a file first
var wrappedFs fs.Fs
if rpath == "" {
wrappedFs, err = cache.Get(remote)
} else {
remotePath := fspath.JoinRootPath(remote, cipher.EncryptFileName(rpath))
wrappedFs, err = cache.Get(remotePath)
// if that didn't produce a file, look for a directory
if err != fs.ErrorIsFile {
remotePath = fspath.JoinRootPath(remote, cipher.EncryptDirName(rpath))
wrappedFs, err = cache.Get(remotePath)
}
remotePath := fspath.JoinRootPath(wPath, cipher.EncryptFileName(rpath))
wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
// if that didn't produce a file, look for a directory
if err != fs.ErrorIsFile {
remotePath = fspath.JoinRootPath(wPath, cipher.EncryptDirName(rpath))
wrappedFs, err = wInfo.NewFs(wName, remotePath, wConfig)
}
if err != fs.ErrorIsFile && err != nil {
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remote)
return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
}
f := &Fs{
Fs: wrappedFs,
@@ -186,7 +161,6 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
opt: *opt,
cipher: cipher,
}
cache.PinUntilFinalized(f.Fs, f)
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
f.features = (&fs.Features{
@@ -198,7 +172,6 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
CanHaveEmptyDirectories: true,
SetTier: true,
GetTier: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
return f, err
@@ -211,7 +184,6 @@ type Options struct {
DirectoryNameEncryption bool `config:"directory_name_encryption"`
Password string `config:"password"`
Password2 string `config:"password2"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ShowMapping bool `config:"show_mapping"`
}
@@ -223,7 +195,7 @@ type Fs struct {
root string
opt Options
features *fs.Features // optional features
cipher *Cipher
cipher Cipher
}
// Name of the remote (as passed into NewFs)
@@ -260,7 +232,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
*entries = append(*entries, f.newObject(obj))
}
// Encrypt a directory file name to entries.
// Encrypt an directory file name to entries.
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
@@ -347,7 +319,7 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
// Encrypt the data into wrappedIn
wrappedIn, encrypter, err := f.cipher.encryptData(in)
wrappedIn, err := f.cipher.EncryptData(in)
if err != nil {
return nil, err
}
@@ -371,7 +343,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
}
// Transfer the data
o, err := put(ctx, wrappedIn, f.newObjectInfo(src, encrypter.nonce), options...)
o, err := put(ctx, wrappedIn, f.newObjectInfo(src), options...)
if err != nil {
return nil, err
}
@@ -430,18 +402,18 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir))
}
// Purge all files in the directory specified
// Purge all files in the root and the root directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
func (f *Fs) Purge(ctx context.Context) error {
do := f.Fs.Features().Purge
if do == nil {
return fs.ErrorCantPurge
}
return do(ctx, f.cipher.EncryptDirName(dir))
return do(ctx)
}
// Copy src to this remote using server side copy operations.
@@ -524,11 +496,11 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
if do == nil {
return nil, errors.New("can't PutUnchecked")
}
wrappedIn, encrypter, err := f.cipher.encryptData(in)
wrappedIn, err := f.cipher.EncryptData(in)
if err != nil {
return nil, err
}
o, err := do(ctx, wrappedIn, f.newObjectInfo(src, encrypter.nonce))
o, err := do(ctx, wrappedIn, f.newObjectInfo(src))
if err != nil {
return nil, err
}
@@ -581,37 +553,6 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
return f.cipher.DecryptFileName(encryptedFileName)
}
// computeHashWithNonce takes the nonce and encrypts the contents of
// src with it, and calculates the hash given by HashType on the fly
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Object, hashType hash.Type) (hashStr string, err error) {
// Open the src for input
in, err := src.Open(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to open src")
}
defer fs.CheckClose(in, &err)
// Now encrypt the src with the nonce
out, err := f.cipher.newEncrypter(in, &nonce)
if err != nil {
return "", errors.Wrap(err, "failed to make encrypter")
}
// pipe into hash
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
if err != nil {
return "", errors.Wrap(err, "failed to make hasher")
}
_, err = io.Copy(m, out)
if err != nil {
return "", errors.Wrap(err, "failed to hash data")
}
return m.Sums()[hashType], nil
}
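
The hashing half of this helper relies on the generic multi-hasher from fs/hash; taken on its own, that part works roughly like the following sketch (it hashes a plain reader here instead of the crypt encrypter, and the input string is invented):

package main

import (
	"fmt"
	"io"
	"log"
	"strings"

	"github.com/rclone/rclone/fs/hash"
)

func main() {
	in := strings.NewReader("example ciphertext bytes") // stand-in for the encrypter output
	m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hash.MD5))
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(m, in); err != nil {
		log.Fatal(err)
	}
	fmt.Println(m.Sums()[hash.MD5]) // hex digest of whatever was streamed in
}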
// ComputeHash takes the nonce from o, and encrypts the contents of
// src with it, and calculates the hash given by HashType on the fly
//
@@ -623,7 +564,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
if err != nil {
return "", errors.Wrap(err, "failed to open object to read nonce")
}
d, err := f.cipher.newDecrypter(in)
d, err := f.cipher.(*cipher).newDecrypter(in)
if err != nil {
_ = in.Close()
return "", errors.Wrap(err, "failed to open object to read nonce")
@@ -648,7 +589,30 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
return "", errors.Wrap(err, "failed to close nonce read")
}
return f.computeHashWithNonce(ctx, nonce, src, hashType)
// Open the src for input
in, err = src.Open(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to open src")
}
defer fs.CheckClose(in, &err)
// Now encrypt the src with the nonce
out, err := f.cipher.(*cipher).newEncrypter(in, &nonce)
if err != nil {
return "", errors.Wrap(err, "failed to make encrypter")
}
// pipe into hash
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
if err != nil {
return "", errors.Wrap(err, "failed to make hasher")
}
_, err = io.Copy(m, out)
if err != nil {
return "", errors.Wrap(err, "failed to hash data")
}
return m.Sums()[hashType], nil
}
// MergeDirs merges the contents of all the directories passed
@@ -675,7 +639,7 @@ func (f *Fs) DirCacheFlush() {
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
do := f.Fs.Features().PublicLink
if do == nil {
return "", errors.New("PublicLink not supported")
@@ -683,9 +647,9 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
o, err := f.NewObject(ctx, remote)
if err != nil {
// assume it is a directory
return do(ctx, f.cipher.EncryptDirName(remote), expire, unlink)
return do(ctx, f.cipher.EncryptDirName(remote))
}
return do(ctx, o.(*Object).Object.Remote(), expire, unlink)
return do(ctx, o.(*Object).Object.Remote())
}
// ChangeNotify calls the passed function with a path
@@ -720,67 +684,6 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
do(ctx, wrappedNotifyFunc, pollIntervalChan)
}
var commandHelp = []fs.CommandHelp{
{
Name: "encode",
Short: "Encode the given filename(s)",
Long: `This encodes the filenames given as arguments returning a list of
strings of the encoded results.
Usage Example:
rclone backend encode crypt: file1 [file2...]
rclone rc backend/command command=encode fs=crypt: file1 [file2...]
`,
},
{
Name: "decode",
Short: "Decode the given filename(s)",
Long: `This decodes the filenames given as arguments returning a list of
strings of the decoded results. It will return an error if any of the
inputs are invalid.
Usage Example:
rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
`,
},
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "decode":
out := make([]string, 0, len(arg))
for _, encryptedFileName := range arg {
fileName, err := f.DecryptFileName(encryptedFileName)
if err != nil {
return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt : %s", encryptedFileName))
}
out = append(out, fileName)
}
return out, nil
case "encode":
out := make([]string, 0, len(arg))
for _, fileName := range arg {
encryptedFileName := f.EncryptFileName(fileName)
out = append(out, encryptedFileName)
}
return out, nil
default:
return nil, fs.ErrorCommandNotFound
}
}
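
Purely as an illustration of how the Commander implementation above is reached from calling code (the remote name "secret:" and the file name are invented for the example, and error handling is kept minimal):

package main

import (
	"context"
	"fmt"
	"log"

	_ "github.com/rclone/rclone/backend/crypt"
	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
)

func main() {
	ctx := context.Background()
	// Assumes "secret:" is a crypt remote already present in the config.
	f, err := cache.Get("secret:")
	if err != nil {
		log.Fatal(err)
	}
	cmdr, ok := f.(fs.Commander)
	if !ok {
		log.Fatal("backend does not implement fs.Commander")
	}
	out, err := cmdr.Command(ctx, "encode", []string{"file1.txt"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // a []string holding the encrypted name(s)
}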
// Object describes a wrapped Object being read from the Fs
//
// This decrypts the remote name and decrypts the data
@@ -922,15 +825,13 @@ func (f *Fs) Disconnect(ctx context.Context) error {
// This encrypts the remote name and adjusts the size
type ObjectInfo struct {
fs.ObjectInfo
f *Fs
nonce nonce
f *Fs
}
func (f *Fs) newObjectInfo(src fs.ObjectInfo, nonce nonce) *ObjectInfo {
func (f *Fs) newObjectInfo(src fs.ObjectInfo) *ObjectInfo {
return &ObjectInfo{
ObjectInfo: src,
f: f,
nonce: nonce,
}
}
@@ -956,23 +857,6 @@ func (o *ObjectInfo) Size() int64 {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
var srcObj fs.Object
var ok bool
// Get the underlying object if there is one
if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
// Prefer direct interface assertion
} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
// Otherwise likely is an operations.OverrideRemote
srcObj = do.UnWrap()
} else {
return "", nil
}
// if this is wrapping a local object then we work out the hash
if srcObj.Fs().Features().IsLocal {
// Read the data and encrypt it to calculate the hash
fs.Debugf(o, "Computing %v hash of encrypted source", hash)
return o.f.computeHashWithNonce(ctx, o.nonce, srcObj, hash)
}
return "", nil
}
@@ -1011,7 +895,6 @@ var (
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)


@@ -1,143 +0,0 @@
package crypt
import (
"bytes"
"context"
"crypto/md5"
"fmt"
"io"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type testWrapper struct {
fs.ObjectInfo
}
// UnWrap returns the Object that this Object is wrapping or nil if it
// isn't wrapping anything
func (o testWrapper) UnWrap() fs.Object {
if o, ok := o.ObjectInfo.(fs.Object); ok {
return o
}
return nil
}
// Create a temporary local fs to upload things from
func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
localFs, err := fs.TemporaryLocalFs()
require.NoError(t, err)
cleanup = func() {
require.NoError(t, localFs.Rmdir(context.Background(), ""))
}
return localFs, cleanup
}
// Upload a file to a remote
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
inBuf := bytes.NewBufferString(contents)
t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
obj, err := f.Put(context.Background(), inBuf, upSrc)
require.NoError(t, err)
cleanup = func() {
require.NoError(t, obj.Remove(context.Background()))
}
return obj, cleanup
}
// Test the ObjectInfo
func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
var (
contents = random.String(100)
path = "hash_test_object"
ctx = context.Background()
)
if wrap {
path = "_wrap"
}
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
obj, cleanupObj := uploadFile(t, localFs, path, contents)
defer cleanupObj()
// encrypt the data
inBuf := bytes.NewBufferString(contents)
var outBuf bytes.Buffer
enc, err := f.cipher.newEncrypter(inBuf, nil)
require.NoError(t, err)
nonce := enc.nonce // read the nonce at the start
_, err = io.Copy(&outBuf, enc)
require.NoError(t, err)
var oi fs.ObjectInfo = obj
if wrap {
// wrap the object in an fs.ObjectUnwrapper if required
oi = testWrapper{oi}
}
// wrap the object in a crypt for upload using the nonce we
// saved from the encryptor
src := f.newObjectInfo(oi, nonce)
// Test ObjectInfo methods
assert.Equal(t, int64(outBuf.Len()), src.Size())
assert.Equal(t, f, src.Fs())
assert.NotEqual(t, path, src.Remote())
// Test ObjectInfo.Hash
wantHash := md5.Sum(outBuf.Bytes())
gotHash, err := src.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, fmt.Sprintf("%x", wantHash), gotHash)
}
func testComputeHash(t *testing.T, f *Fs) {
var (
contents = random.String(100)
path = "compute_hash_test"
ctx = context.Background()
hashType = f.Fs.Hashes().GetOne()
)
if hashType == hash.None {
t.Skipf("%v: does not support hashes", f.Fs)
}
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
// Upload a file to localFs as a test object
localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
defer cleanupLocalObj()
// Upload the same data to the remote Fs also
remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
defer cleanupRemoteObj()
// Calculate the expected Hash of the remote object
computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)
require.NoError(t, err)
// Test computed hash matches remote object hash
remoteObjHash, err := remoteObj.(*Object).Object.Hash(ctx, hashType)
require.NoError(t, err)
assert.Equal(t, remoteObjHash, computedHash)
}
// InternalTest is called by fstests.Run to run extra tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("ObjectInfo", func(t *testing.T) { testObjectInfo(t, f, false) })
t.Run("ObjectInfoWrap", func(t *testing.T) { testObjectInfo(t, f, true) })
t.Run("ComputeHash", func(t *testing.T) { testComputeHash(t, f) })
}

backend/drive/drive.go Executable file → Normal file

File diff suppressed because it is too large


@@ -10,16 +10,12 @@ import (
"path/filepath"
"strings"
"testing"
"time"
"github.com/pkg/errors"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/api/drive/v3"
@@ -272,142 +268,6 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
}
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/Shortcuts
func (f *Fs) InternalTestShortcuts(t *testing.T) {
const (
// from fstest/fstests/fstests.go
existingDir = "hello? sausage"
existingFile = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
existingSubDir = "êé"
)
ctx := context.Background()
srcObj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err)
srcHash, err := srcObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.NotEqual(t, "", srcHash)
t.Run("Errors", func(t *testing.T) {
_, err := f.makeShortcut(ctx, "", f, "")
assert.Error(t, err)
assert.Contains(t, err.Error(), "can't be root")
_, err = f.makeShortcut(ctx, "notfound", f, "dst")
assert.Error(t, err)
assert.Contains(t, err.Error(), "can't find source")
_, err = f.makeShortcut(ctx, existingFile, f, existingFile)
assert.Error(t, err)
assert.Contains(t, err.Error(), "not overwriting")
assert.Contains(t, err.Error(), "existing file")
_, err = f.makeShortcut(ctx, existingFile, f, existingDir)
assert.Error(t, err)
assert.Contains(t, err.Error(), "not overwriting")
assert.Contains(t, err.Error(), "existing directory")
})
t.Run("File", func(t *testing.T) {
dstObj, err := f.makeShortcut(ctx, existingFile, f, "shortcut.txt")
require.NoError(t, err)
require.NotNil(t, dstObj)
assert.Equal(t, "shortcut.txt", dstObj.Remote())
dstHash, err := dstObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, srcHash, dstHash)
require.NoError(t, dstObj.Remove(ctx))
})
t.Run("Dir", func(t *testing.T) {
dstObj, err := f.makeShortcut(ctx, existingDir, f, "shortcutdir")
require.NoError(t, err)
require.Nil(t, dstObj)
entries, err := f.List(ctx, "shortcutdir")
require.NoError(t, err)
require.Equal(t, 1, len(entries))
require.Equal(t, "shortcutdir/"+existingSubDir, entries[0].Remote())
require.NoError(t, f.Rmdir(ctx, "shortcutdir"))
})
t.Run("Command", func(t *testing.T) {
_, err := f.Command(ctx, "shortcut", []string{"one"}, nil)
require.Error(t, err)
require.Contains(t, err.Error(), "need exactly 2 arguments")
_, err = f.Command(ctx, "shortcut", []string{"one", "two"}, map[string]string{
"target": "doesnotexistremote:",
})
require.Error(t, err)
require.Contains(t, err.Error(), "couldn't find target")
_, err = f.Command(ctx, "shortcut", []string{"one", "two"}, map[string]string{
"target": ".",
})
require.Error(t, err)
require.Contains(t, err.Error(), "target is not a drive backend")
dstObjI, err := f.Command(ctx, "shortcut", []string{existingFile, "shortcut2.txt"}, map[string]string{
"target": fs.ConfigString(f),
})
require.NoError(t, err)
dstObj := dstObjI.(*Object)
assert.Equal(t, "shortcut2.txt", dstObj.Remote())
dstHash, err := dstObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, srcHash, dstHash)
require.NoError(t, dstObj.Remove(ctx))
dstObjI, err = f.Command(ctx, "shortcut", []string{existingFile, "shortcut3.txt"}, nil)
require.NoError(t, err)
dstObj = dstObjI.(*Object)
assert.Equal(t, "shortcut3.txt", dstObj.Remote())
dstHash, err = dstObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, srcHash, dstHash)
require.NoError(t, dstObj.Remove(ctx))
})
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/UnTrash
func (f *Fs) InternalTestUnTrash(t *testing.T) {
ctx := context.Background()
// Make some objects, one in a subdir
contents := random.String(100)
file1 := fstest.NewItem("trashDir/toBeTrashed", contents, time.Now())
_, obj1 := fstests.PutTestContents(ctx, t, f, &file1, contents, false)
file2 := fstest.NewItem("trashDir/subdir/toBeTrashed", contents, time.Now())
_, _ = fstests.PutTestContents(ctx, t, f, &file2, contents, false)
// Check objects
checkObjects := func() {
fstest.CheckListingWithRoot(t, f, "trashDir", []fstest.Item{
file1,
file2,
}, []string{
"trashDir/subdir",
}, f.Precision())
}
checkObjects()
// Make sure we are using the trash
require.Equal(t, true, f.opt.UseTrash)
// Remove the object and the dir
require.NoError(t, obj1.Remove(ctx))
require.NoError(t, f.Purge(ctx, "trashDir/subdir"))
// Check objects gone
fstest.CheckListingWithRoot(t, f, "trashDir", []fstest.Item{}, []string{}, f.Precision())
// Restore the object and directory
r, err := f.unTrashDir(ctx, "trashDir", true)
require.NoError(t, err)
assert.Equal(t, unTrashResult{Errors: 0, Untrashed: 2}, r)
// Check objects restored
checkObjects()
// Remove the test dir
require.NoError(t, f.Purge(ctx, "trashDir"))
}
func (f *Fs) InternalTest(t *testing.T) {
// These tests all depend on each other so run them as nested tests
t.Run("DocumentImport", func(t *testing.T) {
@@ -422,8 +282,6 @@ func (f *Fs) InternalTest(t *testing.T) {
})
})
})
t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash)
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -11,15 +11,16 @@
package drive
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"regexp"
"strconv"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/readers"
@@ -87,15 +88,13 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
})
req.Header.Set("Content-Type", "application/json; charset=UTF-8")
req.Header.Set("X-Upload-Content-Type", contentType)
if size >= 0 {
req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
}
req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
res, err = f.client.Do(req)
if err == nil {
defer googleapi.CloseBody(res)
err = googleapi.CheckResponse(res)
}
return f.shouldRetry(err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -117,19 +116,49 @@ func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io
req, _ := http.NewRequest("POST", rx.URI, body)
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.ContentLength = reqSize
totalSize := "*"
if rx.ContentLength >= 0 {
totalSize = strconv.FormatInt(rx.ContentLength, 10)
}
if reqSize != 0 {
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, totalSize))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
} else {
req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", totalSize))
req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
}
req.Header.Set("Content-Type", rx.MediaType)
return req
}
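
To make the header construction concrete, this is roughly what the two branches produce; the sizes are invented, and the "*" form corresponds to the unknown-length (totalSize = "*") variant shown above:

package main

import "fmt"

// contentRange mirrors the logic above: start/reqSize describe the chunk,
// total < 0 stands for an unknown overall length.
func contentRange(start, reqSize, total int64) string {
	totalSize := "*"
	if total >= 0 {
		totalSize = fmt.Sprintf("%d", total)
	}
	if reqSize != 0 {
		return fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, totalSize)
	}
	return fmt.Sprintf("bytes */%v", totalSize)
}

func main() {
	fmt.Println(contentRange(0, 8<<20, 10<<20))     // bytes 0-8388607/10485760
	fmt.Println(contentRange(8<<20, 2<<20, 10<<20)) // bytes 8388608-10485759/10485760
	fmt.Println(contentRange(0, 8<<20, -1))         // bytes 0-8388607/*
	fmt.Println(contentRange(0, 0, 10<<20))         // bytes */10485760  (status probe)
}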
// rangeRE matches the transfer status response from the server. $1 is
// the last byte index uploaded.
var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
// Query drive for the amount transferred so far
//
// If error is nil, then start should be valid
func (rx *resumableUpload) transferStatus(ctx context.Context) (start int64, err error) {
req := rx.makeRequest(ctx, 0, nil, 0)
res, err := rx.f.client.Do(req)
if err != nil {
return 0, err
}
defer googleapi.CloseBody(res)
if res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
return rx.ContentLength, nil
}
if res.StatusCode != statusResumeIncomplete {
err = googleapi.CheckResponse(res)
if err != nil {
return 0, err
}
return 0, errors.Errorf("unexpected http return code %v", res.StatusCode)
}
Range := res.Header.Get("Range")
if m := rangeRE.FindStringSubmatch(Range); len(m) == 2 {
start, err = strconv.ParseInt(m[1], 10, 64)
if err == nil {
return start, nil
}
}
return 0, errors.Errorf("unable to parse range %q", Range)
}
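
As a standalone sketch of the Range parsing that transferStatus above depends on (the header value is an invented example of the 0-N form the regexp expects):

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// Same pattern as rangeRE above: $1 is the last byte index uploaded.
var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)

// lastByte returns the index of the last byte the server acknowledges.
func lastByte(rangeHeader string) (int64, error) {
	if m := rangeRE.FindStringSubmatch(rangeHeader); len(m) == 2 {
		return strconv.ParseInt(m[1], 10, 64)
	}
	return 0, fmt.Errorf("unable to parse range %q", rangeHeader)
}

func main() {
	n, err := lastByte("0-262143")
	fmt.Println(n, err) // 262143 <nil>, so the next chunk starts at byte 262144
}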
// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
_, _ = chunk.Seek(0, io.SeekStart)
@@ -171,40 +200,18 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
var StatusCode int
var err error
buf := make([]byte, int(rx.f.opt.ChunkSize))
for finished := false; !finished; {
var reqSize int64
var chunk io.ReadSeeker
if rx.ContentLength >= 0 {
// If size known use repeatable reader for smoother bwlimit
if start >= rx.ContentLength {
break
}
reqSize = rx.ContentLength - start
if reqSize >= int64(rx.f.opt.ChunkSize) {
reqSize = int64(rx.f.opt.ChunkSize)
}
chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
} else {
// If size unknown read into buffer
var n int
n, err = readers.ReadFill(rx.Media, buf)
if err == io.EOF {
// Send the last chunk with the correct ContentLength
// otherwise Google doesn't know we've finished
rx.ContentLength = start + int64(n)
finished = true
} else if err != nil {
return nil, err
}
reqSize = int64(n)
chunk = bytes.NewReader(buf[:reqSize])
for start < rx.ContentLength {
reqSize := rx.ContentLength - start
if reqSize >= int64(rx.f.opt.ChunkSize) {
reqSize = int64(rx.f.opt.ChunkSize)
}
chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
// Transfer the chunk
err = rx.f.pacer.Call(func() (bool, error) {
fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
again, err := rx.f.shouldRetry(err)
again, err := shouldRetry(err)
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
again = false
err = nil

backend/dropbox/dropbox.go Executable file → Normal file

@@ -45,15 +45,17 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/oauth2"
)
const enc = encodings.Dropbox
// Constants
const (
rcloneClientID = "5jcck7diasz0rqy"
@@ -111,21 +113,24 @@ var (
// Register with Fs
func init() {
DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
DbHashType = hash.RegisterHash("Dropbox", 64, dbhash.New)
fs.Register(&fs.RegInfo{
Name: "dropbox",
Description: "Dropbox",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
opt := oauthutil.Options{
NoOffline: true,
}
err := oauthutil.Config("dropbox", name, m, dropboxConfig, &opt)
err := oauthutil.ConfigNoOffline("dropbox", name, m, dropboxConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Dropbox App Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Dropbox App Client Secret\nLeave blank normally.",
}, {
Name: "chunk_size",
Help: fmt.Sprintf(`Upload chunk size. (< %v).
@@ -142,28 +147,14 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
Help: "Impersonate this user when using a business account.",
Default: "",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// https://www.dropbox.com/help/syncing-uploads/files-not-syncing lists / and \
// as invalid characters.
// Testing revealed names with trailing spaces and the DEL character don't work.
// Also encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: (encoder.Base |
encoder.EncodeBackSlash |
encoder.EncodeDel |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}
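
As a rough sketch only, this is how the encoding default above is applied before names reach the Dropbox API; the input name is invented and the exact escaped runes come from lib/encoder:

package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/encoder"
)

func main() {
	enc := encoder.Base |
		encoder.EncodeBackSlash |
		encoder.EncodeDel |
		encoder.EncodeRightSpace |
		encoder.EncodeInvalidUtf8
	name := `back\slash and trailing space `
	wire := enc.FromStandardName(name)          // what would be sent to the API
	fmt.Println(wire, enc.ToStandardName(wire)) // round-trips back to the original
}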
// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
Enc encoder.MultiEncoder `config:"encoding"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
}
// Fs represents a remote dropbox server
@@ -222,11 +213,7 @@ func shouldRetry(err error) (bool, error) {
return false, err
}
baseErrString := errors.Cause(err).Error()
// First check for Insufficient Space
if strings.Contains(baseErrString, "insufficient_space") {
return false, fserrors.FatalError(err)
}
// Then handle any official Retry-After header from Dropbox's SDK
// handle any official Retry-After header from Dropbox's SDK first
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
@@ -394,7 +381,7 @@ func (f *Fs) setRoot(root string) {
func (f *Fs) getMetadata(objPath string) (entry files.IsMetadata, notFound bool, err error) {
err = f.pacer.Call(func() (bool, error) {
entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
Path: f.opt.Enc.FromStandardPath(objPath),
Path: enc.FromStandardPath(objPath),
})
return shouldRetry(err)
})
@@ -488,7 +475,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for {
if !started {
arg := files.ListFolderArg{
Path: f.opt.Enc.FromStandardPath(root),
Path: enc.FromStandardPath(root),
Recursive: false,
}
if root == "/" {
@@ -538,7 +525,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Only the last element is reliably cased in PathDisplay
entryPath := metadata.PathDisplay
leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
leaf := enc.ToStandardName(path.Base(entryPath))
remote := path.Join(dir, leaf)
if folderInfo != nil {
d := fs.NewDir(remote, time.Now())
@@ -596,7 +583,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// create it
arg2 := files.CreateFolderArg{
Path: f.opt.Enc.FromStandardPath(root),
Path: enc.FromStandardPath(root),
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.CreateFolderV2(&arg2)
@@ -605,58 +592,50 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return err
}
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error) {
root := path.Join(f.slashRoot, dir)
// can't remove root
if root == "/" {
return errors.New("can't remove root directory")
}
if check {
// check directory exists
_, err = f.getDirMetadata(root)
if err != nil {
return errors.Wrap(err, "Rmdir")
}
root = f.opt.Enc.FromStandardPath(root)
// check directory empty
arg := files.ListFolderArg{
Path: root,
Recursive: false,
}
if root == "/" {
arg.Path = "" // Specify root folder as empty string
}
var res *files.ListFolderResult
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolder(&arg)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "Rmdir")
}
if len(res.Entries) != 0 {
return errors.New("directory not empty")
}
}
// remove it
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.DeleteV2(&files.DeleteArg{Path: root})
return shouldRetry(err)
})
return err
}
// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
root := path.Join(f.slashRoot, dir)
// can't remove root
if root == "/" {
return errors.New("can't remove root directory")
}
// check directory exists
_, err := f.getDirMetadata(root)
if err != nil {
return errors.Wrap(err, "Rmdir")
}
root = enc.FromStandardPath(root)
// check directory empty
arg := files.ListFolderArg{
Path: root,
Recursive: false,
}
if root == "/" {
arg.Path = "" // Specify root folder as empty string
}
var res *files.ListFolderResult
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.ListFolder(&arg)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "Rmdir")
}
if len(res.Entries) != 0 {
return errors.New("directory not empty")
}
// remove it
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.DeleteV2(&files.DeleteArg{Path: root})
return shouldRetry(err)
})
return err
}
// Precision returns the precision
@@ -689,8 +668,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Copy
arg := files.RelocationArg{
RelocationPath: files.RelocationPath{
FromPath: f.opt.Enc.FromStandardPath(srcObj.remotePath()),
ToPath: f.opt.Enc.FromStandardPath(dstObj.remotePath()),
FromPath: enc.FromStandardPath(srcObj.remotePath()),
ToPath: enc.FromStandardPath(dstObj.remotePath()),
},
}
var err error
@@ -721,8 +700,15 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) (err error) {
// Let dropbox delete the filesystem tree
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.DeleteV2(&files.DeleteArg{
Path: enc.FromStandardPath(f.slashRoot),
})
return shouldRetry(err)
})
return err
}
// Move src to this remote using server side move operations.
@@ -750,8 +736,8 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Do the move
arg := files.RelocationArg{
RelocationPath: files.RelocationPath{
FromPath: f.opt.Enc.FromStandardPath(srcObj.remotePath()),
ToPath: f.opt.Enc.FromStandardPath(dstObj.remotePath()),
FromPath: enc.FromStandardPath(srcObj.remotePath()),
ToPath: enc.FromStandardPath(dstObj.remotePath()),
},
}
var err error
@@ -777,17 +763,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
absPath := f.opt.Enc.FromStandardPath(path.Join(f.slashRoot, remote))
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
absPath := enc.FromStandardPath(path.Join(f.slashRoot, remote))
fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
createArg := sharing.CreateSharedLinkWithSettingsArg{
Path: absPath,
// FIXME this gives settings_error/not_authorized/.. errors
// and the expires setting isn't in the documentation so remove
// for now.
// Settings: &sharing.SharedLinkSettings{
// Expires: time.Now().Add(time.Duration(expire)).UTC().Round(time.Second),
// },
}
var linkRes sharing.IsSharedLinkMetadata
err = f.pacer.Call(func() (bool, error) {
@@ -860,8 +840,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Do the move
arg := files.RelocationArg{
RelocationPath: files.RelocationPath{
FromPath: f.opt.Enc.FromStandardPath(srcPath),
ToPath: f.opt.Enc.FromStandardPath(dstPath),
FromPath: enc.FromStandardPath(srcPath),
ToPath: enc.FromStandardPath(dstPath),
},
}
err = f.pacer.Call(func() (bool, error) {
@@ -1019,7 +999,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
fs.FixRangeOption(options, o.bytes)
headers := fs.OpenOptionHeaders(options)
arg := files.DownloadArg{
Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
Path: enc.FromStandardPath(o.remotePath()),
ExtraHeaders: headers,
}
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1131,13 +1111,6 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
return false, nil
}
entry, err = o.fs.srv.UploadSessionFinish(args, chunk)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
}
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
@@ -1157,7 +1130,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if ignoredFiles.MatchString(remote) {
return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
}
commitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath()))
commitInfo := files.NewCommitInfo(enc.FromStandardPath(o.remotePath()))
commitInfo.Mode.Tag = "overwrite"
// The Dropbox API only accepts timestamps in UTC with second precision.
commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
@@ -1183,7 +1156,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
func (o *Object) Remove(ctx context.Context) (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
Path: enc.FromStandardPath(o.remotePath()),
})
return shouldRetry(err)
})


@@ -6,7 +6,6 @@ import (
"net/http"
"regexp"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
@@ -18,7 +17,6 @@ import (
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
403, // Forbidden (may happen when request limit is exceeded)
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
@@ -29,20 +27,6 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
// Detect this error which the integration tests provoke
// error HTTP error 403 (403 Forbidden) returned body: "{\"message\":\"Flood detected: IP Locked #374\",\"status\":\"KO\"}"
//
// https://1fichier.com/api.html
//
// file/ls.cgi is limited :
//
// Warning (can be changed in case of abuses) :
// List all files of the account is limited to 1 request per hour.
// List folders is limited to 5 000 results and 1 request per folder per 30s.
if err != nil && strings.Contains(err.Error(), "Flood detected") {
fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
time.Sleep(30 * time.Second)
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
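
For context on how this helper is consumed, every 1fichier API request is wrapped by the pacer in roughly the following shape; this is a self-contained sketch with a faked 429 response and an inlined subset of the retry codes, not the backend's actual request code:

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/lib/pacer"
)

// Same shape as the shouldRetry above, with a subset of the retry codes inlined.
func shouldRetry(resp *http.Response, err error) (bool, error) {
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, []int{429, 500, 502, 503}), err
}

func main() {
	p := fs.NewPacer(pacer.NewDefault(
		pacer.MinSleep(400*time.Millisecond),
		pacer.MaxSleep(5*time.Second),
		pacer.DecayConstant(2),
	))
	attempts := 0
	err := p.Call(func() (bool, error) {
		attempts++
		if attempts == 1 {
			// Pretend the first request hit the rate limit.
			return shouldRetry(&http.Response{StatusCode: 429}, fmt.Errorf("HTTP error 429"))
		}
		return shouldRetry(nil, nil) // second attempt succeeds
	})
	fmt.Println("attempts:", attempts, "err:", err) // attempts: 2 err: <nil>
}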
@@ -125,7 +109,7 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
}
for i := range filesList.Items {
item := &filesList.Items[i]
item.Filename = f.opt.Enc.ToStandardName(item.Filename)
item.Filename = enc.ToStandardName(item.Filename)
}
return filesList, nil
@@ -151,10 +135,10 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
if err != nil {
return nil, errors.Wrap(err, "couldn't list folders")
}
foldersList.Name = f.opt.Enc.ToStandardName(foldersList.Name)
foldersList.Name = enc.ToStandardName(foldersList.Name)
for i := range foldersList.SubFolders {
folder := &foldersList.SubFolders[i]
folder.Name = f.opt.Enc.ToStandardName(folder.Name)
folder.Name = enc.ToStandardName(folder.Name)
}
// fs.Debugf(f, "Got FoldersList for id `%s`", directoryID)
@@ -163,6 +147,11 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
}
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
@@ -224,7 +213,7 @@ func getRemote(dir, fileName string) string {
}
func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (response *MakeFolderResponse, err error) {
name := f.opt.Enc.FromStandardName(leaf)
name := enc.FromStandardName(leaf)
// fs.Debugf(f, "Creating folder `%s` in id `%s`", name, directoryID)
request := MakeFolderRequest{
@@ -331,10 +320,10 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return response, err
}
func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string, options ...fs.OpenOption) (response *http.Response, err error) {
func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
// fs.Debugf(f, "Uploading File `%s`", fileName)
fileName = f.opt.Enc.FromStandardName(fileName)
fileName = enc.FromStandardName(fileName)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
@@ -349,7 +338,6 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
NoResponse: true,
Body: in,
ContentLength: &size,
Options: options,
MultipartContentName: "file[]",
MultipartFileName: fileName,
MultipartParams: map[string][]string{


@@ -11,26 +11,26 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
const (
rootID = "0"
apiBaseURL = "https://api.1fichier.com/v1"
minSleep = 400 * time.Millisecond // api is extremely rate limited now
maxSleep = 5 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
attackConstant = 0 // start with max sleep
rootID = "0"
apiBaseURL = "https://api.1fichier.com/v1"
minSleep = 334 * time.Millisecond // 3 API calls per second is recommended
maxSleep = 5 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
const enc = encodings.Fichier
func init() {
fs.Register(&fs.RegInfo{
Name: "fichier",
@@ -38,48 +38,25 @@ func init() {
Config: func(name string, config configmap.Mapper) {
},
NewFs: NewFs,
Options: []fs.Option{{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
Name: "api_key",
}, {
Help: "If you want to download a shared folder, add this parameter",
Name: "shared_folder",
Required: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Characters that need escaping
//
// '\\': '', // FULLWIDTH REVERSE SOLIDUS
// '<': '', // FULLWIDTH LESS-THAN SIGN
// '>': '', // FULLWIDTH GREATER-THAN SIGN
// '"': '', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
// '\'': '', // FULLWIDTH APOSTROPHE
// '$': '', // FULLWIDTH DOLLAR SIGN
// '`': '', // FULLWIDTH GRAVE ACCENT
//
// Leading space and trailing space
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeSingleQuote |
encoder.EncodeBackQuote |
encoder.EncodeDoubleQuote |
encoder.EncodeLtGt |
encoder.EncodeDollar |
encoder.EncodeLeftSpace |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8),
}},
Options: []fs.Option{
{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
Name: "api_key",
},
{
Help: "If you want to download a shared folder, add this parameter",
Name: "shared_folder",
Required: false,
Advanced: true,
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
Enc encoder.MultiEncoder `config:"encoding"`
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
}
// Fs is the interface a cloud storage system must provide
@@ -87,9 +64,9 @@ type Fs struct {
root string
name string
features *fs.Features
opt Options
dirCache *dircache.DirCache
baseClient *http.Client
options *Options
pacer *fs.Pacer
rest *rest.Client
}
@@ -185,8 +162,8 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
f := &Fs{
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
options: opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
baseClient: &http.Client{},
}
@@ -199,7 +176,7 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
f.rest = rest.NewClient(client).SetRoot(apiBaseURL)
f.rest.SetHeader("Authorization", "Bearer "+f.opt.APIKey)
f.rest.SetHeader("Authorization", "Bearer "+f.options.APIKey)
f.dirCache = dircache.New(root, rootID, f)
@@ -249,8 +226,8 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.opt.SharedFolder != "" {
return f.listSharedFiles(ctx, f.opt.SharedFolder)
if f.options.SharedFolder != "" {
return f.listSharedFiles(ctx, f.options.SharedFolder)
}
dirContent, err := f.listDir(ctx, dir)
@@ -264,7 +241,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
@@ -298,7 +275,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// Put in to the remote path with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
@@ -323,7 +300,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(300e9) {
if size > int64(100e9) {
return nil, errors.New("File too big, cant upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
@@ -334,12 +311,12 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
return nil, err
}
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, err
}
_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL, options...)
_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
if err != nil {
return nil, err
}
@@ -389,7 +366,13 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
_, err := f.dirCache.FindDir(ctx, dir, true)
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
return err
}
@@ -397,6 +380,11 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err


@@ -101,7 +101,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
// Update in to the object with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {


@@ -8,106 +8,83 @@ import (
"net/textproto"
"os"
"path"
"runtime"
"strings"
"sync"
"time"
"github.com/jlaffaye/ftp"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
)
const enc = encodings.FTP
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "ftp",
Description: "FTP Connection",
NewFs: NewFs,
Options: []fs.Option{{
Name: "host",
Help: "FTP host to connect to",
Required: true,
Examples: []fs.OptionExample{{
Value: "ftp.example.com",
Help: "Connect to ftp.example.com",
}},
}, {
Name: "user",
Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
}, {
Name: "port",
Help: "FTP port, leave blank to use default (21)",
}, {
Name: "pass",
Help: "FTP password",
IsPassword: true,
Required: true,
}, {
Name: "tls",
Help: `Use FTPS over TLS (Implicit)
When using implicit FTP over TLS the client will connect using TLS
right from the start, which in turn breaks the compatibility with
non-TLS-aware servers. This is usually served over port 990 rather
than port 21. Cannot be used in combination with explicit FTP.`,
Default: false,
}, {
Name: "explicit_tls",
Help: `Use FTP over TLS (Explicit)
When using explicit FTP over TLS the client explicitly request
security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTP.`,
Default: false,
}, {
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
Default: 0,
Advanced: true,
}, {
Name: "no_check_certificate",
Help: "Do not verify the TLS certificate of the server",
Default: false,
Advanced: true,
}, {
Name: "disable_epsv",
Help: "Disable using EPSV even if server advertises support",
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// The FTP protocol can't handle trailing spaces (for instance
// pureftpd turns them into _)
//
// proftpd can't handle '*' in file names
// pureftpd can't handle '[', ']' or '*'
Default: (encoder.Display |
encoder.EncodeRightSpace),
}},
Options: []fs.Option{
{
Name: "host",
Help: "FTP host to connect to",
Required: true,
Examples: []fs.OptionExample{{
Value: "ftp.example.com",
Help: "Connect to ftp.example.com",
}},
}, {
Name: "user",
Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
}, {
Name: "port",
Help: "FTP port, leave blank to use default (21)",
}, {
Name: "pass",
Help: "FTP password",
IsPassword: true,
Required: true,
}, {
Name: "tls",
Help: "Use FTP over TLS (Implicit)",
Default: false,
}, {
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
Default: 0,
Advanced: true,
}, {
Name: "no_check_certificate",
Help: "Do not verify the TLS certificate of the server",
Default: false,
Advanced: true,
}, {
Name: "disable_epsv",
Help: "Disable using EPSV even if server advertises support",
Default: false,
Advanced: true,
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
TLS bool `config:"tls"`
ExplicitTLS bool `config:"explicit_tls"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
Enc encoder.MultiEncoder `config:"encoding"`
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
TLS bool `config:"tls"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
}
// Fs represents a remote FTP server
@@ -162,69 +139,20 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// Enable debugging output
type debugLog struct {
mu sync.Mutex
auth bool
}
// Write writes len(p) bytes from p to the underlying data stream. It returns
// the number of bytes written from p (0 <= n <= len(p)) and any error
// encountered that caused the write to stop early. Write must return a non-nil
// error if it returns n < len(p). Write must not modify the slice data, even
// temporarily.
//
// Implementations must not retain p.
//
// This writes debug info to the log
func (dl *debugLog) Write(p []byte) (n int, err error) {
dl.mu.Lock()
defer dl.mu.Unlock()
_, file, _, ok := runtime.Caller(1)
direction := "FTP Rx"
if ok && strings.Contains(file, "multi") {
direction = "FTP Tx"
}
lines := strings.Split(string(p), "\r\n")
if lines[len(lines)-1] == "" {
lines = lines[:len(lines)-1]
}
for _, line := range lines {
if !dl.auth && strings.HasPrefix(line, "PASS") {
fs.Debugf(direction, "PASS *****")
continue
}
fs.Debugf(direction, "%q", line)
}
return len(p), nil
}
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
fs.Debugf(f, "Connecting to FTP server")
ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)}
if f.opt.TLS && f.opt.ExplicitTLS {
fs.Errorf(f, "Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
return nil, errors.New("Implicit TLS and explicit TLS are mutually incompatible. Please revise your config")
} else if f.opt.TLS {
if f.opt.TLS {
tlsConfig := &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
} else if f.opt.ExplicitTLS {
tlsConfig := &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
}
if fs.Config.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: fs.Config.Dump&fs.DumpAuth != 0}))
}
c, err := ftp.Dial(f.dialAddr, ftpConfig...)
if err != nil {
fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
@@ -253,11 +181,7 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
if c != nil {
return c, nil
}
c, err = f.ftpConnection()
if err != nil && f.opt.Concurrency > 0 {
f.tokens.Put()
}
return c, err
return f.ftpConnection()
}
// Return an FTP connection to the pool
@@ -270,13 +194,7 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
if f.opt.Concurrency > 0 {
defer f.tokens.Put()
}
if pc == nil {
return
}
c := *pc
if c == nil {
return
}
*pc = nil
if err != nil {
// If not a regular FTP error code then check the connection
@@ -390,22 +308,22 @@ func translateErrorDir(err error) error {
}
// entryToStandard converts an incoming ftp.Entry to Standard encoding
func (f *Fs) entryToStandard(entry *ftp.Entry) {
func entryToStandard(entry *ftp.Entry) {
// Skip . and .. as we don't want these encoded
if entry.Name == "." || entry.Name == ".." {
return
}
entry.Name = f.opt.Enc.ToStandardName(entry.Name)
entry.Target = f.opt.Enc.ToStandardPath(entry.Target)
entry.Name = enc.ToStandardName(entry.Name)
entry.Target = enc.ToStandardPath(entry.Target)
}
// dirFromStandardPath returns dir in encoded form.
func (f *Fs) dirFromStandardPath(dir string) string {
func dirFromStandardPath(dir string) string {
// Skip . and .. as we don't want these encoded
if dir == "." || dir == ".." {
return dir
}
return f.opt.Enc.FromStandardPath(dir)
return enc.FromStandardPath(dir)
}
// findItem finds a directory entry for the name in its parent directory
@@ -427,13 +345,13 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
if err != nil {
return nil, errors.Wrap(err, "findItem")
}
files, err := c.List(f.dirFromStandardPath(dir))
files, err := c.List(dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorFile(err)
}
for _, file := range files {
f.entryToStandard(file)
entryToStandard(file)
if file.Name == base {
return file, nil
}
@@ -500,7 +418,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
resultchan := make(chan []*ftp.Entry, 1)
errchan := make(chan error, 1)
go func() {
result, err := c.List(f.dirFromStandardPath(path.Join(f.root, dir)))
result, err := c.List(dirFromStandardPath(path.Join(f.root, dir)))
f.putFtpConnection(&c, err)
if err != nil {
errchan <- err
@@ -537,7 +455,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
for i := range files {
object := files[i]
f.entryToStandard(object)
entryToStandard(object)
newremote := path.Join(dir, object.Name)
switch object.Type {
case ftp.EntryTypeFolder:
@@ -607,7 +525,7 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
if err != nil {
return nil, errors.Wrap(err, "getInfo")
}
files, err := c.List(f.dirFromStandardPath(dir))
files, err := c.List(dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorFile(err)
@@ -615,7 +533,7 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
for i := range files {
file := files[i]
f.entryToStandard(file)
entryToStandard(file)
if file.Name == base {
info := &FileInfo{
Name: remote,
@@ -653,7 +571,7 @@ func (f *Fs) mkdir(abspath string) error {
if connErr != nil {
return errors.Wrap(connErr, "mkdir")
}
err = c.MakeDir(f.dirFromStandardPath(abspath))
err = c.MakeDir(dirFromStandardPath(abspath))
f.putFtpConnection(&c, err)
switch errX := err.(type) {
case *textproto.Error:
@@ -689,7 +607,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
if err != nil {
return errors.Wrap(translateErrorFile(err), "Rmdir")
}
err = c.RemoveDir(f.dirFromStandardPath(path.Join(f.root, dir)))
err = c.RemoveDir(dirFromStandardPath(path.Join(f.root, dir)))
f.putFtpConnection(&c, err)
return translateErrorDir(err)
}
@@ -710,8 +628,8 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, errors.Wrap(err, "Move")
}
err = c.Rename(
f.opt.Enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
f.opt.Enc.FromStandardPath(path.Join(f.root, remote)),
enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
enc.FromStandardPath(path.Join(f.root, remote)),
)
f.putFtpConnection(&c, err)
if err != nil {
@@ -764,8 +682,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return errors.Wrap(err, "DirMove")
}
err = c.Rename(
f.dirFromStandardPath(srcPath),
f.dirFromStandardPath(dstPath),
dirFromStandardPath(srcPath),
dirFromStandardPath(dstPath),
)
f.putFtpConnection(&c, err)
if err != nil {
@@ -851,23 +769,19 @@ func (f *ftpReadCloser) Close() error {
case <-timer.C:
// if timer fired assume no error but connection dead
fs.Errorf(f.f, "Timeout when waiting for connection Close")
f.f.putFtpConnection(nil, nil)
return nil
}
// if errors while reading or closing, dump the connection
if err != nil || f.err != nil {
_ = f.c.Quit()
f.f.putFtpConnection(nil, nil)
} else {
f.f.putFtpConnection(&f.c, nil)
}
// mask the error if it was caused by a premature close
// NB StatusAboutToSend is to work around a bug in pureftpd
// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable:
err = nil
}
}
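Illustrative sketch, not part of this changeset: the Close method above runs the close in a goroutine and gives up once a timer fires, treating the connection as dead rather than blocking forever. The pattern in isolation (closeWithTimeout is a made-up name):
package main
import (
	"errors"
	"fmt"
	"time"
)
// closeWithTimeout runs closeFn in a goroutine and abandons it if it has not
// finished within the timeout, mirroring ftpReadCloser.Close above.
func closeWithTimeout(closeFn func() error, timeout time.Duration) error {
	errc := make(chan error, 1)
	go func() { errc <- closeFn() }()

	timer := time.NewTimer(timeout)
	defer timer.Stop()

	select {
	case err := <-errc:
		return err
	case <-timer.C:
		return errors.New("timeout waiting for close")
	}
}
func main() {
	err := closeWithTimeout(func() error {
		time.Sleep(10 * time.Millisecond)
		return nil
	}, time.Second)
	fmt.Println(err) // <nil>
}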
@@ -895,7 +809,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
if err != nil {
return nil, errors.Wrap(err, "open")
}
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
fd, err := c.RetrFrom(enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
return nil, errors.Wrap(err, "open")
@@ -930,11 +844,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return errors.Wrap(err, "Update")
}
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
err = c.Stor(enc.FromStandardPath(path), in)
if err != nil {
_ = c.Quit() // toss this connection to avoid sync errors
remove()
o.fs.putFtpConnection(nil, err)
return errors.Wrap(err, "update stor")
}
o.fs.putFtpConnection(&c, nil)
@@ -961,7 +874,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
if err != nil {
return errors.Wrap(err, "Remove")
}
err = c.Delete(o.fs.opt.Enc.FromStandardPath(path))
err = c.Delete(enc.FromStandardPath(path))
o.fs.putFtpConnection(&c, err)
}
return err

View File

@@ -5,44 +5,13 @@ import (
"testing"
"github.com/rclone/rclone/backend/ftp"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPProftpd:",
RemoteName: "TestFTP:",
NilObject: (*ftp.Object)(nil),
})
}
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPRclone:",
NilObject: (*ftp.Object)(nil),
})
}
func TestIntegration3(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPPureftpd:",
NilObject: (*ftp.Object)(nil),
})
}
// func TestIntegration4(t *testing.T) {
// if *fstest.RemoteName != "" {
// t.Skip("skipping as -remote is set")
// }
// fstests.Run(t, &fstests.Opt{
// RemoteName: "TestFTPVsftpd:",
// NilObject: (*ftp.Object)(nil),
// })
// }

View File

@@ -21,6 +21,7 @@ import (
"io/ioutil"
"log"
"net/http"
"os"
"path"
"strings"
"time"
@@ -31,13 +32,12 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"golang.org/x/oauth2"
@@ -69,6 +69,8 @@ var (
}
)
const enc = encodings.GoogleCloudStorage
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -79,29 +81,30 @@ func init() {
Config: func(name string, m configmap.Mapper) {
saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials")
anonymous, _ := m.Get("anonymous")
if saFile != "" || saCreds != "" || anonymous == "true" {
if saFile != "" || saCreds != "" {
return
}
err := oauthutil.Config("google cloud storage", name, m, storageConfig, nil)
err := oauthutil.Config("google cloud storage", name, m, storageConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Google Application Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Google Application Client Secret\nLeave blank normally.",
}, {
Name: "project_number",
Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
}, {
Name: "service_account_credentials",
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
Hide: fs.OptionHideBoth,
}, {
Name: "anonymous",
Help: "Access public buckets and objects without credentials\nSet to 'true' if you just want to download files and don't configure credentials.",
Default: false,
}, {
Name: "object_acl",
Help: "Access Control List for new objects.",
@@ -241,36 +244,24 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "COLDLINE",
Help: "Coldline storage class",
}, {
Value: "ARCHIVE",
Help: "Archive storage class",
}, {
Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class",
}},
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
ProjectNumber string `config:"project_number"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
Anonymous bool `config:"anonymous"`
ObjectACL string `config:"object_acl"`
BucketACL string `config:"bucket_acl"`
BucketPolicyOnly bool `config:"bucket_policy_only"`
Location string `config:"location"`
StorageClass string `config:"storage_class"`
Enc encoder.MultiEncoder `config:"encoding"`
ProjectNumber string `config:"project_number"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
ObjectACL string `config:"object_acl"`
BucketACL string `config:"bucket_acl"`
BucketPolicyOnly bool `config:"bucket_policy_only"`
Location string `config:"location"`
StorageClass string `config:"storage_class"`
}
// Fs represents a remote storage server
@@ -362,7 +353,7 @@ func parsePath(path string) (root string) {
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
return enc.FromStandardName(bucketName), enc.FromStandardPath(bucketPath)
}
// split returns bucket and bucketPath from the object
@@ -405,15 +396,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// try loading service account credentials from env variable, then from a file
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
if err != nil {
return nil, errors.Wrap(err, "error opening service account credentials file")
}
opt.ServiceAccountCredentials = string(loadedCreds)
}
if opt.Anonymous {
oAuthClient = &http.Client{}
} else if opt.ServiceAccountCredentials != "" {
if opt.ServiceAccountCredentials != "" {
oAuthClient, err = getServiceAccountClient([]byte(opt.ServiceAccountCredentials))
if err != nil {
return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
@@ -453,7 +442,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if f.rootBucket != "" && f.rootDirectory != "" {
// Check to see if the object exists
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
encodedDirectory := enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
return shouldRetry(err)
@@ -538,7 +527,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
if !strings.HasSuffix(remote, "/") {
continue
}
remote = f.opt.Enc.ToStandardPath(remote)
remote = enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
@@ -554,13 +543,13 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
}
for _, object := range objects.Items {
remote := f.opt.Enc.ToStandardPath(object.Name)
remote := enc.ToStandardPath(object.Name)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", object.Name)
continue
}
remote = remote[len(prefix):]
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
isDirectory := strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
@@ -631,7 +620,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
return nil, err
}
for _, bucket := range buckets.Items {
d := fs.NewDir(f.opt.Enc.ToStandardName(bucket.Name), time.Time{})
d := fs.NewDir(enc.ToStandardName(bucket.Name), time.Time{})
entries = append(entries, d)
}
if buckets.NextPageToken == "" {
@@ -1068,33 +1057,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ContentType: fs.MimeType(ctx, src),
Metadata: metadataFromModTime(modTime),
}
// Apply upload options
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
object.CacheControl = value
case "content-disposition":
object.ContentDisposition = value
case "content-encoding":
object.ContentEncoding = value
case "content-language":
object.ContentLanguage = value
case "content-type":
object.ContentType = value
default:
const googMetaPrefix = "x-goog-meta-"
if strings.HasPrefix(lowerKey, googMetaPrefix) {
metaKey := lowerKey[len(googMetaPrefix):]
object.Metadata[metaKey] = value
} else {
fs.Errorf(o, "Don't know how to set key %q on upload", key)
}
}
}
var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
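Illustrative sketch, not part of this changeset: the removed loop above maps rclone upload options (header-style key/value pairs) onto fields of the GCS object being inserted, with x-goog-meta-* keys becoming custom metadata. The same mapping against a stand-in struct (uploadMeta and applyHeader are made-up names; the real fields live on storage.Object):
package main
import (
	"fmt"
	"strings"
)
// uploadMeta stands in for the storage.Object fields the removed loop fills.
type uploadMeta struct {
	CacheControl, ContentType string
	Metadata                  map[string]string
}
// applyHeader mirrors the removed switch: well-known headers map to dedicated
// fields, x-goog-meta-* headers become custom metadata entries.
func applyHeader(m *uploadMeta, key, value string) {
	const googMetaPrefix = "x-goog-meta-"
	switch lower := strings.ToLower(key); {
	case lower == "cache-control":
		m.CacheControl = value
	case lower == "content-type":
		m.ContentType = value
	case strings.HasPrefix(lower, googMetaPrefix):
		if m.Metadata == nil {
			m.Metadata = map[string]string{}
		}
		m.Metadata[lower[len(googMetaPrefix):]] = value
	default:
		fmt.Printf("don't know how to set key %q on upload\n", key)
	}
}
func main() {
	var m uploadMeta
	applyHeader(&m, "Cache-Control", "no-cache")
	applyHeader(&m, "X-Goog-Meta-Owner", "alice")
	fmt.Printf("%+v\n", m)
}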

View File

@@ -30,7 +30,7 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a1)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"one": []*api.Album{a1},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
@@ -39,7 +39,7 @@ func TestAlbumsAdd(t *testing.T) {
"one": a1,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one"},
"": []string{"one"},
}, albums.path)
a2 := &api.Album{
@@ -49,8 +49,8 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a2)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2},
"one": []*api.Album{a1},
"two": []*api.Album{a2},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
@@ -61,7 +61,7 @@ func TestAlbumsAdd(t *testing.T) {
"two": a2,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one", "two"},
"": []string{"one", "two"},
}, albums.path)
// Add a duplicate
@@ -72,8 +72,8 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a2a)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2, a2a},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
@@ -86,7 +86,7 @@ func TestAlbumsAdd(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one", "two {2}", "two {2a}"},
"": []string{"one", "two {2}", "two {2a}"},
}, albums.path)
// Add a sub directory
@@ -97,9 +97,9 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
@@ -114,8 +114,8 @@ func TestAlbumsAdd(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one", "two {2}", "two {2a}"},
"one": {"sub"},
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
// Add a weird path
@@ -126,10 +126,10 @@ func TestAlbumsAdd(t *testing.T) {
albums.add(a0)
assert.Equal(t, map[string][]*api.Album{
"{0}": {a0},
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
"{0}": []*api.Album{a0},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"0": a0,
@@ -146,8 +146,8 @@ func TestAlbumsAdd(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one", "two {2}", "two {2a}", "{0}"},
"one": {"sub"},
"": []string{"one", "two {2}", "two {2a}", "{0}"},
"one": []string{"sub"},
}, albums.path)
}
@@ -181,9 +181,9 @@ func TestAlbumsDel(t *testing.T) {
albums.add(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
@@ -198,16 +198,16 @@ func TestAlbumsDel(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one", "two {2}", "two {2a}"},
"one": {"sub"},
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
albums.del(a1)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"2": a2,
@@ -220,16 +220,16 @@ func TestAlbumsDel(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one", "two {2}", "two {2a}"},
"one": {"sub"},
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
albums.del(a2)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"2a": a2a,
@@ -240,16 +240,16 @@ func TestAlbumsDel(t *testing.T) {
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one", "two {2a}"},
"one": {"sub"},
"": []string{"one", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
albums.del(a2a)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1sub": a1sub,
@@ -258,16 +258,16 @@ func TestAlbumsDel(t *testing.T) {
"one/sub": a1sub,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": {"one"},
"one": {"sub"},
"": []string{"one"},
"one": []string{"sub"},
}, albums.path)
albums.del(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": {a1},
"two": {a2, a2a},
"one/sub": {a1sub},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{}, albums.byID)
assert.Equal(t, map[string]*api.Album{}, albums.byTitle)

View File

@@ -17,7 +17,7 @@ type Error struct {
Details ErrorDetails `json:"error"`
}
// Error satisfies error interface
// Error statisfies error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s (%d %s)", e.Details.Message, e.Details.Code, e.Details.Status)
}

View File

@@ -21,6 +21,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
@@ -95,7 +96,7 @@ func init() {
}
// Do the oauth
err = oauthutil.Config("google photos", name, m, oauthConfig, nil)
err = oauthutil.Config("google photos", name, m, oauthConfig)
if err != nil {
golog.Fatalf("Failed to configure token: %v", err)
}
@@ -109,7 +110,13 @@ func init() {
`)
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Google Application Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Google Application Client Secret\nLeave blank normally.",
}, {
Name: "read_only",
Default: false,
Help: `Set to make the Google Photos backend read only.
@@ -127,20 +134,14 @@ rclone mount needs to know the size of files in advance of reading
them, so setting this flag when using rclone mount is recommended if
you want to read the media.`,
Advanced: true,
}, {
Name: "start_year",
Default: 2000,
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`,
Advanced: true,
}}...),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
StartYear int `config:"start_year"`
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
}
// Fs represents a remote storage server
@@ -201,11 +202,6 @@ func (f *Fs) dirTime() time.Time {
return f.startTime
}
// startYear returns the start year
func (f *Fs) startYear() int {
return f.opt.StartYear
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
@@ -228,10 +224,6 @@ func errorHandler(resp *http.Response) error {
if err != nil {
body = nil
}
// Google sends 404 messages as images so be prepared for that
if strings.HasPrefix(resp.Header.Get("Content-Type"), "image/") {
body = []byte("Image not found or broken")
}
var e = api.Error{
Details: api.ErrorDetails{
Code: resp.StatusCode,
@@ -951,9 +943,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Upload the media item in exchange for an UploadToken
opts := rest.Opts{
Method: "POST",
Path: "/uploads",
Options: options,
Method: "POST",
Path: "/uploads",
ExtraHeaders: map[string]string{
"X-Goog-Upload-File-Name": fileName,
"X-Goog-Upload-Protocol": "raw",
@@ -1012,9 +1003,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Add upload to internal storage
if pattern.isUpload {
o.fs.uploadedMu.Lock()
o.fs.uploaded.AddEntry(o)
o.fs.uploadedMu.Unlock()
}
return nil
}

View File

@@ -12,7 +12,6 @@ import (
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
@@ -27,6 +26,17 @@ const (
fileNameUpload = "rclone-test-image2.jpg"
)
// Wrapper to override the remote for an object
type overrideRemoteObject struct {
fs.Object
remote string
}
// Remote returns the overridden remote name
func (o *overrideRemoteObject) Remote() string {
return o.remote
}
func TestIntegration(t *testing.T) {
ctx := context.Background()
fstest.Initialise()
@@ -56,7 +66,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()
@@ -221,7 +231,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()

View File

@@ -23,7 +23,6 @@ type lister interface {
listAlbums(ctx context.Context, shared bool) (all *albums, err error)
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
dirTime() time.Time
startYear() int
}
// dirPattern describes a single directory pattern
@@ -53,7 +52,6 @@ var patterns = dirPatterns{
fs.NewDir(prefix+"album", f.dirTime()),
fs.NewDir(prefix+"shared-album", f.dirTime()),
fs.NewDir(prefix+"upload", f.dirTime()),
fs.NewDir(prefix+"feature", f.dirTime()),
}, nil
},
},
@@ -191,28 +189,6 @@ var patterns = dirPatterns{
re: `^shared-album/(.+?)/([^/]+)$`,
isFile: true,
},
{
re: `^feature$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return fs.DirEntries{
fs.NewDir(prefix+"favorites", f.dirTime()),
}, nil
},
},
{
re: `^feature/favorites$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
filter := featureFilter(ctx, f, match)
if err != nil {
return nil, err
}
return f.listDir(ctx, prefix, filter)
},
},
{
re: `^feature/favorites/([^/]+)$`,
isFile: true,
},
}.mustCompile()
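Illustrative sketch, not part of this changeset: the patterns table above is a list of path regexps, each paired with either an isFile flag or a function that synthesises directory entries, which together present Google Photos as a virtual directory tree. A much reduced version of the same matching idea (dirRule, rules and match are made-up names):
package main
import (
	"fmt"
	"regexp"
)
// dirRule pairs a path regexp with a flag saying whether a match is a file
// or a synthetic directory, like the dirPattern entries above.
type dirRule struct {
	re     *regexp.Regexp
	isFile bool
}
var rules = []dirRule{
	{re: regexp.MustCompile(`^album$`)},
	{re: regexp.MustCompile(`^album/([^/]+)$`)},
	{re: regexp.MustCompile(`^album/([^/]+)/([^/]+)$`), isFile: true},
}
// match returns the first rule whose regexp matches itemPath, plus the
// captured groups, mimicking dirPatterns.match in a much reduced form.
func match(itemPath string) (*dirRule, []string) {
	for i := range rules {
		if m := rules[i].re.FindStringSubmatch(itemPath); m != nil {
			return &rules[i], m
		}
	}
	return nil, nil
}
func main() {
	r, m := match("album/holiday/IMG_0001.jpg")
	fmt.Println(r.isFile, m[1], m[2]) // true holiday IMG_0001.jpg
}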
// mustCompile compiles the regexps in the dirPatterns
@@ -224,7 +200,7 @@ func (ds dirPatterns) mustCompile() dirPatterns {
return ds
}
// match finds the path passed in the matching structure and
// match finds the path passed in in the matching structure and
// returns the parameters and a pointer to the match, or nil.
func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []string, prefix string, pattern *dirPattern) {
itemPath = strings.Trim(itemPath, "/")
@@ -246,10 +222,11 @@ func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []
return nil, "", nil
}
// Return the years from startYear to today
// Return the years from 2000 to today
// FIXME make configurable?
func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
currentYear := f.dirTime().Year()
for year := f.startYear(); year <= currentYear; year++ {
for year := 2000; year <= currentYear; year++ {
entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), f.dirTime()))
}
return entries, nil
@@ -313,24 +290,6 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
return sf, nil
}
// featureFilter creates a filter for the Feature enum
//
// The API only supports one feature, FAVORITES, so hardcode that feature
//
// https://developers.google.com/photos/library/reference/rest/v1/mediaItems/search#FeatureFilter
func featureFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter) {
sf = api.SearchFilter{
Filters: &api.Filters{
FeatureFilter: &api.FeatureFilter{
IncludedFeatures: []string{
"FAVORITES",
},
},
},
}
return sf
}
// Turns an albumPath into entries
//
// These can either be synthetic directory entries if the album path

View File

@@ -59,11 +59,6 @@ func (f *testLister) dirTime() time.Time {
return startTime
}
// mock startYear for testing
func (f *testLister) startYear() int {
return 2000
}
func TestPatternMatch(t *testing.T) {
for testNumber, test := range []struct {
// input
@@ -155,38 +150,6 @@ func TestPatternMatch(t *testing.T) {
wantPrefix: "file.jpg/",
wantPattern: &patterns[5],
},
{
root: "",
itemPath: "feature",
isFile: false,
wantMatch: []string{"feature"},
wantPrefix: "feature/",
wantPattern: &patterns[23],
},
{
root: "feature/favorites",
itemPath: "",
isFile: false,
wantMatch: []string{"feature/favorites"},
wantPrefix: "",
wantPattern: &patterns[24],
},
{
root: "feature",
itemPath: "favorites",
isFile: false,
wantMatch: []string{"feature/favorites"},
wantPrefix: "favorites/",
wantPattern: &patterns[24],
},
{
root: "feature/favorites",
itemPath: "file.jpg",
isFile: true,
wantMatch: []string{"feature/favorites/file.jpg", "file.jpg"},
wantPrefix: "file.jpg/",
wantPattern: &patterns[25],
},
} {
t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q,isFile=%v", testNumber, test.root, test.itemPath, test.isFile), func(t *testing.T) {
gotMatch, gotPrefix, gotPattern := patterns.match(test.root, test.itemPath, test.isFile)

View File

@@ -166,7 +166,8 @@ func TestNewObject(t *testing.T) {
require.NoError(t, err)
tFile := fi.ModTime()
fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
// check object not found
o, err = f.NewObject(context.Background(), "not found.txt")

View File

@@ -21,7 +21,7 @@ func newAuth(f *Fs) *auth {
}
}
// Request constructs an http.Request for authentication
// Request constructs a http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {

View File

@@ -20,6 +20,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
@@ -38,7 +39,7 @@ var (
// Description of how to auth for this app
oauthConfig = &oauth2.Config{
Scopes: []string{
"credentials.r", // Read OpenStack credentials
"credentials.r", // Read Openstack credentials
},
Endpoint: oauth2.Endpoint{
AuthURL: "https://api.hubic.com/oauth/auth/",
@@ -57,20 +58,26 @@ func init() {
Description: "Hubic",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
err := oauthutil.Config("hubic", name, m, oauthConfig, nil)
err := oauthutil.Config("hubic", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
Options: append([]fs.Option{{
Name: config.ConfigClientID,
Help: "Hubic Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Hubic Client Secret\nLeave blank normally.",
}}, swift.SharedOptions...),
})
}
// credentials is the JSON returned from the Hubic API to read the
// OpenStack credentials
type credentials struct {
Token string `json:"token"` // OpenStack token
Endpoint string `json:"endpoint"` // OpenStack endpoint
Token string `json:"token"` // Openstack token
Endpoint string `json:"endpoint"` // Openstack endpoint
Expires string `json:"expires"` // Expires date - eg "2015-11-09T14:24:56+01:00"
}

View File

@@ -46,57 +46,13 @@ func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// APIString returns Time string in Jottacloud API format
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
// LoginToken is struct representing the login token generated in the WebUI
type LoginToken struct {
Username string `json:"username"`
Realm string `json:"realm"`
WellKnownLink string `json:"well_known_link"`
AuthToken string `json:"auth_token"`
}
// WellKnown contains some configuration parameters for setting up endpoints
type WellKnown struct {
Issuer string `json:"issuer"`
AuthorizationEndpoint string `json:"authorization_endpoint"`
TokenEndpoint string `json:"token_endpoint"`
TokenIntrospectionEndpoint string `json:"token_introspection_endpoint"`
UserinfoEndpoint string `json:"userinfo_endpoint"`
EndSessionEndpoint string `json:"end_session_endpoint"`
JwksURI string `json:"jwks_uri"`
CheckSessionIframe string `json:"check_session_iframe"`
GrantTypesSupported []string `json:"grant_types_supported"`
ResponseTypesSupported []string `json:"response_types_supported"`
SubjectTypesSupported []string `json:"subject_types_supported"`
IDTokenSigningAlgValuesSupported []string `json:"id_token_signing_alg_values_supported"`
UserinfoSigningAlgValuesSupported []string `json:"userinfo_signing_alg_values_supported"`
RequestObjectSigningAlgValuesSupported []string `json:"request_object_signing_alg_values_supported"`
ResponseNodesSupported []string `json:"response_modes_supported"`
RegistrationEndpoint string `json:"registration_endpoint"`
TokenEndpointAuthMethodsSupported []string `json:"token_endpoint_auth_methods_supported"`
TokenEndpointAuthSigningAlgValuesSupported []string `json:"token_endpoint_auth_signing_alg_values_supported"`
ClaimsSupported []string `json:"claims_supported"`
ClaimTypesSupported []string `json:"claim_types_supported"`
ClaimsParameterSupported bool `json:"claims_parameter_supported"`
ScopesSupported []string `json:"scopes_supported"`
RequestParameterSupported bool `json:"request_parameter_supported"`
RequestURIParameterSupported bool `json:"request_uri_parameter_supported"`
CodeChallengeMethodsSupported []string `json:"code_challenge_methods_supported"`
TLSClientCertificateBoundAccessTokens bool `json:"tls_client_certificate_bound_access_tokens"`
IntrospectionEndpoint string `json:"introspection_endpoint"`
}
// TokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type TokenJSON struct {
AccessToken string `json:"access_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
RefreshExpiresIn int32 `json:"refresh_expires_in"`
RefreshToken string `json:"refresh_token"`
TokenType string `json:"token_type"`
IDToken string `json:"id_token"`
NotBeforePolicy int32 `json:"not-before-policy"`
SessionState string `json:"session_state"`
Scope string `json:"scope"`
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
}
// JSON structures returned by new API
@@ -164,12 +120,6 @@ type CustomerInfo struct {
IOSHash string `json:"ios_hash"`
}
// TrashResponse is returned when emptying the Trash
type TrashResponse struct {
Folders int64 `json:"folders"`
Files int64 `json:"files"`
}
// XML structures returned by the old API
// Flag is a hacky type for checking if an attribute is present

View File

@@ -4,9 +4,7 @@ import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
@@ -28,49 +26,47 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)
const enc = encodings.JottaCloud
// Globals
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta"
defaultMountpoint = "Archive"
rootURL = "https://jfs.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/"
defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
cachePrefix = "rclone-jcmd5-"
configDevice = "device"
configMountpoint = "mountpoint"
configTokenURL = "tokenURL"
configClientID = "client_id"
configClientSecret = "client_secret"
configVersion = 1
v1tokenURL = "https://api.jottacloud.com/auth/v1/token"
v1registerURL = "https://api.jottacloud.com/auth/v1/register"
v1ClientID = "nibfk8biu12ju7hpqomr8b1e40"
v1EncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
v1configVersion = 0
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta"
defaultMountpoint = "Archive"
rootURL = "https://www.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/"
tokenURL = "https://api.jottacloud.com/auth/v1/token"
registerURL = "https://api.jottacloud.com/auth/v1/register"
cachePrefix = "rclone-jcmd5-"
rcloneClientID = "nibfk8biu12ju7hpqomr8b1e40"
rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
configClientID = "client_id"
configClientSecret = "client_secret"
configDevice = "device"
configMountpoint = "mountpoint"
charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
)
var (
// Description of how to auth for this app for a personal account
oauthConfig = &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: defaultTokenURL,
TokenURL: defaultTokenURL,
AuthURL: tokenURL,
TokenURL: tokenURL,
},
RedirectURL: oauthutil.RedirectLocalhostURL,
}
@@ -81,37 +77,71 @@ func init() {
// needs to be done early so we can use oauth during config
fs.Register(&fs.RegInfo{
Name: "jottacloud",
Description: "Jottacloud",
Description: "JottaCloud",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm() {
return
}
}
refresh := false
if version, ok := m.Get("configVersion"); ok {
ver, err := strconv.Atoi(version)
srv := rest.NewClient(fshttp.NewClient(fs.Config))
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
if config.Confirm() {
deviceRegistration, err := registerDevice(ctx, srv)
if err != nil {
log.Fatalf("Failed to parse config version - corrupted config")
log.Fatalf("Failed to register device: %v", err)
}
refresh = (ver != configVersion) && (ver != v1configVersion)
m.Set(configClientID, deviceRegistration.ClientID)
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
}
if refresh {
fmt.Printf("Config outdated - refreshing\n")
} else {
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm(false) {
return
}
}
clientID, ok := m.Get(configClientID)
if !ok {
clientID = rcloneClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = rcloneEncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
fmt.Printf("Username> ")
username := config.ReadLine()
password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")
token, err := doAuth(ctx, srv, username, password)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
}
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
log.Fatalf("Error while saving token: %s", err)
}
fmt.Printf("Use legacy authentification?.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n")
if config.Confirm(false) {
v1config(ctx, name, m)
} else {
v2config(ctx, name, m)
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm() {
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
}
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
log.Fatalf("Failed to setup mountpoint: %s", err)
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
},
Options: []fs.Option{{
@@ -120,13 +150,13 @@ func init() {
Default: fs.SizeSuffix(10 * 1024 * 1024),
Advanced: true,
}, {
Name: "trashed_only",
Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
Name: "hard_delete",
Help: "Delete files permanently rather than putting them into the trash.",
Default: false,
Advanced: true,
}, {
Name: "hard_delete",
Help: "Delete files permanently rather than putting them into the trash.",
Name: "unlink",
Help: "Remove existing public link to file/folder with link command rather than creating.\nDefault is false, meaning link command will create or retrieve public link.",
Default: false,
Advanced: true,
}, {
@@ -134,29 +164,18 @@ func init() {
Help: "Files bigger than this can be resumed if the upload fail's.",
Default: fs.SizeSuffix(10 * 1024 * 1024),
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Encode invalid UTF-8 bytes as xml doesn't handle them properly.
//
// Also: '*', '/', ':', '<', '>', '?', '\"', '\x00', '|'
Default: (encoder.Display |
encoder.EncodeWin | // :?"*<>|
encoder.EncodeInvalidUtf8),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Device string `config:"device"`
Mountpoint string `config:"mountpoint"`
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
TrashedOnly bool `config:"trashed_only"`
HardDelete bool `config:"hard_delete"`
UploadThreshold fs.SizeSuffix `config:"upload_resume_limit"`
Enc encoder.MultiEncoder `config:"encoding"`
Device string `config:"device"`
Mountpoint string `config:"mountpoint"`
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
HardDelete bool `config:"hard_delete"`
Unlink bool `config:"unlink"`
UploadThreshold fs.SizeSuffix `config:"upload_resume_limit"`
}
// Fs represents a remote jottacloud
@@ -208,7 +227,7 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// parsePath parses a box 'url'
// parsePath parses an box 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
@@ -230,77 +249,12 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// v1config configure a jottacloud backend using legacy authentification
func v1config(ctx context.Context, name string, m configmap.Mapper) {
srv := rest.NewClient(fshttp.NewClient(fs.Config))
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
if config.Confirm(false) {
deviceRegistration, err := registerDevice(ctx, srv)
if err != nil {
log.Fatalf("Failed to register device: %v", err)
}
m.Set(configClientID, deviceRegistration.ClientID)
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
}
clientID, ok := m.Get(configClientID)
if !ok {
clientID = v1ClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = v1EncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
oauthConfig.Endpoint.AuthURL = v1tokenURL
oauthConfig.Endpoint.TokenURL = v1tokenURL
fmt.Printf("Username> ")
username := config.ReadLine()
password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")
token, err := doAuthV1(ctx, srv, username, password)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
}
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
log.Fatalf("Error while saving token: %s", err)
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
}
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
log.Fatalf("Failed to setup mountpoint: %s", err)
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
m.Set("configVersion", strconv.Itoa(v1configVersion))
}
// registerDevice register a new device for use with the jottacloud API
func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
// random generator to generate random device names
seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
randonDeviceNamePartLength := 21
randomDeviceNamePart := make([]byte, randonDeviceNamePartLength)
charset := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
for i := range randomDeviceNamePart {
randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
}
@@ -312,7 +266,7 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis
opts := rest.Opts{
Method: "POST",
RootURL: v1registerURL,
RootURL: registerURL,
ContentType: "application/x-www-form-urlencoded",
ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
Parameters: values,
@@ -323,8 +277,8 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis
return deviceRegistration, err
}
// doAuthV1 runs the actual token request for V1 authentification
func doAuthV1(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
// doAuth runs the actual token request
func doAuth(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
// prepare out token request with username and password
values := url.Values{}
values.Set("grant_type", "PASSWORD")
@@ -353,7 +307,7 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
resp, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
}
}
}
@@ -365,106 +319,6 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
return token, err
}
// v2config configure a jottacloud backend using the modern JottaCli token based authentification
func v2config(ctx context.Context, name string, m configmap.Mapper) {
srv := rest.NewClient(fshttp.NewClient(fs.Config))
fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
fmt.Printf("Login Token> ")
loginToken := config.ReadLine()
m.Set(configClientID, "jottacli")
m.Set(configClientSecret, "")
token, err := doAuthV2(ctx, srv, loginToken, m)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
}
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
log.Fatalf("Error while saving token: %s", err)
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
}
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
log.Fatalf("Failed to setup mountpoint: %s", err)
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
m.Set("configVersion", strconv.Itoa(configVersion))
}
// doAuthV2 runs the actual token request for V2 authentification
func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
if err != nil {
return token, err
}
// decode login token
var loginToken api.LoginToken
decoder := json.NewDecoder(bytes.NewReader(loginTokenBytes))
err = decoder.Decode(&loginToken)
if err != nil {
return token, err
}
// retrieve endpoint urls
opts := rest.Opts{
Method: "GET",
RootURL: loginToken.WellKnownLink,
}
var wellKnown api.WellKnown
_, err = srv.CallJSON(ctx, &opts, nil, &wellKnown)
if err != nil {
return token, err
}
// save the tokenurl
oauthConfig.Endpoint.AuthURL = wellKnown.TokenEndpoint
oauthConfig.Endpoint.TokenURL = wellKnown.TokenEndpoint
m.Set(configTokenURL, wellKnown.TokenEndpoint)
// prepare out token request with username and password
values := url.Values{}
values.Set("client_id", "jottacli")
values.Set("grant_type", "password")
values.Set("password", loginToken.AuthToken)
values.Set("scope", "offline_access+openid")
values.Set("username", loginToken.Username)
values.Encode()
opts = rest.Opts{
Method: "POST",
RootURL: oauthConfig.Endpoint.AuthURL,
ContentType: "application/x-www-form-urlencoded",
Body: strings.NewReader(values.Encode()),
}
// do the first request
var jsonToken api.TokenJSON
_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
if err != nil {
return token, err
}
token.AccessToken = jsonToken.AccessToken
token.RefreshToken = jsonToken.RefreshToken
token.TokenType = jsonToken.TokenType
token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
return token, err
}
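Illustrative sketch, not part of this changeset: the removed doAuthV2 above starts by decoding the personal login token, which is raw-URL base64 of a small JSON document, before discovering the token endpoint and exchanging it for an OAuth token. The decoding step on its own (loginToken and decodeLoginToken here are local stand-ins for api.LoginToken and the real flow):
package main
import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)
// loginToken mirrors the fields of api.LoginToken used by doAuthV2 above.
type loginToken struct {
	Username      string `json:"username"`
	Realm         string `json:"realm"`
	WellKnownLink string `json:"well_known_link"`
	AuthToken     string `json:"auth_token"`
}
// decodeLoginToken shows the first step of the v2 flow: the token pasted by
// the user is raw-URL base64 of a small JSON document.
func decodeLoginToken(s string) (*loginToken, error) {
	raw, err := base64.RawURLEncoding.DecodeString(s)
	if err != nil {
		return nil, err
	}
	var t loginToken
	if err := json.Unmarshal(raw, &t); err != nil {
		return nil, err
	}
	return &t, nil
}
func main() {
	tok64 := base64.RawURLEncoding.EncodeToString([]byte(`{"username":"u","auth_token":"t"}`))
	t, err := decodeLoginToken(tok64)
	fmt.Println(t.Username, t.AuthToken, err)
}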
// setupMountpoint sets up a custom device and mountpoint if desired by the user
func setupMountpoint(ctx context.Context, srv *rest.Client, apiSrv *rest.Client) (device, mountpoint string, err error) {
cust, err := getCustomerInfo(ctx, apiSrv)
@@ -553,7 +407,7 @@ func (f *Fs) setEndpointURL() {
if f.opt.Mountpoint == "" {
f.opt.Mountpoint = defaultMountpoint
}
f.endpointURL = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
}
// readMetaDataForPath reads the metadata from the path
@@ -602,17 +456,17 @@ func errorHandler(resp *http.Response) error {
return errResponse
}
// Jottacloud wants '+' to be URL encoded even though the RFC states it's not reserved
// Jottacloud want's '+' to be URL encoded even though the RFC states it's not reserved
func urlPathEscape(in string) string {
return strings.Replace(rest.URLPathEscape(in), "+", "%2B", -1)
}
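Illustrative sketch, not part of this changeset: urlPathEscape above does normal path escaping and then additionally encodes '+', which standard escaping leaves alone in paths but the Jottacloud API expects encoded. The same idea using net/url's PathEscape rather than rclone's rest.URLPathEscape (escapePlus is a made-up name):
package main
import (
	"fmt"
	"net/url"
	"strings"
)
// escapePlus escapes the path and then also encodes "+" as %2B.
func escapePlus(in string) string {
	return strings.Replace(url.PathEscape(in), "+", "%2B", -1)
}
func main() {
	fmt.Println(escapePlus("a+b c")) // a%2Bb%20c
}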
// filePathRaw returns an unescaped file path (f.root, file)
func (f *Fs) filePathRaw(file string) string {
return path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(path.Join(f.root, file)))
return path.Join(f.endpointURL, enc.FromStandardPath(path.Join(f.root, file)))
}
// filePath returns an escaped file path (f.root, file)
// filePath returns a escaped file path (f.root, file)
func (f *Fs) filePath(file string) string {
return urlPathEscape(f.filePathRaw(file))
}
@@ -624,7 +478,7 @@ func (f *Fs) filePath(file string) string {
// This filter catches all refresh requests, reads the body,
// changes the case and then sends it on
func grantTypeFilter(req *http.Request) {
if v1tokenURL == req.URL.String() {
if tokenURL == req.URL.String() {
// read the entire body
refreshBody, err := ioutil.ReadAll(req.Body)
if err != nil {
@@ -650,64 +504,35 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
// Check config version
var ver int
version, ok := m.Get("configVersion")
if ok {
ver, err = strconv.Atoi(version)
if err != nil {
return nil, errors.New("Failed to parse config version")
}
ok = (ver == configVersion) || (ver == v1configVersion)
}
rootIsDir := strings.HasSuffix(root, "/")
root = parsePath(root)
clientID, ok := m.Get(configClientID)
if !ok {
return nil, errors.New("Outdated config - please reconfigure this backend")
clientID = rcloneClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = rcloneEncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
// the oauth client for the api servers needs
// a filter to fix the grant_type issues (see above)
baseClient := fshttp.NewClient(fs.Config)
if ver == configVersion {
oauthConfig.ClientID = "jottacli"
// if custom endpoints are set use them else stick with defaults
if tokenURL, ok := m.Get(configTokenURL); ok {
oauthConfig.Endpoint.TokenURL = tokenURL
// jottacloud is weird. we need to use the tokenURL as authURL
oauthConfig.Endpoint.AuthURL = tokenURL
}
} else if ver == v1configVersion {
clientID, ok := m.Get(configClientID)
if !ok {
clientID = v1ClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = v1EncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
oauthConfig.Endpoint.TokenURL = v1tokenURL
oauthConfig.Endpoint.AuthURL = v1tokenURL
// add the request filter to fix token refresh
if do, ok := baseClient.Transport.(interface {
SetRequestFilter(f func(req *http.Request))
}); ok {
do.SetRequestFilter(grantTypeFilter)
} else {
fs.Debugf(name+":", "Couldn't add request filter - uploads will fail")
}
if do, ok := baseClient.Transport.(interface {
SetRequestFilter(f func(req *http.Request))
}); ok {
do.SetRequestFilter(grantTypeFilter)
} else {
fs.Debugf(name+":", "Couldn't add request filter - uploads will fail")
}
// Create OAuth Client
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
}
rootIsDir := strings.HasSuffix(root, "/")
root = parsePath(root)
f := &Fs{
name: name,
root: root,
@@ -723,9 +548,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
WriteMimeType: true,
}).Fill(f)
f.srv.SetErrorHandler(errorHandler)
if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now
f.features.ListR = nil
}
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
@@ -845,32 +667,26 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, errors.Wrap(err, "couldn't list files")
}
if bool(result.Deleted) && !f.opt.TrashedOnly {
if result.Deleted {
return nil, fs.ErrorDirNotFound
}
for i := range result.Folders {
item := &result.Folders[i]
if !f.opt.TrashedOnly && bool(item.Deleted) {
if item.Deleted {
continue
}
remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
remote := path.Join(dir, enc.ToStandardName(item.Name))
d := fs.NewDir(remote, time.Time(item.ModifiedAt))
entries = append(entries, d)
}
for i := range result.Files {
item := &result.Files[i]
if f.opt.TrashedOnly {
if !item.Deleted || item.State != "COMPLETED" {
continue
}
} else {
if item.Deleted || item.State != "COMPLETED" {
continue
}
if item.Deleted || item.State != "COMPLETED" {
continue
}
remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
remote := path.Join(dir, enc.ToStandardName(item.Name))
o, err := f.newObjectWithInfo(ctx, remote, item)
if err != nil {
continue
@@ -895,7 +711,7 @@ func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolde
if folder.Deleted {
return nil
}
folderPath := f.opt.Enc.ToStandardPath(path.Join(folder.Path, folder.Name))
folderPath := enc.ToStandardPath(path.Join(folder.Path, folder.Name))
folderPathLength := len(folderPath)
var remoteDir string
if folderPathLength > pathPrefixLength {
@@ -913,7 +729,7 @@ func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolde
if file.Deleted || file.State != "COMPLETED" {
continue
}
remoteFile := path.Join(remoteDir, f.opt.Enc.ToStandardName(file.Name))
remoteFile := path.Join(remoteDir, enc.ToStandardName(file.Name))
o, err := f.newObjectWithInfo(ctx, remoteFile, file)
if err != nil {
return err
@@ -1072,8 +888,8 @@ func (f *Fs) Precision() time.Duration {
}
// Purge deletes all the files and the container
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}
// copyOrMoves copies or moves directories or files depending on the method parameter
@@ -1084,7 +900,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
Parameters: url.Values{},
}
opts.Parameters.Set(method, "/"+path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(path.Join(f.root, dest))))
opts.Parameters.Set(method, "/"+path.Join(f.endpointURL, enc.FromStandardPath(path.Join(f.root, dest))))
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
@@ -1191,7 +1007,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return fs.ErrorDirExists
}
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, enc.FromStandardPath(srcPath))+"/", dstRemote)
if err != nil {
return errors.Wrap(err, "couldn't move directory")
@@ -1200,14 +1016,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
opts := rest.Opts{
Method: "GET",
Path: f.filePath(remote),
Parameters: url.Values{},
}
if unlink {
if f.opt.Unlink {
opts.Parameters.Set("mode", "disableShare")
} else {
opts.Parameters.Set("mode", "enableShare")
@@ -1227,12 +1043,12 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
}
}
if err != nil {
if unlink {
if f.opt.Unlink {
return "", errors.Wrap(err, "couldn't remove public link")
}
return "", errors.Wrap(err, "couldn't create public link")
}
if unlink {
if f.opt.Unlink {
if result.PublicSharePath != "" {
return "", errors.Errorf("couldn't remove public link - %q", result.PublicSharePath)
}
@@ -1262,22 +1078,6 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return usage, nil
}
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) error {
opts := rest.Opts{
Method: "POST",
Path: "files/v1/purge_trash",
}
var info api.TrashResponse
_, err := f.apiSrv.CallJSON(ctx, &opts, nil, &info)
if err != nil {
return errors.Wrap(err, "couldn't empty trash")
}
return nil
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
@@ -1303,7 +1103,7 @@ func (o *Object) Remote() string {
return o.remote
}
// filePath returns an escaped file path (f.root, remote)
// filePath returns a escaped file path (f.root, remote)
func (o *Object) filePath() string {
return o.fs.filePath(o.remote)
}
@@ -1351,7 +1151,7 @@ func (o *Object) readMetaData(ctx context.Context, force bool) (err error) {
if err != nil {
return err
}
if bool(info.Deleted) && !o.fs.opt.TrashedOnly {
if info.Deleted {
return fs.ErrorObjectNotFound
}
return o.setMetaData(info)
@@ -1408,7 +1208,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// The cleanup function should be called when out is finished with
// regardless of whether this function returned an error or not.
func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader, cleanup func(), err error) {
// we need an MD5
// we need a MD5
md5Hasher := md5.New()
// use the teeReader to write to the local file AND calculate the MD5 while doing so
teeReader := io.TeeReader(in, md5Hasher)
@@ -1488,7 +1288,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
opts := rest.Opts{
Method: "POST",
Path: "files/v1/allocate",
Options: options,
ExtraHeaders: make(map[string]string),
}
fileDate := api.Time(src.ModTime(ctx)).APIString()
@@ -1499,7 +1298,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Created: fileDate,
Modified: fileDate,
Md5: md5String,
Path: path.Join(o.fs.opt.Mountpoint, o.fs.opt.Enc.FromStandardPath(path.Join(o.fs.root, o.remote))),
Path: path.Join(o.fs.opt.Mountpoint, enc.FromStandardPath(path.Join(o.fs.root, o.remote))),
}
// send it
@@ -1585,7 +1384,6 @@ var (
_ fs.ListRer = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
)
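The readMD5 helper shown above tees the upload stream through an MD5 hasher so the checksum is computed while the data is spooled to a local buffer or file. A minimal sketch of that pattern, separate from the jottacloud code (the input string and the use of ioutil.Discard as the spool target are illustrative only):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	in := strings.NewReader("example upload body") // stand-in for the incoming stream

	// Every byte read from teeReader is also written to the hasher, so the
	// MD5 is ready as soon as the spooling copy finishes.
	md5Hasher := md5.New()
	teeReader := io.TeeReader(in, md5Hasher)

	// A real spool target would be a memory buffer or temp file chosen by a size threshold.
	n, err := io.Copy(ioutil.Discard, teeReader)
	if err != nil {
		panic(err)
	}
	fmt.Printf("spooled %d bytes, md5=%s\n", n, hex.EncodeToString(md5Hasher.Sum(nil)))
}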

View File

@@ -12,74 +12,67 @@ import (
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
httpclient "github.com/koofr/go-httpclient"
koofrclient "github.com/koofr/go-koofrclient"
)
const enc = encodings.Koofr
// Register Fs with rclone
func init() {
fs.Register(&fs.RegInfo{
Name: "koofr",
Description: "Koofr",
NewFs: NewFs,
Options: []fs.Option{{
Name: "endpoint",
Help: "The Koofr API endpoint to use",
Default: "https://app.koofr.net",
Required: true,
Advanced: true,
}, {
Name: "mountid",
Help: "Mount ID of the mount to use. If omitted, the primary mount is used.",
Required: false,
Default: "",
Advanced: true,
}, {
Name: "setmtime",
Help: "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
Default: true,
Required: true,
Advanced: true,
}, {
Name: "user",
Help: "Your Koofr user name",
Required: true,
}, {
Name: "password",
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
IsPassword: true,
Required: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}},
Options: []fs.Option{
{
Name: "endpoint",
Help: "The Koofr API endpoint to use",
Default: "https://app.koofr.net",
Required: true,
Advanced: true,
}, {
Name: "mountid",
Help: "Mount ID of the mount to use. If omitted, the primary mount is used.",
Required: false,
Default: "",
Advanced: true,
}, {
Name: "setmtime",
Help: "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
Default: true,
Required: true,
Advanced: true,
}, {
Name: "user",
Help: "Your Koofr user name",
Required: true,
}, {
Name: "password",
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
IsPassword: true,
Required: true,
},
},
})
}
// Options represent the configuration of the Koofr backend
type Options struct {
Endpoint string `config:"endpoint"`
MountID string `config:"mountid"`
User string `config:"user"`
Password string `config:"password"`
SetMTime bool `config:"setmtime"`
Enc encoder.MultiEncoder `config:"encoding"`
Endpoint string `config:"endpoint"`
MountID string `config:"mountid"`
User string `config:"user"`
Password string `config:"password"`
SetMTime bool `config:"setmtime"`
}
// An Fs is a representation of a remote Koofr Fs
// A Fs is a representation of a remote Koofr Fs
type Fs struct {
name string
mountID string
@@ -250,9 +243,9 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// fullPath constructs a full, absolute path from an Fs root relative path,
// fullPath constructs a full, absolute path from a Fs root relative path,
func (f *Fs) fullPath(part string) string {
return f.opt.Enc.FromStandardPath(path.Join("/", f.root, part))
return enc.FromStandardPath(path.Join("/", f.root, part))
}
// NewFs constructs a new filesystem given a root path and configuration options
@@ -266,9 +259,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
if err != nil {
return nil, err
}
httpClient := httpclient.New()
httpClient.Client = fshttp.NewClient(fs.Config)
client := koofrclient.NewKoofrClientWithHTTPClient(opt.Endpoint, httpClient)
client := koofrclient.NewKoofrClient(opt.Endpoint, false)
basicAuth := fmt.Sprintf("Basic %s",
base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
client.HTTPClient.Headers.Set("Authorization", basicAuth)
@@ -305,7 +296,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
}
return nil, errors.New("Failed to find mount " + opt.MountID)
}
rootFile, err := f.client.FilesInfo(f.mountID, f.opt.Enc.FromStandardPath("/"+f.root))
rootFile, err := f.client.FilesInfo(f.mountID, enc.FromStandardPath("/"+f.root))
if err == nil && rootFile.Type != "dir" {
f.root = dir(f.root)
err = fs.ErrorIsFile
@@ -323,7 +314,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
entries = make([]fs.DirEntry, len(files))
for i, file := range files {
remote := path.Join(dir, f.opt.Enc.ToStandardName(file.Name))
remote := path.Join(dir, enc.ToStandardName(file.Name))
if file.Type == "dir" {
entries[i] = fs.NewDir(remote, time.Unix(0, 0))
} else {
@@ -421,7 +412,7 @@ func translateErrorsObject(err error) error {
}
// mkdir creates a directory at the given remote path. Creates ancestors if
// necessary
// neccessary
func (f *Fs) mkdir(fullPath string) error {
if fullPath == "/" {
return nil
@@ -603,7 +594,7 @@ func createLink(c *koofrclient.KoofrClient, mountID string, path string) (*link,
}
// PublicLink creates a public link to the remote path
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
linkData, err := createLink(f.client, f.mountID, f.fullPath(remote))
if err != nil {
return "", translateErrorsDir(err)
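The Koofr NewFs above authenticates every request with a static Basic Authorization header built from the configured user name and the rclone application password. A small sketch of building that header value (the credentials and the request below are placeholders, not real ones):

package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
)

func main() {
	user, pass := "alice@example.com", "application-password" // placeholders

	basicAuth := fmt.Sprintf("Basic %s",
		base64.StdEncoding.EncodeToString([]byte(user+":"+pass)))

	// Attach the header to an otherwise ordinary request to the endpoint.
	req, err := http.NewRequest("GET", "https://app.koofr.net", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", basicAuth)
	fmt.Println(req.Header.Get("Authorization"))
}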

View File

@@ -4,7 +4,6 @@ package local
import (
"context"
"os"
"syscall"
"github.com/pkg/errors"
@@ -16,9 +15,6 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var s syscall.Statfs_t
err := syscall.Statfs(f.root, &s)
if err != nil {
if os.IsNotExist(err) {
return nil, fs.ErrorDirNotFound
}
return nil, errors.Wrap(err, "failed to read disk usage")
}
bs := int64(s.Bsize) // nolint: unconvert
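About on Unix (above) fills the usage numbers from syscall.Statfs. A sketch of the same arithmetic on Linux, where total, free and available space come from the block counts multiplied by the block size (field types vary between platforms, hence the build tag):

// +build linux

package main

import (
	"fmt"
	"syscall"
)

func main() {
	var s syscall.Statfs_t
	if err := syscall.Statfs("/", &s); err != nil {
		panic(err)
	}
	bs := int64(s.Bsize) // filesystem block size in bytes
	total := bs * int64(s.Blocks)
	free := bs * int64(s.Bfree)
	avail := bs * int64(s.Bavail) // space available to unprivileged users
	fmt.Printf("total=%d free=%d available=%d\n", total, free, avail)
}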

View File

@@ -2,10 +2,8 @@
package local
import "github.com/rclone/rclone/lib/encoder"
import (
"github.com/rclone/rclone/fs/encodings"
)
// This is the encoding used by the local backend for macOS
//
// macOS can't store invalid UTF-8, it converts them into %XX encoding
const defaultEnc = (encoder.Base |
encoder.EncodeInvalidUtf8)
const enc = encodings.LocalMacOS

View File

@@ -2,7 +2,8 @@
package local
import "github.com/rclone/rclone/lib/encoder"
import (
"github.com/rclone/rclone/fs/encodings"
)
// This is the encoding used by the local backend for non windows platforms
const defaultEnc = encoder.Base
const enc = encodings.LocalUnix

View File

@@ -2,32 +2,8 @@
package local
import "github.com/rclone/rclone/lib/encoder"
import (
"github.com/rclone/rclone/fs/encodings"
)
// This is the encoding used by the local backend for windows platforms
//
// List of replaced characters:
// < (less than) -> '＜' // FULLWIDTH LESS-THAN SIGN
// > (greater than) -> '＞' // FULLWIDTH GREATER-THAN SIGN
// : (colon) -> '：' // FULLWIDTH COLON
// " (double quote) -> '＂' // FULLWIDTH QUOTATION MARK
// \ (backslash) -> '＼' // FULLWIDTH REVERSE SOLIDUS
// | (vertical line) -> '｜' // FULLWIDTH VERTICAL LINE
// ? (question mark) -> '？' // FULLWIDTH QUESTION MARK
// * (asterisk) -> '＊' // FULLWIDTH ASTERISK
//
// Additionally names can't end with a period (.) or space ( ).
// List of replaced characters:
// . (period) -> '．' // FULLWIDTH FULL STOP
// (space) -> '␠' // SYMBOL FOR SPACE
//
// Also encode invalid UTF-8 bytes as Go can't convert them to UTF-16.
//
// https://docs.microsoft.com/de-de/windows/desktop/FileIO/naming-a-file#naming-conventions
const defaultEnc = (encoder.Base |
encoder.EncodeWin |
encoder.EncodeBackSlash |
encoder.EncodeCtl |
encoder.EncodeRightSpace |
encoder.EncodeRightPeriod |
encoder.EncodeInvalidUtf8)
const enc = encodings.LocalWindows
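The character table above describes the Windows encoding used by the local backend. A rough sketch of round-tripping a name through a MultiEncoder assembled from the same flags; the flag set mirrors the constant above, but the example itself is illustrative and not part of the diff:

package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/encoder"
)

func main() {
	// Approximately the Windows rule set shown above.
	enc := encoder.MultiEncoder(
		encoder.Base |
			encoder.EncodeWin |
			encoder.EncodeBackSlash |
			encoder.EncodeCtl |
			encoder.EncodeRightSpace |
			encoder.EncodeRightPeriod |
			encoder.EncodeInvalidUtf8)

	name := `report "final"?.txt `
	onDisk := enc.FromStandardPath(name) // what would be written to disk
	back := enc.ToStandardPath(onDisk)   // what rclone presents again

	fmt.Println(onDisk)
	fmt.Println(back == name) // should print true: the mapping is designed to be reversible
}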

View File

@@ -1,4 +1,4 @@
// +build windows plan9 js
// +build windows plan9
package local

View File

@@ -1,4 +1,4 @@
// +build !windows,!plan9,!js
// +build !windows,!plan9
package local

View File

@@ -10,6 +10,7 @@ import (
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
@@ -19,12 +20,10 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers"
)
@@ -40,7 +39,6 @@ func init() {
Name: "local",
Description: "Local Disk",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows",
@@ -89,24 +87,7 @@ are being uploaded and aborts with a message which starts "can't copy
However on some file systems this modification time check may fail (eg
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
check can be disabled with this flag.
If this flag is set, rclone will use its best efforts to transfer a
file which is being updated. If the file is only having things
appended to it (eg a log) then rclone will transfer the log file with
the size it had the first time rclone saw it.
If the file is being modified throughout (not just appended to) then
the transfer may fail with a hash check failure.
In detail, once the file has had stat() called on it for the first
time we:
- Only transfer the size that stat gave
- Only checksum the size that stat gave
- Don't update the stat info for the file
`,
check can be disabled with this flag.`,
Default: false,
Advanced: true,
}, {
@@ -134,32 +115,6 @@ Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
Default: false,
Advanced: true,
}, {
Name: "no_sparse",
Help: `Disable sparse files for multi-thread downloads
On Windows platforms rclone will make sparse files when doing
multi-thread downloads. This avoids long pauses on large files where
the OS zeros the file. However sparse files may be undesirable as they
cause disk fragmentation and can be slow to work with.`,
Default: false,
Advanced: true,
}, {
Name: "no_set_modtime",
Help: `Disable setting modtime
Normally rclone updates modification time of files after they are done
uploading. This can cause permissions issues on Linux platforms when
the user rclone is running as does not own the file uploaded, such as
when copying to a CIFS mount owned by another user. If this option is
enabled, rclone will no longer update the modtime after copying a file.`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: defaultEnc,
}},
}
fs.Register(fsi)
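The help text removed in the hunk above explains the behaviour behind --local-no-check-updated: once a growing file has been stat()ed, rclone only transfers and checksums the size it saw at that first stat. A sketch of the underlying trick, capping the reader at the initially observed size with the readers package used later in this file (the log file path is a placeholder):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"

	"github.com/rclone/rclone/lib/readers"
)

func main() {
	f, err := os.Open("/var/log/app.log") // placeholder: a file that may still be appended to
	if err != nil {
		panic(err)
	}
	fi, err := f.Stat()
	if err != nil {
		panic(err)
	}

	// Never read past the size from the first stat, so the upload stays
	// consistent even if the file keeps growing underneath us.
	var in io.ReadCloser = f
	in = readers.NewLimitedReadCloser(in, fi.Size())
	defer func() { _ = in.Close() }()

	n, err := io.Copy(ioutil.Discard, in)
	fmt.Println(n, err)
}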
@@ -167,18 +122,15 @@ enabled, rclone will no longer update the modtime after copying a file.`,
// Options defines the configuration for this backend
type Options struct {
FollowSymlinks bool `config:"copy_links"`
TranslateSymlinks bool `config:"links"`
SkipSymlinks bool `config:"skip_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"`
CaseSensitive bool `config:"case_sensitive"`
CaseInsensitive bool `config:"case_insensitive"`
NoSparse bool `config:"no_sparse"`
NoSetModTime bool `config:"no_set_modtime"`
Enc encoder.MultiEncoder `config:"encoding"`
FollowSymlinks bool `config:"copy_links"`
TranslateSymlinks bool `config:"links"`
SkipSymlinks bool `config:"skip_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"`
CaseSensitive bool `config:"case_sensitive"`
CaseInsensitive bool `config:"case_insensitive"`
}
// Fs represents a local filesystem rooted at root
@@ -194,22 +146,20 @@ type Fs struct {
warned map[string]struct{} // whether we have warned about this string
// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
objectMetaMu sync.RWMutex // global lock for Object metadata
lstat func(name string) (os.FileInfo, error)
objectHashesMu sync.Mutex // global lock for Object.hashes
}
// Object represents a local filesystem object
type Object struct {
fs *Fs // The Fs this object is part of
remote string // The remote path (encoded path)
path string // The local path (OS path)
// When using these items the fs.objectMetaMu must be held
size int64 // file metadata - always present
mode os.FileMode
modTime time.Time
hashes map[hash.Type]string // Hashes
// these are read only and don't need the mutex held
translatedLink bool // Is this object a translated link
fs *Fs // The Fs this object is part of
remote string // The remote path (encoded path)
path string // The local path (OS path)
size int64 // file metadata - always present
mode os.FileMode
modTime time.Time
hashes map[hash.Type]string // Hashes
translatedLink bool // Is this object a translated link
}
// ------------------------------------------------------------
@@ -239,12 +189,11 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
dev: devUnset,
lstat: os.Lstat,
}
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
f.root = cleanRootPath(root, f.opt.NoUNC)
f.features = (&fs.Features{
CaseInsensitive: f.caseInsensitive(),
CanHaveEmptyDirectories: true,
IsLocal: true,
SlowHash: true,
}).Fill(f)
if opt.FollowSymlinks {
f.lstat = os.Stat
@@ -285,7 +234,7 @@ func (f *Fs) Name() string {
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.opt.Enc.ToStandardPath(filepath.ToSlash(f.root))
return enc.ToStandardPath(filepath.ToSlash(f.root))
}
// String converts this Fs to a string
@@ -401,7 +350,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
err = errors.Wrapf(err, "failed to open directory %q", dir)
fs.Errorf(dir, "%v", err)
if isPerm {
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
err = nil // ignore error but fail sync
}
return nil, err
@@ -437,7 +386,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if fierr != nil {
err = errors.Wrapf(err, "failed to read directory %q", namepath)
fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue
}
fis = append(fis, fi)
@@ -460,7 +409,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Skip bad symlinks
err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
fs.Errorf(newRemote, "Listing error: %v", err)
err = accounting.Stats(ctx).Error(err)
accounting.Stats(ctx).Error(err)
continue
}
if err != nil {
@@ -494,7 +443,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
func (f *Fs) cleanRemote(dir, filename string) (remote string) {
remote = path.Join(dir, f.opt.Enc.ToStandardName(filename))
remote = path.Join(dir, enc.ToStandardName(filename))
if !utf8.ValidString(filename) {
f.warnedMu.Lock()
@@ -508,7 +457,7 @@ func (f *Fs) cleanRemote(dir, filename string) (remote string) {
}
func (f *Fs) localPath(name string) string {
return filepath.Join(f.root, filepath.FromSlash(f.opt.Enc.FromStandardPath(name)))
return filepath.Join(f.root, filepath.FromSlash(enc.FromStandardPath(name)))
}
// Put the Object to the local filesystem
@@ -554,10 +503,6 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// Precision of the file system
func (f *Fs) Precision() (precision time.Duration) {
if f.opt.NoSetModTime {
return fs.ModTimeNotSupported
}
f.precisionOk.Do(func() {
f.precision = f.readPrecision()
})
@@ -616,25 +561,20 @@ func (f *Fs) readPrecision() (precision time.Duration) {
return
}
// Purge deletes all the files in the directory
// Purge deletes all the files and directories
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
dir = f.localPath(dir)
fi, err := f.lstat(dir)
func (f *Fs) Purge(ctx context.Context) error {
fi, err := f.lstat(f.root)
if err != nil {
// already purged
if os.IsNotExist(err) {
return fs.ErrorDirNotFound
}
return err
}
if !fi.Mode().IsDir() {
return errors.Errorf("can't purge non directory: %q", dir)
return errors.Errorf("can't purge non directory: %q", f.root)
}
return os.RemoveAll(dir)
return os.RemoveAll(f.root)
}
// Move src to this remote using server side move operations.
@@ -655,9 +595,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Temporary Object under construction
dstObj := f.newObject(remote)
dstObj.fs.objectMetaMu.RLock()
dstObjMode := dstObj.mode
dstObj.fs.objectMetaMu.RUnlock()
// Check it is a file if it exists
err := dstObj.lstat()
@@ -665,7 +602,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// OK
} else if err != nil {
return nil, err
} else if !dstObj.fs.isRegular(dstObjMode) {
} else if !dstObj.fs.isRegular(dstObj.mode) {
// It isn't a file
return nil, errors.New("can't move file onto non-file")
}
@@ -752,50 +689,6 @@ func (f *Fs) Hashes() hash.Set {
return hash.Supported()
}
var commandHelp = []fs.CommandHelp{
{
Name: "noop",
Short: "A null operation for testing backend commands",
Long: `This is a test command which has some options
you can try to change the output.`,
Opts: map[string]string{
"echo": "echo the input arguments",
"error": "return an error based on option value",
},
},
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
switch name {
case "noop":
if txt, ok := opt["error"]; ok {
if txt == "" {
txt = "unspecified error"
}
return nil, errors.New(txt)
}
if _, ok := opt["echo"]; ok {
out := map[string]interface{}{}
out["name"] = name
out["arg"] = arg
out["opt"] = opt
return out, nil
}
return nil, nil
default:
return nil, fs.ErrorCommandNotFound
}
}
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -819,31 +712,19 @@ func (o *Object) Remote() string {
// Hash returns the requested hash of a file as a lowercase hex string
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
// Check that the underlying file hasn't changed
o.fs.objectMetaMu.RLock()
oldtime := o.modTime
oldsize := o.size
o.fs.objectMetaMu.RUnlock()
err := o.lstat()
var changed bool
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
// If file not found then we assume any accumulated
// hashes are OK - this will error on Open
changed = true
} else {
return "", errors.Wrap(err, "hash: failed to stat")
}
} else {
o.fs.objectMetaMu.RLock()
changed = !o.modTime.Equal(oldtime) || oldsize != o.size
o.fs.objectMetaMu.RUnlock()
return "", errors.Wrap(err, "hash: failed to stat")
}
o.fs.objectMetaMu.RLock()
o.fs.objectHashesMu.Lock()
hashes := o.hashes
hashValue, hashFound := o.hashes[r]
o.fs.objectMetaMu.RUnlock()
o.fs.objectHashesMu.Unlock()
if changed || !hashFound {
if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil || !hashFound {
var in io.ReadCloser
if !o.translatedLink {
@@ -855,14 +736,9 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
} else {
in, err = o.openTranslatedLink(0, -1)
}
// If not checking for updates, only read size given
if o.fs.opt.NoCheckUpdated {
in = readers.NewLimitedReadCloser(in, o.size)
}
if err != nil {
return "", errors.Wrap(err, "hash: failed to open")
}
var hashes map[hash.Type]string
hashes, err = hash.StreamTypes(in, hash.NewHashSet(r))
closeErr := in.Close()
if err != nil {
@@ -872,36 +748,29 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
return "", errors.Wrap(closeErr, "hash: failed to close")
}
hashValue = hashes[r]
o.fs.objectMetaMu.Lock()
o.fs.objectHashesMu.Lock()
if o.hashes == nil {
o.hashes = hashes
} else {
o.hashes[r] = hashValue
}
o.fs.objectMetaMu.Unlock()
o.fs.objectHashesMu.Unlock()
}
return hashValue, nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
o.fs.objectMetaMu.RLock()
defer o.fs.objectMetaMu.RUnlock()
return o.size
}
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
o.fs.objectMetaMu.RLock()
defer o.fs.objectMetaMu.RUnlock()
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if o.fs.opt.NoSetModTime {
return nil
}
var err error
if o.translatedLink {
err = lChtimes(o.path, modTime, modTime)
@@ -917,9 +786,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
// Storable returns a boolean showing if this object is storable
func (o *Object) Storable() bool {
o.fs.objectMetaMu.RLock()
mode := o.mode
o.fs.objectMetaMu.RUnlock()
if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks {
if !o.fs.opt.SkipSymlinks {
fs.Logf(o, "Can't follow symlink without -L/--copy-links")
@@ -952,15 +819,11 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
if err != nil {
return 0, errors.Wrap(err, "can't read status of source file while transferring")
}
file.o.fs.objectMetaMu.RLock()
oldtime := file.o.modTime
oldsize := file.o.size
file.o.fs.objectMetaMu.RUnlock()
if oldsize != fi.Size() {
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", oldsize, fi.Size()))
if file.o.size != fi.Size() {
return 0, errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size())
}
if !oldtime.Equal(fi.ModTime()) {
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", oldtime, fi.ModTime()))
if !file.o.modTime.Equal(fi.ModTime()) {
return 0, errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime())
}
}
@@ -977,9 +840,9 @@ func (file *localOpenFile) Close() (err error) {
err = file.in.Close()
if err == nil {
if file.hash.Size() == file.o.Size() {
file.o.fs.objectMetaMu.Lock()
file.o.fs.objectHashesMu.Lock()
file.o.hashes = file.hash.Sums()
file.o.fs.objectMetaMu.Unlock()
file.o.fs.objectHashesMu.Unlock()
}
}
return err
@@ -1004,7 +867,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
offset, limit = x.Decode(o.size)
case *fs.HashesOption:
if x.Hashes.Count() > 0 {
hasher, err = hash.NewMultiHasherTypes(x.Hashes)
@@ -1019,13 +882,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
}
// If not checking updated then limit to current size. This means if
// file is being extended, readers will read a o.Size() bytes rather
// than the new size making for a consistent upload.
if limit < 0 && o.fs.opt.NoCheckUpdated {
limit = o.size
}
// Handle a translated link
if o.translatedLink {
return o.openTranslatedLink(offset, limit)
@@ -1100,20 +956,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if !o.translatedLink {
f, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
if runtime.GOOS == "windows" && os.IsPermission(err) {
// If permission denied on Windows might be trying to update a
// hidden file, in which case try opening without CREATE
// See: https://stackoverflow.com/questions/13215716/ioerror-errno-13-permission-denied-when-trying-to-open-hidden-file-in-w-mod
f, err = file.OpenFile(o.path, os.O_WRONLY|os.O_TRUNC, 0666)
if err != nil {
return err
}
} else {
return err
}
return err
}
// Pre-allocate the file for performance reasons
err = file.PreAllocate(src.Size(), f)
err = preAllocate(src.Size(), f)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
@@ -1162,9 +1008,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// All successful so update the hashes
if hasher != nil {
o.fs.objectMetaMu.Lock()
o.fs.objectHashesMu.Lock()
o.hashes = hasher.Sums()
o.fs.objectMetaMu.Unlock()
o.fs.objectHashesMu.Unlock()
}
// Set the mtime
@@ -1177,8 +1023,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.lstat()
}
var sparseWarning sync.Once
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
@@ -1202,47 +1046,29 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
return nil, err
}
// Pre-allocate the file for performance reasons
err = file.PreAllocate(size, out)
err = preAllocate(size, out)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
if !f.opt.NoSparse && file.SetSparseImplemented {
sparseWarning.Do(func() {
fs.Infof(nil, "Writing sparse files: use --local-no-sparse or --multi-thread-streams 0 to disable")
})
// Set the file to be a sparse file (important on Windows)
err = file.SetSparse(out)
if err != nil {
fs.Errorf(o, "Failed to set sparse: %v", err)
}
}
return out, nil
}
// setMetadata sets the file info from the os.FileInfo passed in
func (o *Object) setMetadata(info os.FileInfo) {
// if not checking updated then don't update the stat
if o.fs.opt.NoCheckUpdated && !o.modTime.IsZero() {
return
// Don't overwrite the info if we don't need to
// this avoids upsetting the race detector
if o.size != info.Size() {
o.size = info.Size()
}
o.fs.objectMetaMu.Lock()
o.size = info.Size()
o.modTime = info.ModTime()
o.mode = info.Mode()
o.fs.objectMetaMu.Unlock()
// On Windows links read as 0 size so set the correct size here
if runtime.GOOS == "windows" && o.translatedLink {
linkdst, err := os.Readlink(o.path)
if err != nil {
fs.Errorf(o, "Failed to read link size: %v", err)
} else {
o.size = int64(len(linkdst))
}
if !o.modTime.Equal(info.ModTime()) {
o.modTime = info.ModTime()
}
if o.mode != info.Mode() {
o.mode = info.Mode()
}
}
// Stat an Object into info
// Stat a Object into info
func (o *Object) lstat() error {
info, err := o.fs.lstat(o.path)
if err == nil {
@@ -1256,22 +1082,22 @@ func (o *Object) Remove(ctx context.Context) error {
return remove(o.path)
}
func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
func cleanRootPath(s string, noUNC bool) string {
if runtime.GOOS == "windows" {
s = filepath.ToSlash(s)
vol := filepath.VolumeName(s)
s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s)
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
s = filepath.ToSlash(s)
vol := filepath.VolumeName(s)
s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s)
if !noUNC {
// Convert to UNC
s = file.UNCPath(s)
s = uncPath(s)
}
return s
}
@@ -1285,6 +1111,28 @@ func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
return s
}
// Pattern to match a windows absolute path: "c:\" and similar
var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)
// uncPath converts an absolute Windows path
// to a UNC long path.
func uncPath(l string) string {
// If prefix is "\\", we already have a UNC path or server.
if strings.HasPrefix(l, `\\`) {
// If already long path, just keep it
if strings.HasPrefix(l, `\\?\`) {
return l
}
// Trim "\\" from path and add UNC prefix.
return `\\?\UNC\` + strings.TrimPrefix(l, `\\`)
}
if isAbsWinDrive.MatchString(l) {
return `\\?\` + l
}
return l
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
@@ -1292,7 +1140,6 @@ var (
_ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.Commander = &Fs{}
_ fs.OpenWriterAter = &Fs{}
_ fs.Object = &Object{}
)
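A recurring change in local.go above is the move from a hash-only mutex to a single Fs-wide RWMutex (objectMetaMu) protecting size, modTime, mode and hashes on every Object. A stripped-down sketch of that locking pattern, with the types reduced to what the illustration needs:

package main

import (
	"fmt"
	"sync"
	"time"
)

// fsMeta stands in for the Fs-wide lock; every object shares it.
type fsMeta struct {
	objectMetaMu sync.RWMutex
}

type object struct {
	fs      *fsMeta
	size    int64
	modTime time.Time
}

// Size reads metadata under the shared read lock.
func (o *object) Size() int64 {
	o.fs.objectMetaMu.RLock()
	defer o.fs.objectMetaMu.RUnlock()
	return o.size
}

// setMetadata updates metadata under the write lock, as setMetadata does above.
func (o *object) setMetadata(size int64, modTime time.Time) {
	o.fs.objectMetaMu.Lock()
	o.size = size
	o.modTime = modTime
	o.fs.objectMetaMu.Unlock()
}

func main() {
	o := &object{fs: &fsMeta{}}
	o.setMetadata(42, time.Now())
	fmt.Println(o.Size())
}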

View File

@@ -6,6 +6,7 @@ import (
"os"
"path"
"path/filepath"
"runtime"
"testing"
"time"
@@ -88,6 +89,9 @@ func TestSymlink(t *testing.T) {
// Object viewed as symlink
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
if runtime.GOOS == "windows" {
file2.Size = 0 // symlinks are 0 length under Windows
}
// Object viewed as destination
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
@@ -117,6 +121,9 @@ func TestSymlink(t *testing.T) {
// Create a symlink
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
if runtime.GOOS == "windows" {
file3.Size = 0 // symlinks are 0 length under Windows
}
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
@@ -135,7 +142,9 @@ func TestSymlink(t *testing.T) {
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
require.NoError(t, err)
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
assert.Equal(t, int64(8), o.Size())
if runtime.GOOS != "windows" {
assert.Equal(t, int64(8), o.Size())
}
// Check that NewObject doesn't see the non suffixed version
_, err = r.Flocal.NewObject(ctx, "symlink2.txt")

View File

@@ -0,0 +1,10 @@
//+build !windows,!linux
package local
import "os"
// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
return nil
}

View File

@@ -1,6 +1,6 @@
//+build linux
package file
package local
import (
"os"
@@ -18,12 +18,8 @@ var (
fallocFlagsIndex int32
)
// PreallocateImplemented is a constant indicating whether the
// implementation of Preallocate actually does anything.
const PreallocateImplemented = true
// PreAllocate the file for performance reasons
func PreAllocate(size int64, out *os.File) error {
// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
if size <= 0 {
return nil
}
@@ -48,12 +44,3 @@ again:
// }
return err
}
// SetSparseImplemented is a constant indicating whether the
// implementation of SetSparse actually does anything.
const SetSparseImplemented = false
// SetSparse makes the file be a sparse file
func SetSparse(out *os.File) error {
return nil
}
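PreAllocate on Linux (above) asks the kernel to reserve blocks with fallocate before data is written, and the real code retries with different flag combinations. A hedged sketch of the basic call through golang.org/x/sys/unix, without that fallback logic (the path and size are placeholders):

// +build linux

package main

import (
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	out, err := os.Create("/tmp/preallocate-demo") // placeholder path
	if err != nil {
		panic(err)
	}
	defer out.Close()

	const size = 1 << 20 // reserve 1 MiB up front
	// FALLOC_FL_KEEP_SIZE reserves blocks without changing the visible file length.
	if err := unix.Fallocate(int(out.Fd()), unix.FALLOC_FL_KEEP_SIZE, 0, size); err != nil {
		panic(err)
	}
}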

View File

@@ -1,6 +1,6 @@
//+build windows
package file
package local
import (
"os"
@@ -32,12 +32,8 @@ type ioStatusBlock struct {
Status, Information uintptr
}
// PreallocateImplemented is a constant indicating whether the
// implementation of Preallocate actually does anything.
const PreallocateImplemented = true
// PreAllocate the file for performance reasons
func PreAllocate(size int64, out *os.File) error {
// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
if size <= 0 {
return nil
}
@@ -81,21 +77,3 @@ func PreAllocate(size int64, out *os.File) error {
return nil
}
const (
FSCTL_SET_SPARSE = 0x000900c4
)
// SetSparseImplemented is a constant indicating whether the
// implementation of SetSparse actually does anything.
const SetSparseImplemented = true
// SetSparse makes the file be a sparse file
func SetSparse(out *os.File) error {
var bytesReturned uint32
err := syscall.DeviceIoControl(syscall.Handle(out.Fd()), FSCTL_SET_SPARSE, nil, 0, nil, 0, &bytesReturned, nil)
if err != nil {
return errors.Wrap(err, "DeviceIoControl FSCTL_SET_SPARSE")
}
return nil
}
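SetSparse above marks a file sparse via DeviceIoControl with FSCTL_SET_SPARSE so that multi-thread downloads do not force the OS to zero-fill large regions. A sketch of how a caller, like the OpenWriterAt hunk earlier, might combine it with pre-allocation using the lib/file helpers from the newer side of this diff (the file name and size are placeholders):

package main

import (
	"fmt"
	"os"

	"github.com/rclone/rclone/lib/file"
)

func main() {
	out, err := file.OpenFile("demo.bin", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) // placeholder name
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// Reserve the space first, then mark the file sparse where supported
	// (on Windows this issues FSCTL_SET_SPARSE; elsewhere it is a no-op).
	if err := file.PreAllocate(1<<20, out); err != nil {
		fmt.Println("pre-allocate failed:", err)
	}
	if file.SetSparseImplemented {
		if err := file.SetSparse(out); err != nil {
			fmt.Println("set sparse failed:", err)
		}
	}
}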

View File

@@ -5,13 +5,56 @@ import (
"testing"
)
var uncTestPaths = []string{
`C:\Ba*d\P|a?t<h>\Windows\Folder`,
`C:\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\UNC\server\share\Desktop`,
`\\?\unC\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`C:\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`C:\AbsoluteToRoot\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\server\share\Desktop`,
`\\?\UNC\\share\folder\Desktop`,
`\\server\share`,
}
var uncTestPathsResults = []string{
`\\?\C:\Ba*d\P|a?t<h>\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\UNC\server\share\Desktop`,
`\\?\unC\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\UNC\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\C:\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\C:\AbsoluteToRoot\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\?\UNC\server\share\Desktop`,
`\\?\UNC\\share\folder\Desktop`,
`\\?\UNC\server\share`,
}
// Test that UNC paths are converted.
func TestUncPaths(t *testing.T) {
for i, p := range uncTestPaths {
unc := uncPath(p)
if unc != uncTestPathsResults[i] {
t.Fatalf("UNC test path\nInput:%s\nOutput:%s\nExpected:%s", p, unc, uncTestPathsResults[i])
}
// Test we don't add more.
unc = uncPath(unc)
if unc != uncTestPathsResults[i] {
t.Fatalf("UNC test path\nInput:%s\nOutput:%s\nExpected:%s", p, unc, uncTestPathsResults[i])
}
}
}
// Test Windows character replacements
var testsWindows = [][2]string{
{`c:\temp`, `c:\temp`},
{`\\?\UNC\theserver\dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
{`//?/UNC/theserver/dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
{`c:/temp`, `c:\temp`},
{`C:/temp/file.txt`, `C:\temp\file.txt`},
{`/temp/file.txt`, `\temp\file.txt`},
{`c:\!\"#¤%&/()=;:*^?+-`, `c:\!\＂#¤%&\()=;：＊^？+-`},
{`c:\<>"|?*:&\<>"|?*:&\<>"|?*:&`, `c:\＜＞＂｜？＊：&\＜＞＂｜？＊：&\＜＞＂｜？＊：&`},
}
@@ -21,7 +64,7 @@ func TestCleanWindows(t *testing.T) {
t.Skipf("windows only")
}
for _, test := range testsWindows {
got := cleanRootPath(test[0], true, defaultEnc)
got := cleanRootPath(test[0], true)
expect := test[1]
if got != expect {
t.Fatalf("got %q, expected %q", got, expect)

View File

@@ -117,7 +117,7 @@ type ListItem struct {
Name string `json:"name"`
Home string `json:"home"`
Size int64 `json:"size"`
Mtime uint64 `json:"mtime,omitempty"`
Mtime int64 `json:"mtime,omitempty"`
Hash string `json:"hash,omitempty"`
VirusScan string `json:"virus_scan,omitempty"`
Tree string `json:"tree,omitempty"`
@@ -159,6 +159,71 @@ type FolderInfoResponse struct {
Email string `json:"email"`
}
// ShardInfoResponse ...
type ShardInfoResponse struct {
Email string `json:"email"`
Body struct {
Video []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"video"`
ViewDirect []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view_direct"`
WeblinkView []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_view"`
WeblinkVideo []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_video"`
WeblinkGet []struct {
Count int `json:"count"`
URL string `json:"url"`
} `json:"weblink_get"`
Stock []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"stock"`
WeblinkThumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_thumbnails"`
PublicUpload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"public_upload"`
Auth []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"auth"`
Web []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"web"`
View []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view"`
Upload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"upload"`
Get []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"get"`
Thumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"thumbnails"`
} `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}
// CleanupResponse ...
type CleanupResponse struct {
Email string `json:"email"`
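ShardInfoResponse above mirrors the JSON returned by the /api/m1/dispatcher call; the mailru backend (next file) takes Body.Upload[0].URL as its upload shard, as the uploadShard hunk below shows. A small sketch of decoding such a response with a reduced struct; the payload here is invented purely for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// shardInfoResponse keeps only the fields the shard lookup actually reads.
type shardInfoResponse struct {
	Body struct {
		Upload []struct {
			URL string `json:"url"`
		} `json:"upload"`
	} `json:"body"`
	Status int `json:"status"`
}

func main() {
	// Minimal, made-up example payload.
	payload := []byte(`{"body":{"upload":[{"url":"https://upload.example.invalid/"}]},"status":200}`)

	var info shardInfoResponse
	if err := json.Unmarshal(payload, &info); err != nil {
		panic(err)
	}
	if len(info.Body.Upload) == 0 {
		panic("no upload shard in dispatcher response")
	}
	fmt.Println("upload shard:", info.Body.Upload[0].URL)
}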

View File

@@ -24,26 +24,26 @@ import (
"github.com/rclone/rclone/backend/mailru/mrhash"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
"github.com/pkg/errors"
"golang.org/x/oauth2"
)
const enc = encodings.Mailru
// Global constants
const (
minSleepPacer = 10 * time.Millisecond
@@ -193,31 +193,21 @@ facilitate remote troubleshooting of backend issues. Strict meaning of
flags is not documented and not guaranteed to persist between releases.
Quirks will be removed when the backend grows stable.
Supported quirks: atomicmkdir binlist gzip insecure retry400`,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: (encoder.Display |
encoder.EncodeWin | // :?"*<>|
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Username string `config:"user"`
Password string `config:"pass"`
UserAgent string `config:"user_agent"`
CheckHash bool `config:"check_hash"`
SpeedupEnable bool `config:"speedup_enable"`
SpeedupPatterns string `config:"speedup_file_patterns"`
SpeedupMaxDisk fs.SizeSuffix `config:"speedup_max_disk"`
SpeedupMaxMem fs.SizeSuffix `config:"speedup_max_memory"`
Quirks string `config:"quirks"`
Enc encoder.MultiEncoder `config:"encoding"`
Username string `config:"user"`
Password string `config:"pass"`
UserAgent string `config:"user_agent"`
CheckHash bool `config:"check_hash"`
SpeedupEnable bool `config:"speedup_enable"`
SpeedupPatterns string `config:"speedup_file_patterns"`
SpeedupMaxDisk fs.SizeSuffix `config:"speedup_max_disk"`
SpeedupMaxMem fs.SizeSuffix `config:"speedup_max_memory"`
Quirks string `config:"quirks"`
}
// retryErrorCodes is a slice of error codes that we will retry
@@ -403,7 +393,7 @@ func (q *quirks) parseQuirks(option string) {
// "Accept-Encoding: gzip" header. However, enabling compression
// might be good for performance.
// Use this quirk to investigate the performance impact.
// Remove this quirk if performance does not improve.
// Remove this quirk if perfomance does not improve.
q.gzip = true
case "insecure":
// The mailru disk-o protocol is not documented. To compare HTTP
@@ -617,7 +607,7 @@ func (f *Fs) readItemMetaData(ctx context.Context, path string) (entry fs.DirEnt
Path: "/api/m1/file",
Parameters: url.Values{
"access_token": {token},
"home": {f.opt.Enc.FromStandardPath(path)},
"home": {enc.FromStandardPath(path)},
"offset": {"0"},
"limit": {strconv.Itoa(maxInt32)},
},
@@ -652,18 +642,13 @@ func (f *Fs) readItemMetaData(ctx context.Context, path string) (entry fs.DirEnt
// =0 - for an empty directory
// >0 - for a non-empty directory
func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.DirEntry, dirSize int, err error) {
remote, err := f.relPath(f.opt.Enc.ToStandardPath(item.Home))
remote, err := f.relPath(enc.ToStandardPath(item.Home))
if err != nil {
return nil, -1, err
}
mTime := int64(item.Mtime)
if mTime < 0 {
fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
mTime = 0
}
switch item.Kind {
case "folder":
dir := fs.NewDir(remote, time.Unix(mTime, 0)).SetSize(item.Size)
dir := fs.NewDir(remote, time.Unix(item.Mtime, 0)).SetSize(item.Size)
dirSize := item.Count.Files + item.Count.Folders
return dir, dirSize, nil
case "file":
@@ -677,7 +662,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
hasMetaData: true,
size: item.Size,
mrHash: binHash,
modTime: time.Unix(mTime, 0),
modTime: time.Unix(item.Mtime, 0),
}
return file, -1, nil
default:
@@ -723,7 +708,7 @@ func (f *Fs) listM1(ctx context.Context, dirPath string, offset int, limit int)
params.Set("limit", strconv.Itoa(limit))
data := url.Values{}
data.Set("home", f.opt.Enc.FromStandardPath(dirPath))
data.Set("home", enc.FromStandardPath(dirPath))
opts := rest.Opts{
Method: "POST",
@@ -771,7 +756,7 @@ func (f *Fs) listBin(ctx context.Context, dirPath string, depth int) (entries fs
req := api.NewBinWriter()
req.WritePu16(api.OperationFolderList)
req.WriteString(f.opt.Enc.FromStandardPath(dirPath))
req.WriteString(enc.FromStandardPath(dirPath))
req.WritePu32(int64(depth))
req.WritePu32(int64(options))
req.WritePu32(0)
@@ -907,7 +892,7 @@ func (t *treeState) NextRecord() (fs.DirEntry, error) {
if (head & 4096) != 0 {
t.dunnoNodeID = r.ReadNBytes(api.DunnoNodeIDLength)
}
name := t.f.opt.Enc.FromStandardPath(string(r.ReadBytesByLength()))
name := enc.FromStandardPath(string(r.ReadBytesByLength()))
t.dunno1 = int(r.ReadULong())
t.dunno2 = 0
t.dunno3 = 0
@@ -1046,7 +1031,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) error {
req := api.NewBinWriter()
req.WritePu16(api.OperationCreateFolder)
req.WritePu16(0) // revision
req.WriteString(f.opt.Enc.FromStandardPath(path))
req.WriteString(enc.FromStandardPath(path))
req.WritePu32(0)
token, err := f.accessToken()
@@ -1168,12 +1153,12 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeWithCheck(ctx, dir, true, "rmdir")
}
// Purge deletes all the files in the directory
// Purge deletes all the files and the root directory
// Optional interface: Only implement this if you have a way of deleting
// all the files quicker than just running Remove() on the result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
func (f *Fs) Purge(ctx context.Context) error {
// fs.Debugf(f, ">>> Purge")
return f.purgeWithCheck(ctx, dir, false, "purge")
return f.purgeWithCheck(ctx, "", false, "purge")
}
// purgeWithCheck() removes the root directory.
@@ -1201,7 +1186,7 @@ func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) error {
return err
}
data := url.Values{"home": {f.opt.Enc.FromStandardPath(path)}}
data := url.Values{"home": {enc.FromStandardPath(path)}}
opts := rest.Opts{
Method: "POST",
Path: "/api/m1/file/remove",
@@ -1258,8 +1243,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
data := url.Values{}
data.Set("home", f.opt.Enc.FromStandardPath(srcPath))
data.Set("folder", f.opt.Enc.FromStandardPath(parentDir(dstPath)))
data.Set("home", enc.FromStandardPath(srcPath))
data.Set("folder", enc.FromStandardPath(parentDir(dstPath)))
data.Set("email", f.opt.Username)
data.Set("x-email", f.opt.Username)
@@ -1297,7 +1282,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fmt.Errorf("copy failed with code %d", response.Status)
}
tmpPath := f.opt.Enc.ToStandardPath(response.Body)
tmpPath := enc.ToStandardPath(response.Body)
if tmpPath != dstPath {
// fs.Debugf(f, "rename temporary file %q -> %q\n", tmpPath, dstPath)
err = f.moveItemBin(ctx, tmpPath, dstPath, "rename temporary file")
@@ -1372,9 +1357,9 @@ func (f *Fs) moveItemBin(ctx context.Context, srcPath, dstPath, opName string) e
req := api.NewBinWriter()
req.WritePu16(api.OperationRename)
req.WritePu32(0) // old revision
req.WriteString(f.opt.Enc.FromStandardPath(srcPath))
req.WriteString(enc.FromStandardPath(srcPath))
req.WritePu32(0) // new revision
req.WriteString(f.opt.Enc.FromStandardPath(dstPath))
req.WriteString(enc.FromStandardPath(dstPath))
req.WritePu32(0) // dunno
opts := rest.Opts{
@@ -1456,7 +1441,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
// fs.Debugf(f, ">>> PublicLink %q", remote)
token, err := f.accessToken()
@@ -1465,7 +1450,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
}
data := url.Values{}
data.Set("home", f.opt.Enc.FromStandardPath(f.absPath(remote)))
data.Set("home", enc.FromStandardPath(f.absPath(remote)))
data.Set("email", f.opt.Username)
data.Set("x-email", f.opt.Username)
@@ -1867,30 +1852,30 @@ func (f *Fs) uploadShard(ctx context.Context) (string, error) {
return f.shardURL, nil
}
opts := rest.Opts{
RootURL: api.DispatchServerURL,
Method: "GET",
Path: "/u",
}
var (
res *http.Response
url string
err error
)
err = f.pacer.Call(func() (bool, error) {
res, err = f.srv.Call(ctx, &opts)
if err == nil {
url, err = readBodyWord(res)
}
return fserrors.ShouldRetry(err), err
})
token, err := f.accessToken()
if err != nil {
closeBody(res)
return "", err
}
f.shardURL = url
opts := rest.Opts{
Method: "GET",
Path: "/api/m1/dispatcher",
Parameters: url.Values{
"client_id": {api.OAuthClientID},
"access_token": {token},
},
}
var info api.ShardInfoResponse
err = f.pacer.Call(func() (bool, error) {
res, err := f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(res, err, f, &opts)
})
if err != nil {
return "", err
}
f.shardURL = info.Body.Upload[0].URL
f.shardExpiry = time.Now().Add(shardExpirySec * time.Second)
fs.Debugf(f, "new upload shard: %s", f.shardURL)
@@ -2030,7 +2015,7 @@ func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {
req := api.NewBinWriter()
req.WritePu16(api.OperationAddFile)
req.WritePu16(0) // revision
req.WriteString(o.fs.opt.Enc.FromStandardPath(o.absPath()))
req.WriteString(enc.FromStandardPath(o.absPath()))
req.WritePu64(o.size)
req.WritePu64(o.modTime.Unix())
req.WritePu32(0)
@@ -2122,29 +2107,21 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}
start, end, partialRequest := getTransferRange(o.size, options...)
headers := map[string]string{
"Accept": "*/*",
"Content-Type": "application/octet-stream",
}
if partialRequest {
rangeStr := fmt.Sprintf("bytes=%d-%d", start, end-1)
headers["Range"] = rangeStr
// headers["Content-Range"] = rangeStr
headers["Accept-Ranges"] = "bytes"
}
start, end, partial := getTransferRange(o.size, options...)
// TODO: set custom timeouts
opts := rest.Opts{
Method: "GET",
Options: options,
Path: url.PathEscape(strings.TrimLeft(o.fs.opt.Enc.FromStandardPath(o.absPath()), "/")),
Path: url.PathEscape(strings.TrimLeft(enc.FromStandardPath(o.absPath()), "/")),
Parameters: url.Values{
"client_id": {api.OAuthClientID},
"token": {token},
},
ExtraHeaders: headers,
ExtraHeaders: map[string]string{
"Accept": "*/*",
"Range": fmt.Sprintf("bytes=%d-%d", start, end-1),
},
}
var res *http.Response
@@ -2165,36 +2142,18 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, err
}
// Server should respond with Status 206 and Content-Range header to a range
// request. Status 200 (and no Content-Range) means a full-content response.
partialResponse := res.StatusCode == 206
var (
hasher gohash.Hash
wrapStream io.ReadCloser
)
if !partialResponse {
var hasher gohash.Hash
if !partial {
// Cannot check hash of partial download
hasher = mrhash.New()
}
wrapStream = &endHandler{
wrapStream := &endHandler{
ctx: ctx,
stream: res.Body,
hasher: hasher,
o: o,
server: server,
}
if partialRequest && !partialResponse {
fs.Debugf(o, "Server returned full content instead of range")
if start > 0 {
// Discard the beginning of the data
_, err = io.CopyN(ioutil.Discard, wrapStream, start)
if err != nil {
return nil, err
}
}
wrapStream = readers.NewLimitedReadCloser(wrapStream, end-start)
}
return wrapStream, nil
}
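The Open change above has to cope with a server that may answer a Range request with a full 200 response instead of a 206: in that case the beginning of the body is discarded and the stream is capped at the requested length. Below is a standalone sketch of that fallback using only the standard library; the helper names are invented for illustration.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// limitReadCloser caps an io.ReadCloser at n bytes while keeping Close.
type limitReadCloser struct {
	io.Reader
	io.Closer
}

func newLimitReadCloser(rc io.ReadCloser, n int64) io.ReadCloser {
	return &limitReadCloser{Reader: io.LimitReader(rc, n), Closer: rc}
}

// adjustForFullResponse turns a full-content body into the requested
// [start, end) window when the server ignored the Range header.
func adjustForFullResponse(body io.ReadCloser, gotPartial bool, start, end int64) (io.ReadCloser, error) {
	if gotPartial {
		return body, nil // server honoured the Range header, nothing to do
	}
	if start > 0 {
		// Throw away the bytes before the requested offset
		if _, err := io.CopyN(ioutil.Discard, body, start); err != nil {
			return nil, err
		}
	}
	return newLimitReadCloser(body, end-start), nil
}

func main() {
	full := ioutil.NopCloser(strings.NewReader("0123456789"))
	rc, _ := adjustForFullResponse(full, false, 2, 6)
	b, _ := ioutil.ReadAll(rc)
	fmt.Printf("%s\n", b) // 2345
}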


@@ -26,18 +26,19 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
mega "github.com/t3rm1n4l/go-mega"
)
const enc = encodings.Mega
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
@@ -82,24 +83,16 @@ than permanently deleting them. If you specify this then rclone will
permanently delete objects instead.`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: (encoder.Base |
encoder.EncodeInvalidUtf8),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
User string `config:"user"`
Pass string `config:"pass"`
Debug bool `config:"debug"`
HardDelete bool `config:"hard_delete"`
Enc encoder.MultiEncoder `config:"encoding"`
User string `config:"user"`
Pass string `config:"pass"`
Debug bool `config:"debug"`
HardDelete bool `config:"hard_delete"`
}
// Fs represents a remote mega
@@ -150,7 +143,7 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// parsePath parses a mega 'url'
// parsePath parses an mega 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
@@ -257,12 +250,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// splitNodePath splits nodePath into / separated parts, returning nil if it
// should refer to the root.
// It also encodes the parts into backend specific encoding
func (f *Fs) splitNodePath(nodePath string) (parts []string) {
func splitNodePath(nodePath string) (parts []string) {
nodePath = path.Clean(nodePath)
if nodePath == "." || nodePath == "/" {
return nil
}
nodePath = f.opt.Enc.FromStandardPath(nodePath)
nodePath = enc.FromStandardPath(nodePath)
return strings.Split(nodePath, "/")
}
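splitNodePath above reduces a remote path to its slash-separated parts, treating "." and "/" as the root. A quick illustration of the same cleaning and splitting behaviour, without the backend-specific name encoding, is shown here; the leading-slash trim is an addition for robustness in this sketch.

package main

import (
	"fmt"
	"path"
	"strings"
)

// splitNodePath returns the / separated parts of nodePath,
// or nil when the path refers to the root.
func splitNodePath(nodePath string) []string {
	nodePath = path.Clean(nodePath)
	if nodePath == "." || nodePath == "/" {
		return nil
	}
	nodePath = strings.TrimPrefix(nodePath, "/")
	return strings.Split(nodePath, "/")
}

func main() {
	fmt.Println(splitNodePath("/"))            // []
	fmt.Println(splitNodePath("a/b/../c"))     // [a c]
	fmt.Println(splitNodePath("dir/file.txt")) // [dir file.txt]
}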
@@ -270,7 +263,7 @@ func (f *Fs) splitNodePath(nodePath string) (parts []string) {
//
// It returns mega.ENOENT if it wasn't found
func (f *Fs) findNode(rootNode *mega.Node, nodePath string) (*mega.Node, error) {
parts := f.splitNodePath(nodePath)
parts := splitNodePath(nodePath)
if parts == nil {
return rootNode, nil
}
@@ -327,7 +320,7 @@ func (f *Fs) mkdir(rootNode *mega.Node, dir string) (node *mega.Node, err error)
f.mkdirMu.Lock()
defer f.mkdirMu.Unlock()
parts := f.splitNodePath(dir)
parts := splitNodePath(dir)
if parts == nil {
return rootNode, nil
}
@@ -429,7 +422,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
errors := 0
// similar to f.deleteNode(trash) but with HardDelete as true
for _, item := range items {
fs.Debugf(f, "Deleting trash %q", f.opt.Enc.ToStandardName(item.GetName()))
fs.Debugf(f, "Deleting trash %q", enc.ToStandardName(item.GetName()))
deleteErr := f.pacer.Call(func() (bool, error) {
err := f.srv.Delete(item, true)
return shouldRetry(err)
@@ -511,7 +504,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
var iErr error
_, err = f.list(ctx, dirNode, func(info *mega.Node) bool {
remote := path.Join(dir, f.opt.Enc.ToStandardName(info.GetName()))
remote := path.Join(dir, enc.ToStandardName(info.GetName()))
switch info.GetType() {
case mega.FOLDER, mega.ROOT, mega.INBOX, mega.TRASH:
d := fs.NewDir(remote, info.GetTimeStamp()).SetID(info.GetHash())
@@ -669,13 +662,13 @@ func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck("", false)
}
// move a file or folder (srcFs, srcRemote, info) to (f, dstRemote)
@@ -733,7 +726,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
if srcLeaf != dstLeaf {
//log.Printf("rename %q to %q", srcLeaf, dstLeaf)
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Rename(info, f.opt.Enc.FromStandardName(dstLeaf))
err = f.srv.Rename(info, enc.FromStandardName(dstLeaf))
return shouldRetry(err)
})
if err != nil {
@@ -836,7 +829,7 @@ func (f *Fs) Hashes() hash.Set {
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
root, err := f.findRoot(false)
if err != nil {
return "", errors.Wrap(err, "PublicLink failed to find root node")
@@ -871,7 +864,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
return errors.Errorf("MergeDirs failed to find node for: %v", srcDir)
}
// list the objects
// list the the objects
infos := []*mega.Node{}
_, err := f.list(ctx, srcDirNode, func(info *mega.Node) bool {
infos = append(infos, info)
@@ -882,13 +875,13 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
}
// move them into place
for _, info := range infos {
fs.Infof(srcDir, "merging %q", f.opt.Enc.ToStandardName(info.GetName()))
fs.Infof(srcDir, "merging %q", enc.ToStandardName(info.GetName()))
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Move(info, dstDirNode)
return shouldRetry(err)
})
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", f.opt.Enc.ToStandardName(info.GetName()), srcDir)
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", enc.ToStandardName(info.GetName()), srcDir)
}
}
// rmdir (into trash) the now empty source directory
@@ -1131,7 +1124,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var u *mega.Upload
err = o.fs.pacer.Call(func() (bool, error) {
u, err = o.fs.srv.NewUpload(dirNode, o.fs.opt.Enc.FromStandardName(leaf), size)
u, err = o.fs.srv.NewUpload(dirNode, enc.FromStandardName(leaf), size)
return shouldRetry(err)
})
if err != nil {


@@ -1,624 +0,0 @@
// Package memory provides an interface to an in memory object storage system
package memory
import (
"bytes"
"context"
"crypto/md5"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"path"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
)
var (
hashType = hash.MD5
// the object storage is persistent
buckets = newBucketsInfo()
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "memory",
Description: "In memory object storage system.",
NewFs: NewFs,
Options: []fs.Option{},
})
}
// Options defines the configuration for this backend
type Options struct {
}
// Fs represents a remote memory server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
features *fs.Features // optional features
}
// bucketsInfo holds info about all the buckets
type bucketsInfo struct {
mu sync.RWMutex
buckets map[string]*bucketInfo
}
func newBucketsInfo() *bucketsInfo {
return &bucketsInfo{
buckets: make(map[string]*bucketInfo, 16),
}
}
// getBucket gets a named bucket or nil
func (bi *bucketsInfo) getBucket(name string) (b *bucketInfo) {
bi.mu.RLock()
b = bi.buckets[name]
bi.mu.RUnlock()
return b
}
// makeBucket returns the bucket or makes it
func (bi *bucketsInfo) makeBucket(name string) (b *bucketInfo) {
bi.mu.Lock()
defer bi.mu.Unlock()
b = bi.buckets[name]
if b != nil {
return b
}
b = newBucketInfo()
bi.buckets[name] = b
return b
}
// deleteBucket deletes the bucket or returns an error
func (bi *bucketsInfo) deleteBucket(name string) error {
bi.mu.Lock()
defer bi.mu.Unlock()
b := bi.buckets[name]
if b == nil {
return fs.ErrorDirNotFound
}
if !b.isEmpty() {
return fs.ErrorDirectoryNotEmpty
}
delete(bi.buckets, name)
return nil
}
// getObjectData gets an object from (bucketName, bucketPath) or nil
func (bi *bucketsInfo) getObjectData(bucketName, bucketPath string) (od *objectData) {
b := bi.getBucket(bucketName)
if b == nil {
return nil
}
return b.getObjectData(bucketPath)
}
// updateObjectData updates an object from (bucketName, bucketPath)
func (bi *bucketsInfo) updateObjectData(bucketName, bucketPath string, od *objectData) {
b := bi.makeBucket(bucketName)
b.mu.Lock()
b.objects[bucketPath] = od
b.mu.Unlock()
}
// removeObjectData removes an object from (bucketName, bucketPath) returning true if removed
func (bi *bucketsInfo) removeObjectData(bucketName, bucketPath string) (removed bool) {
b := bi.getBucket(bucketName)
if b != nil {
b.mu.Lock()
od := b.objects[bucketPath]
if od != nil {
delete(b.objects, bucketPath)
removed = true
}
b.mu.Unlock()
}
return removed
}
// bucketInfo holds info about a single bucket
type bucketInfo struct {
mu sync.RWMutex
objects map[string]*objectData
}
func newBucketInfo() *bucketInfo {
return &bucketInfo{
objects: make(map[string]*objectData, 16),
}
}
// getObjectData gets the object data for name or nil
func (bi *bucketInfo) getObjectData(name string) (od *objectData) {
bi.mu.RLock()
od = bi.objects[name]
bi.mu.RUnlock()
return od
}
// isEmpty reports whether the bucket contains no objects
func (bi *bucketInfo) isEmpty() (empty bool) {
bi.mu.RLock()
empty = len(bi.objects) == 0
bi.mu.RUnlock()
return empty
}
// the object data and metadata
type objectData struct {
modTime time.Time
hash string
mimeType string
data []byte
}
// Object describes a memory object
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
od *objectData // the object data
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Memory root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
}
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
return bucket.Split(path.Join(f.root, rootRelativePath))
}
// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
root = strings.Trim(root, "/")
f := &Fs{
name: name,
root: root,
opt: *opt,
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(f)
if f.rootBucket != "" && f.rootDirectory != "" {
od := buckets.getObjectData(f.rootBucket, f.rootDirectory)
if od != nil {
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
}
f.setRoot(newRoot)
// return an error with an fs which points to the parent
err = fs.ErrorIsFile
}
}
return f, err
}
// newObject makes an object from a remote and an objectData
func (f *Fs) newObject(remote string, od *objectData) *Object {
return &Object{fs: f, remote: remote, od: od}
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
bucket, bucketPath := f.split(remote)
od := buckets.getObjectData(bucket, bucketPath)
if od == nil {
return nil, fs.ErrorObjectNotFound
}
return f.newObject(remote, od), nil
}
// listFn is called from list to handle an object.
type listFn func(remote string, entry fs.DirEntry, isDirectory bool) error
// list the buckets to fn
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) (err error) {
if prefix != "" {
prefix += "/"
}
if directory != "" {
directory += "/"
}
b := buckets.getBucket(bucket)
if b == nil {
return fs.ErrorDirNotFound
}
b.mu.RLock()
defer b.mu.RUnlock()
dirs := make(map[string]struct{})
for absPath, od := range b.objects {
if strings.HasPrefix(absPath, directory) {
remote := absPath[len(prefix):]
if !recurse {
localPath := absPath[len(directory):]
slash := strings.IndexRune(localPath, '/')
if slash >= 0 {
// send a directory if have a slash
dir := directory + localPath[:slash]
if addBucket {
dir = path.Join(bucket, dir)
}
_, found := dirs[dir]
if !found {
err = fn(dir, fs.NewDir(dir, time.Time{}), true)
if err != nil {
return err
}
dirs[dir] = struct{}{}
}
continue // don't send this file if not recursing
}
}
// send an object
if addBucket {
remote = path.Join(bucket, remote)
}
err = fn(remote, f.newObject(remote, od), false)
if err != nil {
return err
}
}
}
return nil
}
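Because the memory backend stores objects in a flat map keyed by path, the list function above has to synthesise directory entries: any key that still contains a slash below the requested directory contributes a deduplicated pseudo-directory instead of an object. A reduced, standalone sketch of that idea:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// listLevel returns the immediate children of dir given a flat set of
// object keys, synthesising directory names from deeper keys.
func listLevel(keys []string, dir string) (files, dirs []string) {
	if dir != "" {
		dir += "/"
	}
	seen := map[string]bool{}
	for _, k := range keys {
		if !strings.HasPrefix(k, dir) {
			continue
		}
		rest := k[len(dir):]
		if i := strings.IndexRune(rest, '/'); i >= 0 {
			// deeper object: report its first path segment as a directory once
			d := dir + rest[:i]
			if !seen[d] {
				seen[d] = true
				dirs = append(dirs, d)
			}
			continue
		}
		files = append(files, k)
	}
	sort.Strings(files)
	sort.Strings(dirs)
	return files, dirs
}

func main() {
	keys := []string{"a.txt", "docs/b.txt", "docs/sub/c.txt", "docs/d.txt"}
	files, dirs := listLevel(keys, "")
	fmt.Println(files, dirs) // [a.txt] [docs]
	files, dirs = listLevel(keys, "docs")
	fmt.Println(files, dirs) // [docs/b.txt docs/d.txt] [docs/sub]
}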
// listDir lists the bucket to the entries
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
// List the objects and directories
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, entry fs.DirEntry, isDirectory bool) error {
entries = append(entries, entry)
return nil
})
return entries, err
}
// listBuckets lists the buckets to entries
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
buckets.mu.RLock()
defer buckets.mu.RUnlock()
for name := range buckets.buckets {
entries = append(entries, fs.NewDir(name, time.Time{}))
}
return entries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer fslog.Trace(dir, "")("entries = %q, err = %v", &entries, &err)
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, entry fs.DirEntry, isDirectory bool) error {
return list.Add(entry)
})
}
if bucket == "" {
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
bucket := entry.Remote()
err = listR(bucket, "", f.rootDirectory, true)
if err != nil {
return err
}
}
} else {
err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
if err != nil {
return err
}
}
return list.Flush()
}
// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
fs := &Object{
fs: f,
remote: src.Remote(),
od: &objectData{
modTime: src.ModTime(ctx),
},
}
return fs, fs.Update(ctx, in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
bucket, _ := f.split(dir)
buckets.makeBucket(bucket)
return nil
}
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
bucket, directory := f.split(dir)
if bucket == "" || directory != "" {
return nil
}
return buckets.deleteBucket(bucket)
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
return time.Nanosecond
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
_ = buckets.makeBucket(dstBucket)
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcBucket, srcPath := srcObj.split()
od := buckets.getObjectData(srcBucket, srcPath)
if od == nil {
return nil, fs.ErrorObjectNotFound
}
buckets.updateObjectData(dstBucket, dstPath, od)
return f.NewObject(ctx, remote)
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hashType)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the hash of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hashType {
return "", hash.ErrUnsupported
}
if o.od.hash == "" {
sum := md5.Sum(o.od.data)
o.od.hash = hex.EncodeToString(sum[:])
}
return o.od.hash, nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return int64(len(o.od.data))
}
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
//
// SHA-1 will also be updated once the request has completed.
func (o *Object) ModTime(ctx context.Context) (result time.Time) {
return o.od.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
o.od.modTime = modTime
return nil
}
// Storable returns if this object is storable
func (o *Object) Storable() bool {
return true
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
offset, limit = x.Decode(int64(len(o.od.data)))
case *fs.SeekOption:
offset = x.Offset
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if offset > int64(len(o.od.data)) {
offset = int64(len(o.od.data))
}
data := o.od.data[offset:]
if limit >= 0 {
if limit > int64(len(data)) {
limit = int64(len(data))
}
data = data[:limit]
}
return ioutil.NopCloser(bytes.NewBuffer(data)), nil
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucket, bucketPath := o.split()
data, err := ioutil.ReadAll(in)
if err != nil {
return errors.Wrap(err, "failed to update memory object")
}
o.od = &objectData{
data: data,
hash: "",
modTime: src.ModTime(ctx),
mimeType: fs.MimeType(ctx, o),
}
buckets.updateObjectData(bucket, bucketPath, o.od)
return nil
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
bucket, bucketPath := o.split()
removed := buckets.removeObjectData(bucket, bucketPath)
if !removed {
return fs.ErrorObjectNotFound
}
return nil
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
return o.od.mimeType
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)
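The deleted memory backend above keeps all state in a package-level map guarded by a sync.RWMutex, so every Fs created for :memory: shares the same buckets for the lifetime of the process. The core registry pattern, reduced to a few lines (names and error messages are illustrative):

package main

import (
	"errors"
	"fmt"
	"sync"
)

type registry struct {
	mu      sync.RWMutex
	buckets map[string]map[string][]byte // bucket -> object path -> data
}

func newRegistry() *registry {
	return &registry{buckets: map[string]map[string][]byte{}}
}

// get uses a read lock; concurrent readers do not block each other.
func (r *registry) get(bucket string) map[string][]byte {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.buckets[bucket]
}

// create makes the bucket if needed; writers take the exclusive lock.
func (r *registry) create(bucket string) map[string][]byte {
	r.mu.Lock()
	defer r.mu.Unlock()
	b := r.buckets[bucket]
	if b == nil {
		b = map[string][]byte{}
		r.buckets[bucket] = b
	}
	return b
}

// remove fails when the bucket is missing or still has objects in it.
func (r *registry) remove(bucket string) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	b, ok := r.buckets[bucket]
	if !ok {
		return errors.New("bucket not found")
	}
	if len(b) != 0 {
		return errors.New("bucket not empty")
	}
	delete(r.buckets, bucket)
	return nil
}

func main() {
	r := newRegistry()
	r.create("demo")["hello.txt"] = []byte("hi")
	fmt.Println(r.remove("demo")) // bucket not empty
}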


@@ -1,16 +0,0 @@
// Test memory filesystem interface
package memory
import (
"testing"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: ":memory:",
NilObject: (*Object)(nil),
})
}


@@ -272,19 +272,19 @@ type CreateShareLinkResponse struct {
} `json:"link"`
}
// AsyncOperationStatus provides information on the status of an asynchronous job progress.
// AsyncOperationStatus provides information on the status of a asynchronous job progress.
//
// The following API calls return AsyncOperationStatus resources:
//
// Copy Item
// Upload From URL
type AsyncOperationStatus struct {
PercentageComplete float64 `json:"percentageComplete"` // A float value between 0 and 100 that indicates the percentage complete.
PercentageComplete float64 `json:"percentageComplete"` // An float value between 0 and 100 that indicates the percentage complete.
Status string `json:"status"` // A string value that maps to an enumeration of possible values about the status of the job. "notStarted | inProgress | completed | updating | failed | deletePending | deleteFailed | waiting"
}
// GetID returns a normalized ID of the item
// If DriveID is known it will be prefixed to the ID with # separator
// If DriveID is known it will be prefixed to the ID with # seperator
// Can be parsed using onedrive.parseNormalizedID(normalizedID)
func (i *Item) GetID() string {
if i.IsRemote() && i.RemoteItem.ID != "" {
@@ -410,28 +410,3 @@ func (i *Item) GetParentReference() *ItemReference {
func (i *Item) IsRemote() bool {
return i.RemoteItem != nil
}
// User details for each version
type User struct {
Email string `json:"email"`
ID string `json:"id"`
DisplayName string `json:"displayName"`
}
// LastModifiedBy for each version
type LastModifiedBy struct {
User User `json:"user"`
}
// Version info
type Version struct {
ID string `json:"id"`
LastModifiedDateTime time.Time `json:"lastModifiedDateTime"`
Size int `json:"size"`
LastModifiedBy LastModifiedBy `json:"lastModifiedBy"`
}
// VersionsResponse is returned from /versions
type VersionsResponse struct {
Versions []Version `json:"value"`
}

backend/onedrive/onedrive.go (Executable file → Normal file, 511 changed lines)

@@ -12,9 +12,7 @@ import (
"log"
"net/http"
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
@@ -25,13 +23,11 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
@@ -39,6 +35,8 @@ import (
"golang.org/x/oauth2"
)
const enc = encodings.OneDrive
const (
rcloneClientID = "b15665d9-eda6-4092-8539-0eec376afd59"
rcloneEncryptedClientSecret = "_JUdzh3LnKNqSPcf4Wu5fgMFIQOI8glZu_akYgR8yf6egowNBg-R"
@@ -63,7 +61,7 @@ var (
AuthURL: "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token",
},
Scopes: []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access", "Sites.Read.All"},
Scopes: []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -82,7 +80,7 @@ func init() {
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
err := oauthutil.Config("onedrive", name, m, oauthConfig, nil)
err := oauthutil.Config("onedrive", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
return
@@ -187,28 +185,6 @@ func init() {
log.Fatalf("Failed to query available drives: %v", err)
}
// Also call /me/drive as sometimes /me/drives doesn't return it #4068
if opts.Path == "/me/drives" {
opts.Path = "/me/drive"
meDrive := driveResource{}
_, err := srv.CallJSON(ctx, &opts, nil, &meDrive)
if err != nil {
log.Fatalf("Failed to query available drives: %v", err)
}
found := false
for _, drive := range drives.Drives {
if drive.DriveID == meDrive.DriveID {
found = true
break
}
}
// add the me drive if not found already
if !found {
fs.Debugf(nil, "Adding %v to drives list from /me/drive", meDrive)
drives.Drives = append(drives.Drives, meDrive)
}
}
if len(drives.Drives) == 0 {
log.Fatalf("No drives found")
} else {
@@ -241,13 +217,18 @@ func init() {
m.Set(configDriveType, rootItem.ParentReference.DriveType)
config.SaveConfig()
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Microsoft App Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Microsoft App Client Secret\nLeave blank normally.",
}, {
Name: "chunk_size",
Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).
Above this size files will be chunked - must be multiple of 320k (327,680 bytes) and
should not exceed 250M (262,144,000 bytes) else you may encounter \"Microsoft.SharePoint.Client.InvalidClientQueryException: The request message is too big.\"
Note that the chunks will be buffered into memory.`,
Above this size files will be chunked - must be multiple of 320k (327,680 bytes). Note
that the chunks will be buffered into memory.`,
Default: defaultChunkSize,
Advanced: true,
}, {
@@ -271,92 +252,16 @@ delete OneNote files or otherwise want them to show up in directory
listing, set this option.`,
Default: false,
Advanced: true,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server side operations (eg copy) to work across different onedrive configs.
This can be useful if you wish to do a server side copy between two
different Onedrives. Note that this isn't enabled by default
because it isn't easy to tell if it will work between any two
configurations.`,
Advanced: true,
}, {
Name: "no_versions",
Default: false,
Help: `Remove all versions on modifying operations
Onedrive for business creates versions when rclone uploads new files
overwriting an existing one and when it sets the modification time.
These versions take up space out of the quota.
This flag checks for versions after file upload and setting
modification time and removes all but the last version.
**NB** Onedrive personal can't currently delete versions so don't use
this flag there.
`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// List of replaced characters:
// < (less than) -> '' // FULLWIDTH LESS-THAN SIGN
// > (greater than) -> '' // FULLWIDTH GREATER-THAN SIGN
// : (colon) -> '' // FULLWIDTH COLON
// " (double quote) -> '' // FULLWIDTH QUOTATION MARK
// \ (backslash) -> '' // FULLWIDTH REVERSE SOLIDUS
// | (vertical line) -> '' // FULLWIDTH VERTICAL LINE
// ? (question mark) -> '' // FULLWIDTH QUESTION MARK
// * (asterisk) -> '' // FULLWIDTH ASTERISK
// # (number sign) -> '' // FULLWIDTH NUMBER SIGN
// % (percent sign) -> '' // FULLWIDTH PERCENT SIGN
//
// Folder names cannot begin with a tilde ('~')
// List of replaced characters:
// ~ (tilde) -> '' // FULLWIDTH TILDE
//
// Additionally names can't begin with a space ( ) or end with a period (.) or space ( ).
// List of replaced characters:
// . (period) -> '' // FULLWIDTH FULL STOP
// (space) -> '␠' // SYMBOL FOR SPACE
//
// Also encode invalid UTF-8 bytes as json doesn't handle them.
//
// The OneDrive API documentation lists the set of reserved characters, but
// testing showed this list is incomplete. This are the differences:
// - " (double quote) is rejected, but missing in the documentation
// - space at the end of file and folder names is rejected, but missing in the documentation
// - period at the end of file names is rejected, but missing in the documentation
//
// Adding these restrictions to the OneDrive API documentation yields exactly
// the same rules as the Windows naming conventions.
//
// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/addressing-driveitems?view=odsp-graph-online#path-encoding
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeHashPercent |
encoder.EncodeLeftSpace |
encoder.EncodeLeftTilde |
encoder.EncodeRightPeriod |
encoder.EncodeRightSpace |
encoder.EncodeWin |
encoder.EncodeInvalidUtf8),
}}...),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
NoVersions bool `config:"no_versions"`
Enc encoder.MultiEncoder `config:"encoding"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
}
// Fs represents a remote one drive
@@ -411,7 +316,7 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// parsePath parses a one drive 'url'
// parsePath parses an one drive 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
@@ -430,32 +335,13 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
retry := false
if resp != nil {
switch resp.StatusCode {
case 401:
if len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
retry = true
fs.Debugf(nil, "Should retry: %v", err)
}
case 429: // Too Many Requests.
// see https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
if values := resp.Header["Retry-After"]; len(values) == 1 && values[0] != "" {
retryAfter, parseErr := strconv.Atoi(values[0])
if parseErr != nil {
fs.Debugf(nil, "Failed to parse Retry-After: %q: %v", values[0], parseErr)
} else {
duration := time.Second * time.Duration(retryAfter)
retry = true
err = pacer.RetryAfterError(err, duration)
fs.Debugf(nil, "Too many requests. Trying again in %d seconds.", retryAfter)
}
}
case 507: // Insufficient Storage
return false, fserrors.FatalError(err)
}
authRetry := false
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
authRetry = true
fs.Debugf(nil, "Should retry: %v", err)
}
return retry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
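The shouldRetry change above is where OneDrive's 429 throttling gets honoured: the Retry-After header is parsed as a number of seconds and turned into a delayed retry instead of an immediate one, while 507 is treated as fatal. A hedged sketch of just that header handling with the standard library (the surrounding retry plumbing is simplified away):

package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// retryDelay inspects a response and reports whether the request should be
// retried and, for 429 responses, how long to wait first.
func retryDelay(resp *http.Response) (retry bool, wait time.Duration) {
	if resp == nil {
		return false, 0
	}
	switch resp.StatusCode {
	case http.StatusTooManyRequests: // 429: throttled, honour Retry-After
		if v := resp.Header.Get("Retry-After"); v != "" {
			if secs, err := strconv.Atoi(v); err == nil {
				return true, time.Duration(secs) * time.Second
			}
		}
		return true, 0
	case http.StatusInsufficientStorage: // 507: retrying will not help
		return false, 0
	}
	return false, 0
}

func main() {
	resp := &http.Response{StatusCode: 429, Header: http.Header{}}
	resp.Header.Set("Retry-After", "7")
	retry, wait := retryDelay(resp)
	fmt.Println(retry, wait) // true 7s
}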
// readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID.
@@ -465,13 +351,8 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// instead of simply using `drives/driveID/root:/itemPath` because it works for
// "shared with me" folders in OneDrive Personal (See #2536, #2778)
// This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
//
// If `relPath` == '', do not append the slash (See #3664)
func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
if relPath != "" {
relPath = "/" + withTrailingColon(rest.URLPathEscape(f.opt.Enc.FromStandardPath(relPath)))
}
opts := newOptsCall(normalizedID, "GET", ":"+relPath)
opts := newOptsCall(normalizedID, "GET", ":/"+withTrailingColon(rest.URLPathEscape(enc.FromStandardPath(relPath))))
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(resp, err)
@@ -494,7 +375,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
} else {
opts = rest.Opts{
Method: "GET",
Path: "/root:/" + rest.URLPathEscape(f.opt.Enc.FromStandardPath(path)),
Path: "/root:/" + rest.URLPathEscape(enc.FromStandardPath(path)),
}
}
err = f.pacer.Call(func() (bool, error) {
@@ -513,13 +394,13 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
var dirCacheFoundRoot bool
var rootNormalizedID string
if f.dirCache != nil {
rootNormalizedID, err = f.dirCache.RootID(ctx, false)
dirCacheRootIDExists := err == nil
var dirCacheRootIDExists bool
rootNormalizedID, dirCacheRootIDExists = f.dirCache.Get("")
if f.root == "" {
// if f.root == "", it means f.root is the absolute root of the drive
// and its ID should have been found in NewFs
dirCacheFoundRoot = dirCacheRootIDExists
} else if _, err := f.dirCache.RootParentID(ctx, false); err == nil {
} else if _, err := f.dirCache.RootParentID(); err == nil {
// if root is in a folder, it must have a parent folder, and
// if dirCache has found root in NewFs, the parent folder's ID
// should be present.
@@ -627,7 +508,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
CaseInsensitive: true,
ReadMimeType: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
}).Fill(f)
f.srv.SetErrorHandler(errorHandler)
@@ -743,7 +623,7 @@ func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, e
var info *api.Item
opts := newOptsCall(dirID, "POST", "/children")
mkdir := api.CreateItemRequest{
Name: f.opt.Enc.FromStandardName(leaf),
Name: enc.FromStandardName(leaf),
ConflictBehavior: "fail",
}
err = f.pacer.Call(func() (bool, error) {
@@ -803,7 +683,7 @@ OUTER:
if item.Deleted != nil {
continue
}
item.Name = f.opt.Enc.ToStandardName(item.GetName())
item.Name = enc.ToStandardName(item.GetName())
if fn(item) {
found = true
break OUTER
@@ -828,6 +708,10 @@ OUTER:
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
@@ -875,7 +759,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
// Create the directory for the object if it doesn't exist
leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, leaf, directoryID, err
}
@@ -906,7 +790,13 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
_, err := f.dirCache.FindDir(ctx, dir, true)
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
return err
}
@@ -929,6 +819,10 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return errors.New("can't purge root directory")
}
dc := f.dirCache
err := dc.FindRoot(ctx, false)
if err != nil {
return err
}
rootID, err := dc.FindDir(ctx, dir, false)
if err != nil {
return err
@@ -1026,13 +920,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}
// Check we aren't overwriting a file on the same remote
if srcObj.fs == f {
srcPath := srcObj.rootPath()
dstPath := f.rootPath(remote)
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
srcPath := srcObj.rootPath()
dstPath := f.rootPath(remote)
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
// Create temporary object
@@ -1048,7 +939,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
id, dstDriveID, _ := parseNormalizedID(directoryID)
replacedLeaf := f.opt.Enc.FromStandardName(leaf)
replacedLeaf := enc.FromStandardName(leaf)
copyReq := api.CopyItemRequest{
Name: &replacedLeaf,
ParentReference: api.ItemReference{
@@ -1088,13 +979,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}
// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}
// Move src to this remote using server side move operations.
@@ -1122,10 +1013,9 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
id, dstDriveID, _ := parseNormalizedID(directoryID)
_, srcObjDriveID, _ := parseNormalizedID(srcObj.id)
if f.canonicalDriveID(dstDriveID) != srcObj.fs.canonicalDriveID(srcObjDriveID) {
if dstDriveID != srcObjDriveID {
// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
// "Items cannot be moved between Drives using this request."
fs.Debugf(f, "Can't move files between drives (%q != %q)", dstDriveID, srcObjDriveID)
return nil, fs.ErrorCantMove
}
@@ -1133,7 +1023,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
opts := newOptsCall(srcObj.id, "PATCH", "")
move := api.MoveItemRequest{
Name: f.opt.Enc.FromStandardName(leaf),
Name: enc.FromStandardName(leaf),
ParentReference: &api.ItemReference{
DriveID: dstDriveID,
ID: id,
@@ -1175,22 +1065,70 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := path.Join(srcFs.root, srcRemote)
dstPath := path.Join(f.root, dstRemote)
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
// Refuse to move to or from the root
if srcPath == "" || dstPath == "" {
fs.Debugf(src, "DirMove error: Can't move root")
return errors.New("can't move root directory")
}
// find the root src directory
err := srcFs.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
// find the root dst directory
if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
} else {
if f.dirCache.FoundRoot() {
return fs.ErrorDirExists
}
}
// Find ID of dst parent, creating subdirs if necessary
var leaf, dstDirectoryID string
findPath := dstRemote
if dstRemote == "" {
findPath = f.root
}
leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true)
if err != nil {
return err
}
parsedDstDirID, dstDriveID, _ := parseNormalizedID(dstDirectoryID)
// Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
if err != nil {
return err
}
_, srcDriveID, _ := parseNormalizedID(srcID)
if f.canonicalDriveID(dstDriveID) != srcFs.canonicalDriveID(srcDriveID) {
if dstDriveID != srcDriveID {
// https://docs.microsoft.com/en-us/graph/api/driveitem-move?view=graph-rest-1.0
// "Items cannot be moved between Drives using this request."
fs.Debugf(f, "Can't move directories between drives (%q != %q)", dstDriveID, srcDriveID)
return fs.ErrorCantDirMove
}
// Check destination does not exist
if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return err
} else {
return fs.ErrorDirExists
}
}
// Get timestamps of src so they can be preserved
srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(ctx, srcID, "")
if err != nil {
@@ -1200,7 +1138,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Do the move
opts := newOptsCall(srcID, "PATCH", "")
move := api.MoveItemRequest{
Name: f.opt.Enc.FromStandardName(dstLeaf),
Name: enc.FromStandardName(leaf),
ParentReference: &api.ItemReference{
DriveID: dstDriveID,
ID: parsedDstDirID,
@@ -1247,10 +1185,6 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return nil, errors.Wrap(err, "about failed")
}
q := drive.Quota
// On (some?) Onedrive sharepoints these are all 0 so return unknown in that case
if q.Total == 0 && q.Used == 0 && q.Deleted == 0 && q.Remaining == 0 {
return &fs.Usage{}, nil
}
usage = &fs.Usage{
Total: fs.NewUsageValue(q.Total), // quota of bytes that can be used
Used: fs.NewUsageValue(q.Used), // bytes in use
@@ -1268,8 +1202,8 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(QuickXorHashType)
}
// PublicLink returns a link for downloading without account.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
// PublicLink returns a link for downloading without accout.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
info, _, err := f.readMetaDataForPath(ctx, f.rootPath(remote))
if err != nil {
return "", err
@@ -1294,73 +1228,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return result.Link.WebURL, nil
}
// CleanUp deletes all the hidden files.
func (f *Fs) CleanUp(ctx context.Context) error {
token := make(chan struct{}, fs.Config.Checkers)
var wg sync.WaitGroup
err := walk.Walk(ctx, f, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
err = entries.ForObjectError(func(obj fs.Object) error {
o, ok := obj.(*Object)
if !ok {
return errors.New("internal error: not a onedrive object")
}
wg.Add(1)
token <- struct{}{}
go func() {
defer func() {
<-token
wg.Done()
}()
err := o.deleteVersions(ctx)
if err != nil {
fs.Errorf(o, "Failed to remove versions: %v", err)
}
}()
return nil
})
wg.Wait()
return err
})
return err
}
// Finds and removes any old versions for o
func (o *Object) deleteVersions(ctx context.Context) error {
opts := newOptsCall(o.id, "GET", "/versions")
var versions api.VersionsResponse
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &versions)
return shouldRetry(resp, err)
})
if err != nil {
return err
}
if len(versions.Versions) < 2 {
return nil
}
for _, version := range versions.Versions[1:] {
err = o.deleteVersion(ctx, version.ID)
if err != nil {
return err
}
}
return nil
}
// Finds and removes any old versions for o
func (o *Object) deleteVersion(ctx context.Context, ID string) error {
if operations.SkipDestructive(ctx, fmt.Sprintf("%s of %s", ID, o.remote), "delete version") {
return nil
}
fs.Infof(o, "removing version %q", ID)
opts := newOptsCall(o.id, "DELETE", "/versions/"+ID)
opts.NoResponse = true
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
})
}
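CleanUp above walks the whole remote and prunes versions concurrently, but bounds the number of in-flight deletions with a buffered channel used as a semaphore plus a WaitGroup. The pattern in isolation looks like this; the work function is a stand-in for the per-object version cleanup.

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	const maxWorkers = 4
	token := make(chan struct{}, maxWorkers) // semaphore: at most 4 goroutines in flight
	var wg sync.WaitGroup

	work := func(name string) {
		time.Sleep(10 * time.Millisecond) // stand-in for the real cleanup call
		fmt.Println("cleaned", name)
	}

	for i := 0; i < 10; i++ {
		name := fmt.Sprintf("object-%d", i)
		wg.Add(1)
		token <- struct{}{} // blocks once maxWorkers goroutines are running
		go func() {
			defer func() {
				<-token // release the slot
				wg.Done()
			}()
			work(name)
		}()
	}
	wg.Wait()
}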
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -1393,7 +1260,7 @@ func (o *Object) rootPath() string {
// srvPath returns a path for use in server given a remote
func (f *Fs) srvPath(remote string) string {
return f.opt.Enc.FromStandardPath(f.rootSlash() + remote)
return enc.FromStandardPath(f.rootSlash() + remote)
}
// srvPath returns a path for use in server
@@ -1505,7 +1372,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
opts = rest.Opts{
Method: "PATCH",
RootURL: rootURL,
Path: "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf))),
Path: "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(enc.FromStandardName(leaf))),
}
} else {
opts = rest.Opts{
@@ -1524,13 +1391,6 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
return shouldRetry(resp, err)
})
// Remove versions if required
if o.fs.opt.NoVersions {
err := o.deleteVersions(ctx)
if err != nil {
fs.Errorf(o, "Failed to remove versions: %v", err)
}
}
return info, err
}
@@ -1587,7 +1447,7 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
Method: "POST",
RootURL: rootURL,
Path: fmt.Sprintf("/%s/items/%s:/%s:/createUploadSession",
drive, id, rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf))),
drive, id, rest.URLPathEscape(enc.FromStandardName(leaf))),
}
} else {
opts = rest.Opts{
@@ -1612,75 +1472,21 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
return response, err
}
// getPosition gets the current position in a multipart upload
func (o *Object) getPosition(ctx context.Context, url string) (pos int64, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: url,
}
var info api.UploadFragmentResponse
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(resp, err)
})
if err != nil {
return 0, err
}
if len(info.NextExpectedRanges) != 1 {
return 0, errors.Errorf("bad number of ranges in upload position: %v", info.NextExpectedRanges)
}
position := info.NextExpectedRanges[0]
i := strings.IndexByte(position, '-')
if i < 0 {
return 0, errors.Errorf("no '-' in next expected range: %q", position)
}
position = position[:i]
pos, err = strconv.ParseInt(position, 10, 64)
if err != nil {
return 0, errors.Wrapf(err, "bad expected range: %q", position)
}
return pos, nil
}
// uploadFragment uploads a part
func (o *Object) uploadFragment(ctx context.Context, url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64, options ...fs.OpenOption) (info *api.Item, err error) {
func (o *Object) uploadFragment(ctx context.Context, url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64) (info *api.Item, err error) {
opts := rest.Opts{
Method: "PUT",
RootURL: url,
ContentLength: &chunkSize,
ContentRange: fmt.Sprintf("bytes %d-%d/%d", start, start+chunkSize-1, totalSize),
Body: chunk,
}
// var response api.UploadFragmentResponse
var resp *http.Response
var body []byte
var skip = int64(0)
err = o.fs.pacer.Call(func() (bool, error) {
toSend := chunkSize - skip
opts := rest.Opts{
Method: "PUT",
RootURL: url,
ContentLength: &toSend,
ContentRange: fmt.Sprintf("bytes %d-%d/%d", start+skip, start+chunkSize-1, totalSize),
Body: chunk,
Options: options,
}
_, _ = chunk.Seek(skip, io.SeekStart)
_, _ = chunk.Seek(0, io.SeekStart)
resp, err = o.fs.srv.Call(ctx, &opts)
if err != nil && resp != nil && resp.StatusCode == http.StatusRequestedRangeNotSatisfiable {
fs.Debugf(o, "Received 416 error - reading current position from server: %v", err)
pos, posErr := o.getPosition(ctx, url)
if posErr != nil {
fs.Debugf(o, "Failed to read position: %v", posErr)
return false, posErr
}
skip = pos - start
fs.Debugf(o, "Read position %d, chunk is %d..%d, bytes to skip = %d", pos, start, start+chunkSize, skip)
switch {
case skip < 0:
return false, errors.Wrapf(err, "sent block already (skip %d < 0), can't rewind", skip)
case skip > chunkSize:
return false, errors.Wrapf(err, "position is in the future (skip %d > chunkSize %d), can't skip forward", skip, chunkSize)
case skip == chunkSize:
fs.Debugf(o, "Skipping chunk as already sent (skip %d == chunkSize %d)", skip, chunkSize)
return false, nil
}
return true, errors.Wrapf(err, "retry this chunk skipping %d bytes", skip)
}
if err != nil {
return shouldRetry(resp, err)
}
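When a chunk upload gets a 416 back, the code above asks the upload session for its next expected range and skips the bytes the server already holds before resending. The string handling for that position lookup is easy to get wrong, so here is an isolated sketch of parsing a nextExpectedRanges entry and deriving the skip; the numbers in main are made up.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parsePosition extracts the next byte the server expects from a range
// string such as "26214400-52428799" or "26214400-".
func parsePosition(nextRange string) (int64, error) {
	i := strings.IndexByte(nextRange, '-')
	if i < 0 {
		return 0, fmt.Errorf("no '-' in next expected range: %q", nextRange)
	}
	return strconv.ParseInt(nextRange[:i], 10, 64)
}

func main() {
	pos, err := parsePosition("26214400-52428799")
	if err != nil {
		panic(err)
	}
	start, chunkSize := int64(0), int64(52428800)
	skip := pos - start
	switch {
	case skip < 0:
		fmt.Println("server is behind this chunk, cannot rewind")
	case skip == chunkSize:
		fmt.Println("whole chunk already received, skip it")
	case skip > chunkSize:
		fmt.Println("server is ahead of this chunk, cannot skip forward")
	default:
		fmt.Printf("resend chunk from offset %d\n", skip)
	}
}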
@@ -1715,27 +1521,46 @@ func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error
}
// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, modTime time.Time, options ...fs.OpenOption) (info *api.Item, err error) {
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
if size <= 0 {
return nil, errors.New("unknown-sized upload not supported")
}
uploadURLChan := make(chan string, 1)
gracefulCancel := func() {
uploadURL, ok := <-uploadURLChan
// Reading from uploadURLChan blocks the atexit process until
// we are able to use uploadURL to cancel the upload
if !ok { // createUploadSession failed - no need to cancel upload
return
}
fs.Debugf(o, "Cancelling multipart upload")
cancelErr := o.cancelUploadSession(ctx, uploadURL)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
}
}
cancelFuncHandle := atexit.Register(gracefulCancel)
// Create upload session
fs.Debugf(o, "Starting multipart upload")
session, err := o.createUploadSession(ctx, modTime)
if err != nil {
close(uploadURLChan)
atexit.Unregister(cancelFuncHandle)
return nil, err
}
uploadURL := session.UploadURL
uploadURLChan <- uploadURL
// Cancel the session if something went wrong
defer atexit.OnError(&err, func() {
fs.Debugf(o, "Cancelling multipart upload: %v", err)
cancelErr := o.cancelUploadSession(ctx, uploadURL)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
defer func() {
if err != nil {
fs.Debugf(o, "Error encountered during upload: %v", err)
gracefulCancel()
}
})()
atexit.Unregister(cancelFuncHandle)
}()
// Upload the chunks
remaining := size
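The uploadMultipart rewrite above registers its cancel function before the upload session exists and hands the session URL over through a one-element channel, so an interrupt that fires early blocks until there is (or never will be) something to cancel. A cut-down sketch of that handshake, with the atexit hook replaced by an explicit call and an invented session URL:

package main

import "fmt"

func main() {
	uploadURLChan := make(chan string, 1)

	// gracefulCancel may run before the upload session is created; reading
	// from the channel makes it wait until the URL exists, or return early
	// if the channel was closed because no session will ever exist.
	gracefulCancel := func() {
		url, ok := <-uploadURLChan
		if !ok {
			fmt.Println("nothing to cancel: session was never created")
			return
		}
		fmt.Println("cancelling upload session at", url)
	}

	createSession := func() (string, error) {
		return "https://upload.example/session/123", nil
	}

	url, err := createSession()
	if err != nil {
		close(uploadURLChan) // unblock any pending cancel
		return
	}
	uploadURLChan <- url

	// Simulate the interrupt handler firing after the session exists.
	gracefulCancel()
}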
@@ -1747,7 +1572,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
}
seg := readers.NewRepeatableReader(io.LimitReader(in, n))
fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n, options...)
info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n)
if err != nil {
return nil, err
}
@@ -1760,7 +1585,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
// Update the content of a remote file within 4MB size in one single request
// This function will set modtime after uploading, which will create a new version for the remote file
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, modTime time.Time, options ...fs.OpenOption) (info *api.Item, err error) {
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
}
@@ -1774,10 +1599,9 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
opts = rest.Opts{
Method: "PUT",
RootURL: rootURL,
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf)) + ":/content",
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(enc.FromStandardName(leaf)) + ":/content",
ContentLength: &size,
Body: in,
Options: options,
}
} else {
opts = rest.Opts{
@@ -1785,7 +1609,6 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
ContentLength: &size,
Body: in,
Options: options,
}
}
@@ -1827,9 +1650,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var info *api.Item
if size > 0 {
info, err = o.uploadMultipart(ctx, in, size, modTime, options...)
info, err = o.uploadMultipart(ctx, in, size, modTime)
} else if size == 0 {
info, err = o.uploadSinglepart(ctx, in, size, modTime, options...)
info, err = o.uploadSinglepart(ctx, in, size, modTime)
} else {
return errors.New("unknown-sized upload not supported")
}
@@ -1837,14 +1660,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
// If updating the file then remove versions
if o.fs.opt.NoVersions && o.hasMetaData {
err = o.deleteVersions(ctx)
if err != nil {
fs.Errorf(o, "Failed to remove versions: %v", err)
}
}
return o.setMetaData(info)
}
@@ -1890,17 +1705,6 @@ func parseNormalizedID(ID string) (string, string, string) {
return ID, "", ""
}
// Returns the canonical form of the driveID
func (f *Fs) canonicalDriveID(driveID string) (canonicalDriveID string) {
if driveID == "" {
canonicalDriveID = f.opt.DriveID
} else {
canonicalDriveID = driveID
}
canonicalDriveID = strings.ToLower(canonicalDriveID)
return canonicalDriveID
}
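canonicalDriveID above appears to exist because drive IDs can come back from the API with differing letter case, or occasionally empty, which would make the "same drive" checks in Move and DirMove fail spuriously. The normalisation itself is tiny; this standalone version takes the configured default as a parameter rather than reading it from the Fs options.

package main

import (
	"fmt"
	"strings"
)

// canonicalDriveID lower-cases a drive ID and falls back to a configured
// default when the API returned an empty one.
func canonicalDriveID(defaultID, driveID string) string {
	if driveID == "" {
		driveID = defaultID
	}
	return strings.ToLower(driveID)
}

func main() {
	fmt.Println(canonicalDriveID("b!AbC", ""))      // b!abc
	fmt.Println(canonicalDriveID("b!AbC", "B!ABC")) // b!abc
}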
// getRelativePathInsideBase checks if `target` is inside `base`. If so, it
// returns a relative path for `target` based on `base` and a boolean `true`.
// Otherwise returns "", false.
@@ -1941,7 +1745,6 @@ var (
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
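The var block above is the usual Go trick for asserting at compile time that a type satisfies the interfaces the backend claims to implement: if a required method goes missing, the package simply stops compiling. The same idea in miniature, with a toy io.Reader:

package main

import (
	"fmt"
	"io/ioutil"
	"strings"
)

type upperReader struct{ r *strings.Reader }

func (u upperReader) Read(p []byte) (int, error) {
	n, err := u.r.Read(p)
	for i := 0; i < n; i++ {
		if 'a' <= p[i] && p[i] <= 'z' {
			p[i] -= 'a' - 'A'
		}
	}
	return n, err
}

// Compile-time checks: assigning to a blank interface-typed variable
// fails to build if the type stops implementing the interface.
var (
	_ interface{ Read([]byte) (int, error) } = upperReader{}
	_ interface{ Read([]byte) (int, error) } = (*upperReader)(nil)
)

func main() {
	b, _ := ioutil.ReadAll(upperReader{strings.NewReader("hello")})
	fmt.Println(string(b)) // HELLO
}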


@@ -13,20 +13,21 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
)
const enc = encodings.OpenDrive
const (
defaultEndpoint = "https://dev.opendrive.com/api/v1"
minSleep = 10 * time.Millisecond
@@ -49,57 +50,14 @@ func init() {
Help: "Password.",
IsPassword: true,
Required: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// List of replaced characters:
// < (less than) -> '＜' // FULLWIDTH LESS-THAN SIGN
// > (greater than) -> '＞' // FULLWIDTH GREATER-THAN SIGN
// : (colon) -> '：' // FULLWIDTH COLON
// " (double quote) -> '＂' // FULLWIDTH QUOTATION MARK
// \ (backslash) -> '＼' // FULLWIDTH REVERSE SOLIDUS
// | (vertical line) -> '｜' // FULLWIDTH VERTICAL LINE
// ? (question mark) -> '？' // FULLWIDTH QUESTION MARK
// * (asterisk) -> '＊' // FULLWIDTH ASTERISK
//
// Additionally names can't begin or end with an ASCII whitespace.
// List of replaced characters:
// (space) -> '␠' // SYMBOL FOR SPACE
// (horizontal tab) -> '␉' // SYMBOL FOR HORIZONTAL TABULATION
// (line feed) -> '␊' // SYMBOL FOR LINE FEED
// (vertical tab) -> '␋' // SYMBOL FOR VERTICAL TABULATION
// (carriage return) -> '␍' // SYMBOL FOR CARRIAGE RETURN
//
// Also encode invalid UTF-8 bytes as json doesn't handle them properly.
//
// https://www.opendrive.com/wp-content/uploads/guides/OpenDrive_API_guide.pdf
Default: (encoder.Base |
encoder.EncodeWin |
encoder.EncodeLeftCrLfHtVt |
encoder.EncodeRightCrLfHtVt |
encoder.EncodeBackSlash |
encoder.EncodeLeftSpace |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8),
}, {
Name: "chunk_size",
Help: `Files will be uploaded in chunks this size.
Note that these chunks are buffered in memory so increasing them will
increase memory use.`,
Default: 10 * fs.MebiByte,
Advanced: true,
}},
})
}
// Options defines the configuration for this backend
type Options struct {
UserName string `config:"username"`
Password string `config:"password"`
Enc encoder.MultiEncoder `config:"encoding"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UserName string `config:"username"`
Password string `config:"password"`
}
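For context, the encoding flags listed above are OR-ed together into a single encoder.MultiEncoder bitmask, and the backend then funnels every name through FromStandardName / ToStandardName (as the later hunks in this file show). A standalone sketch using a reduced, illustrative mask rather than the exact default above:

package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/encoder"
)

// A cut-down mask built from some of the same flags the option default uses.
var demoEnc = encoder.Base |
	encoder.EncodeWin |
	encoder.EncodeBackSlash |
	encoder.EncodeLeftSpace |
	encoder.EncodeRightSpace |
	encoder.EncodeInvalidUtf8

func main() {
	name := ` report*final?.txt `
	wire := demoEnc.FromStandardName(name) // form sent to the OpenDrive API
	back := demoEnc.ToStandardName(wire)   // form shown back to the user
	fmt.Printf("wire=%q roundtrip ok=%v\n", wire, back == name)
}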
// Fs represents a remote server
@@ -280,7 +238,13 @@ func errorHandler(resp *http.Response) error {
// Mkdir creates the folder if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// fs.Debugf(nil, "Mkdir(\"%s\")", dir)
_, err := f.dirCache.FindDir(ctx, dir, true)
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
return err
}
@@ -306,6 +270,10 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return errors.New("can't purge root directory")
}
dc := f.dirCache
err := dc.FindRoot(ctx, false)
if err != nil {
return err
}
rootID, err := dc.FindDir(ctx, dir, false)
if err != nil {
return err
@@ -473,8 +441,58 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := path.Join(srcFs.root, srcRemote)
dstPath := path.Join(f.root, dstRemote)
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
// Refuse to move to or from the root
if srcPath == "" || dstPath == "" {
fs.Debugf(src, "DirMove error: Can't move root")
return errors.New("can't move root directory")
}
// find the root src directory
err = srcFs.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
// find the root dst directory
if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
} else {
if f.dirCache.FoundRoot() {
return fs.ErrorDirExists
}
}
// Find ID of dst parent, creating subdirs if necessary
var leaf, directoryID string
findPath := dstRemote
if dstRemote == "" {
findPath = f.root
}
leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
if err != nil {
return err
}
// Check destination does not exist
if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return err
} else {
return fs.ErrorDirExists
}
}
// Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
if err != nil {
return err
}
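Most of the DirMove bodies in this commit share the same pre-flight test seen above: look the destination up with create=false, treat fs.ErrorDirNotFound as permission to proceed, propagate real errors, and report fs.ErrorDirExists on a hit. A generic, hedged sketch of that check (the find callback is a stand-in for dirCache.FindDir):

package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
)

// checkDstAbsent returns nil only when dstRemote does not exist yet.
func checkDstAbsent(ctx context.Context, dstRemote string,
	find func(ctx context.Context, dir string, create bool) (string, error)) error {
	if dstRemote == "" {
		return nil // the root case is handled separately by the callers above
	}
	_, err := find(ctx, dstRemote, false)
	switch {
	case err == fs.ErrorDirNotFound:
		return nil // OK - destination is free
	case err != nil:
		return err
	default:
		return fs.ErrorDirExists
	}
}

func main() {
	missing := func(ctx context.Context, dir string, create bool) (string, error) {
		return "", fs.ErrorDirNotFound
	}
	fmt.Println(checkDstAbsent(context.Background(), "new/dir", missing)) // <nil>
}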
@@ -486,9 +504,9 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
moveFolderData := moveCopyFolder{
SessionID: f.session.SessionID,
FolderID: srcID,
DstFolderID: dstDirectoryID,
DstFolderID: directoryID,
Move: "true",
NewFolderName: dstLeaf,
NewFolderName: leaf,
}
opts := rest.Opts{
Method: "POST",
@@ -506,13 +524,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return nil
}
// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}
// Return an Object from a path
@@ -560,7 +578,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
// Create the directory for the object if it doesn't exist
leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, leaf, directoryID, err
}
@@ -570,7 +588,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
fs: f,
remote: remote,
}
return o, f.opt.Enc.FromStandardName(leaf), directoryID, nil
return o, enc.FromStandardName(leaf), directoryID, nil
}
// readMetaDataForPath reads the metadata from the path
@@ -617,7 +635,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
}
if "" == o.id {
// We need to create an ID for this file
// We need to create a ID for this file
var resp *http.Response
response := createFileResponse{}
err := o.fs.pacer.Call(func() (bool, error) {
@@ -627,9 +645,8 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
Name: leaf,
}
opts := rest.Opts{
Method: "POST",
Options: options,
Path: "/upload/create_file.json",
Method: "POST",
Path: "/upload/create_file.json",
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, &createFileData, &response)
return o.fs.shouldRetry(resp, err)
@@ -646,6 +663,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
400, // Bad request (seen in "Next token is expired")
401, // Unauthorized (seen in "Token has expired")
408, // Request Timeout
423, // Locked - get this on folders sometimes
@@ -672,7 +690,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
err = f.pacer.Call(func() (bool, error) {
createDirData := createFolder{
SessionID: f.session.SessionID,
FolderName: f.opt.Enc.FromStandardName(leaf),
FolderName: enc.FromStandardName(leaf),
FolderSubParent: pathID,
FolderIsPublic: 0,
FolderPublicUpl: 0,
@@ -718,7 +736,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
return "", false, errors.Wrap(err, "failed to get folder list")
}
leaf = f.opt.Enc.FromStandardName(leaf)
leaf = enc.FromStandardName(leaf)
for _, folder := range folderList.Folders {
// fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID)
@@ -742,6 +760,10 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// fs.Debugf(nil, "List(%v)", dir)
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
@@ -762,7 +784,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
for _, folder := range folderList.Folders {
folder.Name = f.opt.Enc.ToStandardName(folder.Name)
folder.Name = enc.ToStandardName(folder.Name)
// fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID)
remote := path.Join(dir, folder.Name)
// cache the directory ID for later lookups
@@ -773,7 +795,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
for _, file := range folderList.Files {
file.Name = f.opt.Enc.ToStandardName(file.Name)
file.Name = enc.ToStandardName(file.Name)
// fs.Debugf(nil, "File: %s (%s)", file.Name, file.FileID)
remote := path.Join(dir, file.Name)
o, err := f.newObjectWithInfo(ctx, remote, &file)
@@ -906,9 +928,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
openUploadData := openUpload{SessionID: o.fs.session.SessionID, FileID: o.id, Size: size}
// fs.Debugf(nil, "PreOpen: %#v", openUploadData)
opts := rest.Opts{
Method: "POST",
Options: options,
Path: "/upload/open_file_upload.json",
Method: "POST",
Path: "/upload/open_file_upload.json",
}
resp, err := o.fs.srv.CallJSON(ctx, &opts, &openUploadData, &openResponse)
return o.fs.shouldRetry(resp, err)
@@ -919,13 +940,15 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// resp.Body.Close()
// fs.Debugf(nil, "PostOpen: %#v", openResponse)
buf := make([]byte, o.fs.opt.ChunkSize)
// 10 MB chunks size
chunkSize := int64(1024 * 1024 * 10)
buf := make([]byte, int(chunkSize))
chunkOffset := int64(0)
remainingBytes := size
chunkCounter := 0
for remainingBytes > 0 {
currentChunkSize := int64(o.fs.opt.ChunkSize)
currentChunkSize := chunkSize
if currentChunkSize > remainingBytes {
currentChunkSize = remainingBytes
}
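The loop above is the standard fixed-buffer chunking pattern: each pass uploads min(chunkSize, remainingBytes) and then advances, so only one chunk ever sits in memory, which is why the chunk_size help text warns that bigger chunks cost more RAM. A self-contained sketch of just the slicing arithmetic:

package main

import "fmt"

// chunkSpans returns the (offset, length) pairs a chunked upload of the
// given size would send, mirroring the loop in Update above.
func chunkSpans(size, chunkSize int64) [][2]int64 {
	var spans [][2]int64
	for offset := int64(0); offset < size; {
		n := chunkSize
		if remaining := size - offset; n > remaining {
			n = remaining
		}
		spans = append(spans, [2]int64{offset, n})
		offset += n
	}
	return spans
}

func main() {
	// A 25 MiB file with the 10 MiB default chunk size -> 10 + 10 + 5 MiB
	fmt.Println(chunkSpans(25<<20, 10<<20))
}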
@@ -1014,7 +1037,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
func (o *Object) readMetaData(ctx context.Context) (err error) {
leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, o.remote, false)
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return fs.ErrorObjectNotFound
@@ -1027,7 +1050,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
opts := rest.Opts{
Method: "GET",
Path: fmt.Sprintf("/folder/itembyname.json/%s/%s?name=%s",
o.fs.session.SessionID, directoryID, url.QueryEscape(o.fs.opt.Enc.FromStandardName(leaf))),
o.fs.session.SessionID, directoryID, url.QueryEscape(enc.FromStandardName(leaf))),
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &folderList)
return o.fs.shouldRetry(resp, err)


@@ -18,13 +18,13 @@ func (e *Error) Error() string {
return fmt.Sprintf("%s (Error %d)", e.Info.Message, e.Info.Code)
}
// Account describes an OpenDRIVE account
// Account describes a OpenDRIVE account
type Account struct {
Username string `json:"username"`
Password string `json:"passwd"`
}
// UserSessionInfo describes an OpenDRIVE session
// UserSessionInfo describes a OpenDRIVE session
type UserSessionInfo struct {
Username string `json:"username"`
Password string `json:"passwd"`
@@ -45,7 +45,7 @@ type UserSessionInfo struct {
PartnerUsersDomain string `json:"PartnerUsersDomain"`
}
// FolderList describes an OpenDRIVE listing
// FolderList describes a OpenDRIVE listing
type FolderList struct {
// DirUpdateTime string `json:"DirUpdateTime,string"`
Name string `json:"Name"`
@@ -56,7 +56,7 @@ type FolderList struct {
Files []File `json:"Files"`
}
// Folder describes an OpenDRIVE folder
// Folder describes a OpenDRIVE folder
type Folder struct {
FolderID string `json:"FolderID"`
Name string `json:"Name"`
@@ -109,7 +109,7 @@ type removeFolder struct {
FolderID string `json:"folder_id"`
}
// File describes an OpenDRIVE file
// File describes a OpenDRIVE file
type File struct {
FileID string `json:"FileId"`
FileHash string `json:"FileHash"`


@@ -152,14 +152,6 @@ type ChecksumFileResult struct {
Metadata Item `json:"metadata"`
}
// PubLinkResult is returned from /getfilepublink and /getfolderpublink
type PubLinkResult struct {
Error
LinkID int `json:"linkid"`
Link string `json:"link"`
LinkCode string `json:"code"`
}
// UserInfo is returned from /userinfo
type UserInfo struct {
Error


@@ -26,23 +26,26 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)
const enc = encodings.Pcloud
const (
rcloneClientID = "DnONSzyJXpm"
rcloneEncryptedClientSecret = "ej1OIF39VOQQ0PXaSdK9ztkLw3tdLNscW2157TKNQdQKkICR4uU7aFg4eFM"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultHostname = "api.pcloud.com"
decayConstant = 2 // bigger for slower decay, exponential
rootID = "d0" // ID of root folder is always this
rootURL = "https://api.pcloud.com"
)
// Globals
@@ -51,8 +54,8 @@ var (
oauthConfig = &oauth2.Config{
Scopes: nil,
Endpoint: oauth2.Endpoint{
AuthURL: "https://my.pcloud.com/oauth2/authorize",
// TokenURL: "https://api.pcloud.com/oauth2_token", set by updateTokenURL
AuthURL: "https://my.pcloud.com/oauth2/authorize",
TokenURL: "https://api.pcloud.com/oauth2_token",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
@@ -60,90 +63,30 @@ var (
}
)
// Update the TokenURL with the actual hostname
func updateTokenURL(oauthConfig *oauth2.Config, hostname string) {
oauthConfig.Endpoint.TokenURL = "https://" + hostname + "/oauth2_token"
}
// Register with Fs
func init() {
updateTokenURL(oauthConfig, defaultHostname)
fs.Register(&fs.RegInfo{
Name: "pcloud",
Description: "Pcloud",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
optc := new(Options)
err := configstruct.Set(m, optc)
if err != nil {
fs.Errorf(nil, "Failed to read config: %v", err)
}
updateTokenURL(oauthConfig, optc.Hostname)
checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
if auth == nil || auth.Form == nil {
return errors.New("form not found in response")
}
hostname := auth.Form.Get("hostname")
if hostname == "" {
hostname = defaultHostname
}
// Save the hostname in the config
m.Set("hostname", hostname)
// Update the token URL
updateTokenURL(oauthConfig, hostname)
fs.Debugf(nil, "pcloud: got hostname %q", hostname)
return nil
}
opt := oauthutil.Options{
CheckAuth: checkAuth,
StateBlankOK: true, // pCloud seems to drop the state parameter now - see #4210
}
err = oauthutil.Config("pcloud", name, m, oauthConfig, &opt)
err := oauthutil.Config("pcloud", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
//
// TODO: Investigate Unicode simplification (＼ gets converted to \ server-side)
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Pcloud App Client Id\nLeave blank normally.",
}, {
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "d0",
Advanced: true,
}, {
Name: "hostname",
Help: `Hostname to connect to.
This is normally set when rclone initially does the oauth connection,
however you will need to set it by hand if you are using remote config
with rclone authorize.
`,
Default: defaultHostname,
Advanced: true,
Examples: []fs.OptionExample{{
Value: defaultHostname,
Help: "Original/US region",
}, {
Value: "eapi.pcloud.com",
Help: "EU region",
}},
}}...),
Name: config.ConfigClientSecret,
Help: "Pcloud App Client Secret\nLeave blank normally.",
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Enc encoder.MultiEncoder `config:"encoding"`
RootFolderID string `config:"root_folder_id"`
Hostname string `config:"hostname"`
}
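The hostname handling shown above rebuilds the OAuth token URL from whatever host the authorize step reported (api.pcloud.com by default, eapi.pcloud.com for the EU region). A small standalone sketch of that derivation, matching what updateTokenURL does to the shared oauthConfig:

package main

import "fmt"

const defaultHostname = "api.pcloud.com"

// tokenURL rebuilds the OAuth2 token endpoint for a given pCloud API host,
// as updateTokenURL above does on the shared oauthConfig.
func tokenURL(hostname string) string {
	if hostname == "" {
		hostname = defaultHostname
	}
	return "https://" + hostname + "/oauth2_token"
}

func main() {
	fmt.Println(tokenURL(""))                // https://api.pcloud.com/oauth2_token (US region)
	fmt.Println(tokenURL("eapi.pcloud.com")) // https://eapi.pcloud.com/oauth2_token (EU region)
}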
// Fs represents a remote pcloud
@@ -195,7 +138,7 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// parsePath parses a pcloud 'url'
// parsePath parses an pcloud 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
@@ -238,7 +181,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
@@ -293,13 +236,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "failed to configure Pcloud")
}
updateTokenURL(oauthConfig, opt.Hostname)
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot("https://" + opt.Hostname),
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
@@ -314,8 +256,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return err
})
// Get rootFolderID
rootID := f.opt.RootFolderID
// Get rootID
f.dirCache = dircache.New(root, rootID, f)
// Find the current root
@@ -401,7 +342,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
Path: "/createfolder",
Parameters: url.Values{},
}
opts.Parameters.Set("name", f.opt.Enc.FromStandardName(leaf))
opts.Parameters.Set("name", enc.FromStandardName(leaf))
opts.Parameters.Set("folderid", dirIDtoNumber(pathID))
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
@@ -477,7 +418,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
continue
}
}
item.Name = f.opt.Enc.ToStandardName(item.Name)
item.Name = enc.ToStandardName(item.Name)
if fn(item) {
found = true
break
@@ -496,6 +437,10 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
@@ -536,7 +481,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
// Create the directory for the object if it doesn't exist
leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return
}
@@ -567,7 +512,13 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
_, err := f.dirCache.FindDir(ctx, dir, true)
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
return err
}
@@ -579,6 +530,10 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return errors.New("can't purge root directory")
}
dc := f.dirCache
err := dc.FindRoot(ctx, false)
if err != nil {
return err
}
rootID, err := dc.FindDir(ctx, dir, false)
if err != nil {
return err
@@ -655,7 +610,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
Parameters: url.Values{},
}
opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(leaf))
opts.Parameters.Set("toname", enc.FromStandardName(leaf))
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
opts.Parameters.Set("mtime", fmt.Sprintf("%d", srcObj.modTime.Unix()))
var resp *http.Response
@@ -675,18 +630,18 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}
// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) error {
rootID, err := f.dirCache.RootID(ctx, false)
err := f.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
@@ -695,7 +650,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
Path: "/trash_clear",
Parameters: url.Values{},
}
opts.Parameters.Set("folderid", dirIDtoNumber(rootID))
opts.Parameters.Set("folderid", dirIDtoNumber(f.dirCache.RootID()))
var resp *http.Response
var result api.Error
return f.pacer.Call(func() (bool, error) {
@@ -734,7 +689,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
Parameters: url.Values{},
}
opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(leaf))
opts.Parameters.Set("toname", enc.FromStandardName(leaf))
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
var resp *http.Response
var result api.ItemResult
@@ -768,8 +723,58 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := path.Join(srcFs.root, srcRemote)
dstPath := path.Join(f.root, dstRemote)
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
// Refuse to move to or from the root
if srcPath == "" || dstPath == "" {
fs.Debugf(src, "DirMove error: Can't move root")
return errors.New("can't move root directory")
}
// find the root src directory
err := srcFs.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
// find the root dst directory
if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
} else {
if f.dirCache.FoundRoot() {
return fs.ErrorDirExists
}
}
// Find ID of dst parent, creating subdirs if necessary
var leaf, directoryID string
findPath := dstRemote
if dstRemote == "" {
findPath = f.root
}
leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
if err != nil {
return err
}
// Check destination does not exist
if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return err
} else {
return fs.ErrorDirExists
}
}
// Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
if err != nil {
return err
}
@@ -781,8 +786,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
Parameters: url.Values{},
}
opts.Parameters.Set("folderid", dirIDtoNumber(srcID))
opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(dstLeaf))
opts.Parameters.Set("tofolderid", dirIDtoNumber(dstDirectoryID))
opts.Parameters.Set("toname", enc.FromStandardName(leaf))
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
var resp *http.Response
var result api.ItemResult
err = f.pacer.Call(func() (bool, error) {
@@ -804,61 +809,6 @@ func (f *Fs) DirCacheFlush() {
f.dirCache.ResetRoot()
}
func (f *Fs) linkDir(ctx context.Context, dirID string, expire fs.Duration) (string, error) {
opts := rest.Opts{
Method: "POST",
Path: "/getfolderpublink",
Parameters: url.Values{},
}
var result api.PubLinkResult
opts.Parameters.Set("folderid", dirIDtoNumber(dirID))
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
if err != nil {
return "", err
}
return result.Link, err
}
func (f *Fs) linkFile(ctx context.Context, path string, expire fs.Duration) (string, error) {
obj, err := f.NewObject(ctx, path)
if err != nil {
return "", err
}
o := obj.(*Object)
opts := rest.Opts{
Method: "POST",
Path: "/getfilepublink",
Parameters: url.Values{},
}
var result api.PubLinkResult
opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
if err != nil {
return "", err
}
return result.Link, nil
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
dirID, err := f.dirCache.FindDir(ctx, remote, false)
if err == fs.ErrorDirNotFound {
return f.linkFile(ctx, remote, expire)
}
if err != nil {
return "", err
}
return f.linkDir(ctx, dirID, expire)
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
opts := rest.Opts{
@@ -1088,7 +1038,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
remote := o.Remote()
// Create the directory for the object if it doesn't exist
leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true)
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return err
}
@@ -1115,9 +1065,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ContentLength: &size,
Parameters: url.Values{},
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
Options: options,
}
leaf = o.fs.opt.Enc.FromStandardName(leaf)
leaf = enc.FromStandardName(leaf)
opts.Parameters.Set("filename", leaf)
opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
opts.Parameters.Set("nopartial", "1")
@@ -1193,7 +1142,6 @@ var (
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)


@@ -10,7 +10,7 @@ type Response struct {
Status string `json:"status"`
}
// Error satisfies the error interface
// Error statisfies the error interface
func (e *Response) Error() string {
return fmt.Sprintf("%s: %s", e.Status, e.Message)
}


@@ -31,15 +31,14 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/premiumizeme/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
@@ -47,6 +46,8 @@ import (
"golang.org/x/oauth2"
)
const enc = encodings.PremiumizeMe
const (
rcloneClientID = "658922194"
rcloneEncryptedClientSecret = "B5YIvQoRIhcpAYs8HYeyjb9gK-ftmZEbqdh_gNfc4RgO9Q"
@@ -79,7 +80,7 @@ func init() {
Description: "premiumize.me",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
err := oauthutil.Config("premiumizeme", name, m, oauthConfig, nil)
err := oauthutil.Config("premiumizeme", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
@@ -92,23 +93,13 @@ This is not normally used - use oauth instead.
`,
Hide: fs.OptionHideBoth,
Default: "",
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeDoubleQuote |
encoder.EncodeInvalidUtf8),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
Enc encoder.MultiEncoder `config:"encoding"`
APIKey string `config:"api_key"`
}
// Fs represents a remote cloud storage system
@@ -183,7 +174,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string, directoriesOnly bool, filesOnly bool) (info *api.Item, err error) {
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
@@ -315,6 +306,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return f, nil
}
// rootSlash returns root with a slash on if it is empty, otherwise empty string
func (f *Fs) rootSlash() string {
if f.root == "" {
return f.root
}
return f.root + "/"
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
@@ -365,7 +364,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
Path: "/folder/create",
Parameters: f.baseParams(),
MultipartParams: url.Values{
"name": {f.opt.Enc.FromStandardName(leaf)},
"name": {enc.FromStandardName(leaf)},
"parent_id": {pathID},
},
}
@@ -430,7 +429,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
continue
}
item.Name = f.opt.Enc.ToStandardName(item.Name)
item.Name = enc.ToStandardName(item.Name)
if fn(item) {
found = true
break
@@ -450,6 +449,10 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
@@ -489,7 +492,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
// Create the directory for the object if it doesn't exist
leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return
}
@@ -513,7 +516,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
return f.PutUnchecked(ctx, in, src)
default:
return nil, err
}
@@ -540,7 +543,13 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
_, err := f.dirCache.FindDir(ctx, dir, true)
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
return err
}
@@ -552,6 +561,10 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return errors.New("can't purge root directory")
}
dc := f.dirCache
err := dc.FindRoot(ctx, false)
if err != nil {
return err
}
rootID, err := dc.FindDir(ctx, dir, false)
if err != nil {
return err
@@ -609,13 +622,13 @@ func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}
// move a file or folder
@@ -624,8 +637,8 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// between directories and a separate one to rename them. We try to
// call the minimum number of API calls.
func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDirectoryID, newDirectoryID string) (err error) {
newLeaf = f.opt.Enc.FromStandardName(newLeaf)
oldLeaf = f.opt.Enc.FromStandardName(oldLeaf)
newLeaf = enc.FromStandardName(newLeaf)
oldLeaf = enc.FromStandardName(oldLeaf)
doRenameLeaf := oldLeaf != newLeaf
doMove := oldDirectoryID != newDirectoryID
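As the comment above notes, move issues only the calls it needs: a rename when the leaf changed, a re-parent when the directory ID changed, and nothing when neither did. A hypothetical sketch of that decision logic (both callbacks are stand-ins, not the real premiumize.me endpoints):

package main

import "fmt"

// moveMinimal mimics the decision logic in move above, calling the
// hypothetical rename/re-parent helpers only when something changed.
func moveMinimal(id, oldLeaf, newLeaf, oldDirID, newDirID string,
	rename func(id, name string) error,
	reparent func(id, dirID string) error) error {
	if oldLeaf != newLeaf {
		if err := rename(id, newLeaf); err != nil {
			return err
		}
	}
	if oldDirID != newDirID {
		if err := reparent(id, newDirID); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	calls := 0
	rename := func(id, name string) error { calls++; return nil }
	reparent := func(id, dirID string) error { calls++; return nil }
	_ = moveMinimal("42", "a.txt", "a.txt", "d1", "d2", rename, reparent)
	fmt.Println("API calls made:", calls) // 1 - only the re-parent was needed
}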
@@ -731,14 +744,75 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := path.Join(srcFs.root, srcRemote)
dstPath := path.Join(f.root, dstRemote)
srcID, srcDirectoryID, srcLeaf, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
// Refuse to move to or from the root
if srcPath == "" || dstPath == "" {
fs.Debugf(src, "DirMove error: Can't move root")
return errors.New("can't move root directory")
}
// find the root src directory
err := srcFs.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
// find the root dst directory
if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
} else {
if f.dirCache.FoundRoot() {
return fs.ErrorDirExists
}
}
// Find ID of dst parent, creating subdirs if necessary
var leaf, directoryID string
findPath := dstRemote
if dstRemote == "" {
findPath = f.root
}
leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
if err != nil {
return err
}
// Check destination does not exist
if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return err
} else {
return fs.ErrorDirExists
}
}
// Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
if err != nil {
return err
}
// Find ID of src parent, not creating subdirs
var srcLeaf, srcDirectoryID string
findPath = srcRemote
if srcRemote == "" {
findPath = srcFs.root
}
srcLeaf, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false)
if err != nil {
return err
}
// Do the move
err = f.move(ctx, false, srcID, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID)
err = f.move(ctx, false, srcID, srcLeaf, leaf, srcDirectoryID, directoryID)
if err != nil {
return err
}
@@ -747,7 +821,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
_, err := f.dirCache.FindDir(ctx, remote, false)
if err == nil {
return "", fs.ErrorCantShareDirectories
@@ -815,6 +889,11 @@ func (o *Object) Remote() string {
return o.remote
}
// srvPath returns a path for use in server
func (o *Object) srvPath() string {
return enc.FromStandardPath(o.fs.rootSlash() + o.remote)
}
// Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
return "", hash.ErrUnsupported
@@ -905,6 +984,14 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return resp.Body, err
}
// metaHash returns a rough hash of metadata to detect if object has been updated
func (o *Object) metaHash() string {
if !o.hasMetaData {
return ""
}
return fmt.Sprintf("remote=%q, size=%d, modTime=%v, id=%q, mimeType=%q", o.remote, o.size, o.modTime, o.id, o.mimeType)
}
// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
@@ -915,11 +1002,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
size := src.Size()
// Create the directory for the object if it doesn't exist
leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true)
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return err
}
leaf = o.fs.opt.Enc.FromStandardName(leaf)
leaf = enc.FromStandardName(leaf)
var resp *http.Response
var info api.FolderUploadinfoResponse
@@ -927,7 +1014,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Method: "POST",
Path: "/folder/uploadinfo",
Parameters: o.fs.baseParams(),
Options: options,
MultipartParams: url.Values{
"id": {directoryID},
},


@@ -1,43 +0,0 @@
package putio
import (
"fmt"
"net/http"
"github.com/putdotio/go-putio/putio"
"github.com/rclone/rclone/fs/fserrors"
)
func checkStatusCode(resp *http.Response, expected int) error {
if resp.StatusCode != expected {
return &statusCodeError{response: resp}
}
return nil
}
type statusCodeError struct {
response *http.Response
}
func (e *statusCodeError) Error() string {
return fmt.Sprintf("unexpected status code (%d) response while doing %s to %s", e.response.StatusCode, e.response.Request.Method, e.response.Request.URL.String())
}
func (e *statusCodeError) Temporary() bool {
return e.response.StatusCode == 429 || e.response.StatusCode >= 500
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(err error) (bool, error) {
if err == nil {
return false, nil
}
if perr, ok := err.(*putio.ErrorResponse); ok {
err = &statusCodeError{response: perr.Response}
}
if fserrors.ShouldRetry(err) {
return true, err
}
return false, err
}


@@ -17,8 +17,7 @@ import (
"github.com/putdotio/go-putio/putio"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
@@ -31,12 +30,10 @@ type Fs struct {
name string // name of this remote
root string // the path we are working on
features *fs.Features // optional features
opt Options // options for this Fs
client *putio.Client // client for making API calls to Put.io
pacer *fs.Pacer // To pace the API calls
dirCache *dircache.DirCache // Map of directory path to directory id
httpClient *http.Client // base http client
oAuthClient *http.Client // http client with oauth Authorization
oAuthClient *http.Client
}
// ------------------------------------------------------------
@@ -61,34 +58,35 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// parsePath parses a putio 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(err error) (bool, error) {
if err == nil {
return false, nil
}
if fserrors.ShouldRetry(err) {
return true, err
}
if perr, ok := err.(*putio.ErrorResponse); ok {
if perr.Response.StatusCode == 429 || perr.Response.StatusCode >= 500 {
return true, err
}
}
return false, err
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
// defer log.Trace(name, "root=%v", root)("f=%+v, err=%v", &f, &err)
// Parse config into Options struct
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
root = parsePath(root)
httpClient := fshttp.NewClient(fs.Config)
oAuthClient, _, err := oauthutil.NewClientWithBaseClient(name, m, putioConfig, httpClient)
oAuthClient, _, err := oauthutil.NewClient(name, m, putioConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure putio")
}
p := &Fs{
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
client: putio.NewClient(oAuthClient),
httpClient: httpClient,
oAuthClient: oAuthClient,
}
p.features = (&fs.Features{
@@ -147,7 +145,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
var entry putio.File
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "creating folder. part: %s, parentID: %d", leaf, parentID)
entry, err = f.client.Files.CreateFolder(ctx, f.opt.Enc.FromStandardName(leaf), parentID)
entry, err = f.client.Files.CreateFolder(ctx, enc.FromStandardName(leaf), parentID)
return shouldRetry(err)
})
return itoa(entry.ID), err
@@ -174,7 +172,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
return
}
for _, child := range children {
if f.opt.Enc.ToStandardName(child.Name) == leaf {
if enc.ToStandardName(child.Name) == leaf {
found = true
pathIDOut = itoa(child.ID)
if !child.IsDir() {
@@ -197,6 +195,10 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer log.Trace(f, "dir=%v", dir)("err=%v", &err)
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
@@ -212,7 +214,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return
}
for _, child := range children {
remote := path.Join(dir, f.opt.Enc.ToStandardName(child.Name))
remote := path.Join(dir, enc.ToStandardName(child.Name))
// fs.Debugf(f, "child: %s", remote)
if child.IsDir() {
f.dirCache.Put(remote, itoa(child.ID))
@@ -256,11 +258,11 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
// defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err)
size := src.Size()
remote := src.Remote()
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, err
}
loc, err := f.createUpload(ctx, leaf, size, directoryID, src.ModTime(ctx), options)
loc, err := f.createUpload(ctx, leaf, size, directoryID, src.ModTime(ctx))
if err != nil {
return nil, err
}
@@ -280,7 +282,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
return f.newObjectWithInfo(ctx, remote, entry)
}
func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID string, modTime time.Time, options []fs.OpenOption) (location string, err error) {
func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID string, modTime time.Time) (location string, err error) {
// defer log.Trace(f, "name=%v, size=%v, parentID=%v, modTime=%v", name, size, parentID, modTime.String())("location=%v, err=%v", location, &err)
err = f.pacer.Call(func() (bool, error) {
req, err := http.NewRequest("POST", "https://upload.put.io/files/", nil)
@@ -290,12 +292,11 @@ func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("tus-resumable", "1.0.0")
req.Header.Set("upload-length", strconv.FormatInt(size, 10))
b64name := base64.StdEncoding.EncodeToString([]byte(f.opt.Enc.FromStandardName(name)))
b64name := base64.StdEncoding.EncodeToString([]byte(enc.FromStandardName(name)))
b64true := base64.StdEncoding.EncodeToString([]byte("true"))
b64parentID := base64.StdEncoding.EncodeToString([]byte(parentID))
b64modifiedAt := base64.StdEncoding.EncodeToString([]byte(modTime.Format(time.RFC3339)))
req.Header.Set("upload-metadata", fmt.Sprintf("name %s,no-torrent %s,parent_id %s,updated-at %s", b64name, b64true, b64parentID, b64modifiedAt))
fs.OpenOptionAddHTTPHeaders(req.Header, options)
resp, err := f.oAuthClient.Do(req)
retry, err := shouldRetry(err)
if retry {
@@ -317,125 +318,66 @@ func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID
}
func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.Reader) (fileID int64, err error) {
// defer log.Trace(f, "location=%v, size=%v", location, size)("fileID=%v, err=%v", &fileID, &err)
// defer log.Trace(f, "location=%v, size=%v", location, size)("fileID=%v, err=%v", fileID, &err)
if size == 0 {
err = f.pacer.Call(func() (bool, error) {
fs.Debugf(f, "Sending zero length chunk")
_, fileID, err = f.transferChunk(ctx, location, 0, bytes.NewReader([]byte{}), 0)
fileID, err = f.transferChunk(ctx, location, 0, bytes.NewReader([]byte{}), 0)
return shouldRetry(err)
})
return
}
var clientOffset int64
var offsetMismatch bool
var start int64
buf := make([]byte, defaultChunkSize)
for clientOffset < size {
chunkSize := size - clientOffset
if chunkSize >= int64(defaultChunkSize) {
chunkSize = int64(defaultChunkSize)
for start < size {
reqSize := size - start
if reqSize >= int64(defaultChunkSize) {
reqSize = int64(defaultChunkSize)
}
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
chunkStart := clientOffset
reqSize := chunkSize
transferOffset := clientOffset
fs.Debugf(f, "chunkStart: %d, reqSize: %d", chunkStart, reqSize)
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, reqSize)
// Transfer the chunk
err = f.pacer.Call(func() (bool, error) {
if offsetMismatch {
// Get file offset and seek to the position
offset, err := f.getServerOffset(ctx, location)
if err != nil {
return shouldRetry(err)
}
sentBytes := offset - chunkStart
fs.Debugf(f, "sentBytes: %d", sentBytes)
_, err = chunk.Seek(sentBytes, io.SeekStart)
if err != nil {
return shouldRetry(err)
}
transferOffset = offset
reqSize = chunkSize - sentBytes
offsetMismatch = false
}
fs.Debugf(f, "Sending chunk. transferOffset: %d length: %d", transferOffset, reqSize)
var serverOffset int64
serverOffset, fileID, err = f.transferChunk(ctx, location, transferOffset, chunk, reqSize)
if cerr, ok := err.(*statusCodeError); ok && cerr.response.StatusCode == 409 {
offsetMismatch = true
return true, err
}
if serverOffset != (transferOffset + reqSize) {
offsetMismatch = true
return true, errors.New("connection broken")
}
fs.Debugf(f, "Sending chunk. start: %d length: %d", start, reqSize)
// TODO get file offset and seek to the position
fileID, err = f.transferChunk(ctx, location, start, chunk, reqSize)
return shouldRetry(err)
})
if err != nil {
return
}
clientOffset += chunkSize
start += reqSize
}
return
}
func (f *Fs) getServerOffset(ctx context.Context, location string) (offset int64, err error) {
// defer log.Trace(f, "location=%v", location)("offset=%v, err=%v", &offset, &err)
req, err := f.makeUploadHeadRequest(ctx, location)
if err != nil {
return 0, err
}
resp, err := f.oAuthClient.Do(req)
if err != nil {
return 0, err
}
err = checkStatusCode(resp, 200)
if err != nil {
return 0, err
}
return strconv.ParseInt(resp.Header.Get("upload-offset"), 10, 64)
}
func (f *Fs) transferChunk(ctx context.Context, location string, start int64, chunk io.ReadSeeker, chunkSize int64) (serverOffset, fileID int64, err error) {
// defer log.Trace(f, "location=%v, start=%v, chunkSize=%v", location, start, chunkSize)("fileID=%v, err=%v", &fileID, &err)
func (f *Fs) transferChunk(ctx context.Context, location string, start int64, chunk io.ReadSeeker, chunkSize int64) (fileID int64, err error) {
// defer log.Trace(f, "location=%v, start=%v, chunkSize=%v", location, start, chunkSize)("fileID=%v, err=%v", fileID, &err)
_, _ = chunk.Seek(0, io.SeekStart)
req, err := f.makeUploadPatchRequest(ctx, location, chunk, start, chunkSize)
if err != nil {
return
return 0, err
}
resp, err := f.oAuthClient.Do(req)
req = req.WithContext(ctx)
res, err := f.oAuthClient.Do(req)
if err != nil {
return
return 0, err
}
defer func() {
_ = resp.Body.Close()
_ = res.Body.Close()
}()
err = checkStatusCode(resp, 204)
if err != nil {
return
if res.StatusCode != 204 {
return 0, fmt.Errorf("unexpected status code while transferring chunk: %d", res.StatusCode)
}
serverOffset, err = strconv.ParseInt(resp.Header.Get("upload-offset"), 10, 64)
if err != nil {
return
}
sfid := resp.Header.Get("putio-file-id")
sfid := res.Header.Get("putio-file-id")
if sfid != "" {
fileID, err = strconv.ParseInt(sfid, 10, 64)
if err != nil {
return
return 0, err
}
}
return
}
func (f *Fs) makeUploadHeadRequest(ctx context.Context, location string) (*http.Request, error) {
req, err := http.NewRequest("HEAD", location, nil)
if err != nil {
return nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("tus-resumable", "1.0.0")
return req, nil
return fileID, nil
}
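The getServerOffset helper shown above performs the tus protocol's offset query: a HEAD request against the upload location carrying the tus-resumable header, with the server's current position read back from the upload-offset response header so an interrupted chunk can be resumed from the right byte. A standalone sketch of that request using only the standard library:

package main

import (
	"context"
	"fmt"
	"net/http"
	"strconv"
)

// tusOffset asks a tus upload endpoint how many bytes it has already stored,
// the same query getServerOffset above performs before re-seeking a chunk.
func tusOffset(ctx context.Context, client *http.Client, location string) (int64, error) {
	req, err := http.NewRequest("HEAD", location, nil)
	if err != nil {
		return 0, err
	}
	req = req.WithContext(ctx)
	req.Header.Set("tus-resumable", "1.0.0")
	resp, err := client.Do(req)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return 0, fmt.Errorf("unexpected status %d reading upload offset", resp.StatusCode)
	}
	return strconv.ParseInt(resp.Header.Get("upload-offset"), 10, 64)
}

func main() {
	// Usage sketch only - "location" would come from the upload creation response.
	_ = tusOffset
}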
func (f *Fs) makeUploadPatchRequest(ctx context.Context, location string, in io.Reader, offset, length int64) (*http.Request, error) {
@@ -454,13 +396,20 @@ func (f *Fs) makeUploadPatchRequest(ctx context.Context, location string, in io.
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
// defer log.Trace(f, "dir=%v", dir)("err=%v", &err)
_, err = f.dirCache.FindDir(ctx, dir, true)
err = f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
return err
}
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error) {
// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
// defer log.Trace(f, "dir=%v", dir)("err=%v", &err)
root := strings.Trim(path.Join(f.root, dir), "/")
@@ -477,20 +426,18 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
}
dirID := atoi(directoryID)
if check {
// check directory empty
var children []putio.File
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "listing files: %d", dirID)
children, _, err = f.client.Files.List(ctx, dirID)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "Rmdir")
}
if len(children) != 0 {
return errors.New("directory not empty")
}
// check directory empty
var children []putio.File
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "listing files: %d", dirID)
children, _, err = f.client.Files.List(ctx, dirID)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "Rmdir")
}
if len(children) != 0 {
return errors.New("directory not empty")
}
// remove it
@@ -503,26 +450,36 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
return err
}
// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
return f.purgeCheck(ctx, dir, true)
}
// Precision returns the precision
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Purge deletes all the files in the directory
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) (err error) {
func (f *Fs) Purge(ctx context.Context) (err error) {
// defer log.Trace(f, "")("err=%v", &err)
return f.purgeCheck(ctx, dir, false)
if f.root == "" {
return errors.New("can't purge root directory")
}
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
rootID := atoi(f.dirCache.RootID())
// Let putio delete the filesystem tree
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "deleting file: %d", rootID)
err = f.client.Files.Delete(ctx, rootID)
return shouldRetry(err)
})
f.dirCache.ResetRoot()
return err
}
// Copy src to this remote using server side copy operations.
@@ -540,7 +497,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
if !ok {
return nil, fs.ErrorCantCopy
}
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, err
}
@@ -548,7 +505,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
params := url.Values{}
params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
params.Set("parent_id", directoryID)
params.Set("name", f.opt.Enc.FromStandardName(leaf))
params.Set("name", enc.FromStandardName(leaf))
req, err := f.client.NewRequest(ctx, "POST", "/v2/files/copy", strings.NewReader(params.Encode()))
if err != nil {
return false, err
@@ -579,7 +536,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
if !ok {
return nil, fs.ErrorCantMove
}
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, err
}
@@ -587,7 +544,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
params := url.Values{}
params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
params.Set("parent_id", directoryID)
params.Set("name", f.opt.Enc.FromStandardName(leaf))
params.Set("name", enc.FromStandardName(leaf))
req, err := f.client.NewRequest(ctx, "POST", "/v2/files/move", strings.NewReader(params.Encode()))
if err != nil {
return false, err
@@ -617,8 +574,57 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if !ok {
return fs.ErrorCantDirMove
}
srcPath := path.Join(srcFs.root, srcRemote)
dstPath := path.Join(f.root, dstRemote)
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
// Refuse to move to or from the root
if srcPath == "" || dstPath == "" {
return errors.New("can't move root directory")
}
// find the root src directory
err = srcFs.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
// find the root dst directory
if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
} else {
if f.dirCache.FoundRoot() {
return fs.ErrorDirExists
}
}
// Find ID of dst parent, creating subdirs if necessary
var leaf, dstDirectoryID string
findPath := dstRemote
if dstRemote == "" {
findPath = f.root
}
leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true)
if err != nil {
return err
}
// Check destination does not exist
if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return err
} else {
return fs.ErrorDirExists
}
}
// Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
if err != nil {
return err
}
@@ -627,7 +633,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
params := url.Values{}
params.Set("file_id", srcID)
params.Set("parent_id", dstDirectoryID)
params.Set("name", f.opt.Enc.FromStandardName(dstLeaf))
params.Set("name", enc.FromStandardName(leaf))
req, err := f.client.NewRequest(ctx, "POST", "/v2/files/move", strings.NewReader(params.Encode()))
if err != nil {
return false, err


@@ -125,7 +125,7 @@ func (o *Object) setMetadataFromEntry(info putio.File) error {
// Reads the entry for a file from putio
func (o *Object) readEntry(ctx context.Context) (f *putio.File, err error) {
// defer log.Trace(o, "")("f=%+v, err=%v", f, &err)
leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, o.remote, false)
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
@@ -137,7 +137,7 @@ func (o *Object) readEntry(ctx context.Context) (f *putio.File, err error) {
}
err = o.fs.pacer.Call(func() (bool, error) {
// fs.Debugf(o, "requesting child. directoryID: %s, name: %s", directoryID, leaf)
req, err := o.fs.client.NewRequest(ctx, "GET", "/v2/files/"+directoryID+"/child?name="+url.QueryEscape(o.fs.opt.Enc.FromStandardName(leaf)), nil)
req, err := o.fs.client.NewRequest(ctx, "GET", "/v2/files/"+directoryID+"/child?name="+url.QueryEscape(enc.FromStandardName(leaf)), nil)
if err != nil {
return false, err
}
@@ -241,17 +241,14 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
req.Header.Set(header, value)
}
// fs.Debugf(o, "opening file: id=%d", o.file.ID)
resp, err = o.fs.httpClient.Do(req)
resp, err = http.DefaultClient.Do(req)
return shouldRetry(err)
})
if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode >= 400 && perr.Response.StatusCode <= 499 {
_ = resp.Body.Close()
return nil, fserrors.NoRetryError(err)
}
if err != nil {
return nil, err
}
return resp.Body, nil
return resp.Body, err
}
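The error handling above splits failures into retryable and permanent: the shouldRetry helpers earlier retry on 429 and 5xx responses, while Open wraps other 4xx API errors in fserrors.NoRetryError so the transfer fails fast. A tiny sketch of that status-code split:

package main

import "fmt"

// retryable mirrors the status-code test the put.io shouldRetry helpers above
// use: 429 (rate limited) and server errors are worth retrying, other 4xx are not.
func retryable(statusCode int) bool {
	return statusCode == 429 || statusCode >= 500
}

func main() {
	for _, code := range []int{404, 409, 429, 500, 503} {
		fmt.Println(code, "retry:", retryable(code))
	}
}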
// Update the already existing object

Some files were not shown because too many files have changed in this diff