mirror of https://github.com/rclone/rclone.git synced 2026-02-01 09:13:25 +00:00

Compare commits

..

10 Commits

Author SHA1 Message Date
Nick Craig-Wood fd1154828f build: bring pipelines in to line with travis 2019-08-04 17:03:27 +01:00
Nick Craig-Wood 7b687709cf Attempt to cure the double build 2019-08-04 16:11:24 +01:00
Nick Craig-Wood 812fcbbe2c Attempt to fix Windows CPATH 2019-08-04 16:11:24 +01:00
Nick Craig-Wood 85a15d39d0 fix compile_all under linux 2019-08-04 16:11:24 +01:00
Nick Craig-Wood cc39c4e775 Set CPATH under Windows 2019-08-04 16:11:24 +01:00
Nick Craig-Wood 4e46d26a0b Fix BRANCH and apt-get 2019-08-04 16:11:24 +01:00
Nick Craig-Wood 0554daf3d8 Set working directory 2019-08-04 16:11:24 +01:00
Nick Craig-Wood f633996da6 build: whitespace! 2019-08-04 16:11:24 +01:00
Nick Craig-Wood 9b74d1beb1 FIXME DISABLE TRAVIS and APPVEYOR 2019-08-04 16:11:24 +01:00
Nick Craig-Wood 62a4bad5d2 build: add azure pipelines build 2019-08-04 16:11:23 +01:00
1962 changed files with 101769 additions and 288814 deletions

.circleci/config.yml Normal file (50 lines changed)

@@ -0,0 +1,50 @@
---
version: 2
jobs:
build:
machine: true
working_directory: ~/.go_workspace/src/github.com/rclone/rclone
steps:
- checkout
- run:
name: Cross-compile rclone
command: |
docker pull rclone/xgo-cgofuse
go get -v github.com/karalabe/xgo
xgo \
--image=rclone/xgo-cgofuse \
--targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-tags cmount \
.
xgo \
--targets=android/*,ios/* \
.
- run:
name: Prepare artifacts
command: |
mkdir -p /tmp/rclone.dist
cp -R rclone-* /tmp/rclone.dist
mkdir build
cp -R rclone-* build/
- run:
name: Build rclone
command: |
go version
go build
- run:
name: Upload artifacts
command: |
if [[ $CIRCLE_PULL_REQUEST != "" ]]; then
make circleci_upload
fi
- store_artifacts:
path: /tmp/rclone.dist

.gitattributes vendored (4 lines changed)

@@ -1,7 +1,3 @@
# Ignore generated files in GitHub language statistics and diffs
/MANUAL.* linguist-generated=true
/rclone.1 linguist-generated=true
# Don't fiddle with the line endings of test data
**/testdata/** -text
**/test/** -text

.github/workflows/build.yml

@@ -1,254 +0,0 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable build.yml" -*-
name: build
# Trigger the workflow on push or pull request
on:
push:
branches:
- '*'
tags:
- '*'
pull_request:
jobs:
build:
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'modules_race', 'go1.11', 'go1.12', 'go1.13']
include:
- job_name: linux
os: ubuntu-latest
go: '1.14.x'
modules: 'off'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
quicktest: true
deploy: true
- job_name: mac
os: macOS-latest
go: '1.14.x'
modules: 'off'
gotags: '' # cmount doesn't work on osx travis for some reason
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
racequicktest: true
deploy: true
- job_name: windows_amd64
os: windows-latest
go: '1.14.x'
modules: 'off'
gotags: cmount
build_flags: '-include "^windows/amd64" -cgo'
quicktest: true
racequicktest: true
deploy: true
- job_name: windows_386
os: windows-latest
go: '1.14.x'
modules: 'off'
gotags: cmount
goarch: '386'
cgo: '1'
build_flags: '-include "^windows/386" -cgo'
quicktest: true
deploy: true
- job_name: other_os
os: ubuntu-latest
go: '1.14.x'
modules: 'off'
build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
compile_all: true
deploy: true
- job_name: modules_race
os: ubuntu-latest
go: '1.14.x'
modules: 'on'
quicktest: true
racequicktest: true
- job_name: go1.11
os: ubuntu-latest
go: '1.11.x'
modules: 'off'
quicktest: true
- job_name: go1.12
os: ubuntu-latest
go: '1.12.x'
modules: 'off'
quicktest: true
- job_name: go1.13
os: ubuntu-latest
go: '1.13.x'
modules: 'off'
quicktest: true
name: ${{ matrix.job_name }}
runs-on: ${{ matrix.os }}
steps:
- name: Checkout
uses: actions/checkout@v1
with:
# Checkout into a fixed path to avoid import path problems on go < 1.11
path: ./src/github.com/rclone/rclone
- name: Install Go
uses: actions/setup-go@v1
with:
go-version: ${{ matrix.go }}
- name: Set environment variables
shell: bash
run: |
echo '::set-env name=GOPATH::${{ runner.workspace }}'
echo '::add-path::${{ runner.workspace }}/bin'
echo '::set-env name=GO111MODULE::${{ matrix.modules }}'
echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
- name: Install Libraries on Linux
shell: bash
run: |
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get install fuse libfuse-dev rpm pkg-config
if: matrix.os == 'ubuntu-latest'
- name: Install Libraries on macOS
shell: bash
run: |
brew update
brew cask install osxfuse
if: matrix.os == 'macOS-latest'
- name: Install Libraries on Windows
shell: powershell
run: |
$ProgressPreference = 'SilentlyContinue'
choco install -y winfsp zip
Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
if ($env:GOARCH -eq "386") {
choco install -y mingw --forcex86 --force
Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
}
# Copy mingw32-make.exe to make.exe so the same command line
# can be used on Windows as on macOS and Linux
$path = (get-command mingw32-make.exe).Path
Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
if: matrix.os == 'windows-latest'
- name: Print Go version and environment
shell: bash
run: |
printf "Using go at: $(which go)\n"
printf "Go version: $(go version)\n"
printf "\n\nGo environment:\n\n"
go env
printf "\n\nRclone environment:\n\n"
make vars
printf "\n\nSystem environment:\n\n"
env
- name: Run tests
shell: bash
run: |
make
make quicktest
if: matrix.quicktest
- name: Race test
shell: bash
run: |
make racequicktest
if: matrix.racequicktest
- name: Code quality test
shell: bash
run: |
make build_dep
make check
if: matrix.check
- name: Compile all architectures test
shell: bash
run: |
make
make compile_all
if: matrix.compile_all
- name: Deploy built binaries
shell: bash
run: |
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep ; fi
make travis_beta
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# working-directory: '$(modulePath)'
# Deploy binaries if enabled in config && not a PR && not a fork
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
xgo:
timeout-minutes: 60
name: "xgo cross compile"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v1
with:
# Checkout into a fixed path to avoid import path problems on go < 1.11
path: ./src/github.com/rclone/rclone
- name: Set environment variables
shell: bash
run: |
echo '::set-env name=GOPATH::${{ runner.workspace }}'
echo '::add-path::${{ runner.workspace }}/bin'
- name: Cross-compile rclone
run: |
docker pull billziss/xgo-cgofuse
go get -v github.com/karalabe/xgo
xgo \
-image=billziss/xgo-cgofuse \
-targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-tags cmount \
-dest build \
.
xgo \
-image=billziss/xgo-cgofuse \
-targets=android/*,ios/* \
-dest build \
.
- name: Build rclone
run: |
docker pull golang
docker run --rm -v "$PWD":/usr/src/rclone -w /usr/src/rclone golang go build -mod=vendor -v
- name: Upload artifacts
run: |
make circleci_upload
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# Upload artifacts if not a PR && not a fork
if: github.head_ref == '' && github.repository == 'rclone/rclone'

.gitignore vendored (3 lines changed)

@@ -7,5 +7,4 @@ rclone.iml
.idea
.history
*.test
*.log
*.iml
*.log

.pkgr.yml Normal file (2 lines changed)

@@ -0,0 +1,2 @@
default_dependencies: false
cli: rclone

CONTRIBUTING.md

@@ -82,9 +82,13 @@ Your patch will get reviewed and you might get asked to fix some stuff.
If so, then make the changes in the same branch, squash the commits,
rebase it to master then push it to GitHub with `--force`.
## CI for your fork ##
## Enabling CI for your fork ##
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
The CI config files for rclone have taken care of forks of the project, so you can enable CI for your fork repo easily.
rclone currently uses [Travis CI](https://travis-ci.org/), [AppVeyor](https://ci.appveyor.com/), and
[Circle CI](https://circleci.com/) to build the project. To enable them for your fork, simply go into their
websites, find your fork of rclone, and enable building there.
## Testing ##
@@ -114,7 +118,7 @@ but they can be run against any of the remotes.
cd fs/sync
go test -v -remote TestDrive:
go test -v -remote TestDrive: -fast-list
go test -v -remote TestDrive: -subdir
cd fs/operations
go test -v -remote TestDrive:
@@ -148,7 +152,6 @@ with modules beneath.
* ...commands
* docs - the documentation and website
* content - adjust these docs only - everything else is autogenerated
* command - these are auto generated - edit the corresponding .go file
* fs - main rclone definitions - minimal amount of code
* accounting - bandwidth limiting and statistics
* asyncreader - an io.Reader which reads ahead
@@ -204,9 +207,6 @@ don't need to run these when adding a feature.
Documentation for rclone sub commands is with their code, eg
`cmd/ls/ls.go`.
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy.
## Making a release ##
There are separate instructions for making a release in the RELEASE.md
@@ -341,11 +341,6 @@ Getting going
* Add your remote to the imports in `backend/all/all.go`
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable.
* Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
* `rclone purge -v TestRemote:rclone-info`
* `rclone info --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
* `go run cmd/info/internal/build_csv/main.go -o remote.csv remote.json`
* open `remote.csv` in a spreadsheet and examine
Unit tests
@@ -358,7 +353,7 @@ Integration tests
* Add your backend to `fstest/test_all/config.yaml`
* Once you've done that then you can use the integration test framework from the project root:
* go install ./...
* test_all -backends remote
* test_all -backend remote
Or if you want to run the integration tests manually:
@@ -367,59 +362,19 @@ Or if you want to run the integration tests manually:
* `go test -v -remote TestRemote:`
* `cd fs/sync`
* `go test -v -remote TestRemote:`
* If your remote defines `ListR` check with this also
* If you are making a bucket based remote, then check with this also
* `go test -v -remote TestRemote: -subdir`
* And if your remote defines `ListR` this also
* `go test -v -remote TestRemote: -fast-list`
See the [testing](#testing) section for more information on integration tests.
Add your fs to the docs - you'll need to pick an icon for it from
[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
alphabetical order of full name of remote (eg `drive` is ordered as
`Google Drive`) but with the local file system last.
Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.
* `README.md` - main GitHub page
* `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
* make sure this has the `autogenerated options` comments in (see your reference backend docs)
* update them with `make backenddocs` - revert any changes in other backends
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/about.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `bin/make_manual.py` - add the page to the `docs` constant
Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.
## Writing a plugin ##
New features (backends, commands) can also be added "out-of-tree", through Go plugins.
Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
Usage
- Naming
- Plugins names must have the pattern `librcloneplugin_KIND_NAME.so`.
- `KIND` should be one of `backend`, `command` or `bundle`.
- Example: A plugin with backend support for PiFS would be called
`librcloneplugin_backend_pifs.so`.
- Loading
- Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
- Supported on rclone v1.50 or greater.
- All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
- If this variable doesn't exist, plugin support is disabled.
- Plugins must be compiled against the exact version of rclone to work.
(The rclone used during building the plugin must be the same as the source of rclone)
Building
To turn your existing additions into a Go plugin, move them to an external repository
and change the top-level package name to `main`.
Check `rclone --version` and make sure that the plugin's rclone dependency and host Go version match.
Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)
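For orientation, a minimal plugin source might look like the sketch below. Everything in it is illustrative: the `pifs` backend name follows the naming example above, and the `fs.RegInfo` fields and `NewFs` signature are assumed to match the rclone version you compile against (the `backend/azureblob` diff later in this compare shows the same `NewFs` shape).

```go
// Sketch only: a minimal out-of-tree backend plugin, built with
//   go build -buildmode=plugin -o librcloneplugin_backend_pifs.so .
// The "pifs" name and the stub NewFs are placeholders.
package main

import (
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

func init() {
	// Loading the plugin runs init, which registers the backend
	// just like an in-tree backend would.
	fs.Register(&fs.RegInfo{
		Name:        "pifs",
		Description: "Illustrative backend stub",
		NewFs:       NewFs,
	})
}

// NewFs constructs the backend; the signature is assumed to match
// in-tree backends of this era (see backend/azureblob in this diff).
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	return nil, errors.New("pifs: illustration only, not implemented")
}
```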

DISABLED.appveyor.yml Normal file (49 lines changed)

@@ -0,0 +1,49 @@
version: "{build}"
os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\rclone\rclone
cache:
- '%LocalAppData%\go-build'
environment:
GOPATH: C:\gopath
CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
ORIGPATH: '%PATH%'
NOCCPATH: C:\MinGW\bin;%GOPATH%\bin;%PATH%
PATHCC64: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%NOCCPATH%
PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
PATH: '%PATHCC64%'
RCLONE_CONFIG_PASS:
secure: sq9CPBbwaeKJv+yd24U44neORYPQVy6jsjnQptC+5yk=
install:
- choco install winfsp -y
- choco install zip -y
- copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe
build_script:
- echo %PATH%
- echo %GOPATH%
- go version
- go env
- go install
- go build
- make log_since_last_release > %TEMP%\git-log.txt
- make version > %TEMP%\version
- set /p RCLONE_VERSION=<%TEMP%\version
- set PATH=%PATHCC32%
- go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/386" -cgo -tags cmount %RCLONE_VERSION%
- set PATH=%PATHCC64%
- go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/amd64" -cgo -no-clean -tags cmount %RCLONE_VERSION%
test_script:
- make GOTAGS=cmount quicktest
artifacts:
- path: rclone.exe
- path: build/*-v*.zip
deploy_script:
- IF "%APPVEYOR_REPO_NAME%" == "rclone/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload

DISABLED.travis.yml Normal file (128 lines changed)

@@ -0,0 +1,128 @@
---
language: go
sudo: required
dist: xenial
os:
- linux
go_import_path: github.com/rclone/rclone
before_install:
- git fetch --unshallow --tags
- |
if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
fi
if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
brew update
brew tap caskroom/cask
brew cask install osxfuse
fi
if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then
choco install -y winfsp zip make
cd ../.. # fix crlf in git checkout
mv $TRAVIS_REPO_SLUG _old
git config --global core.autocrlf false
git clone _old $TRAVIS_REPO_SLUG
cd $TRAVIS_REPO_SLUG
fi
install:
- make vars
env:
global:
- GOTAGS=cmount
- GOMAXPROCS=8 # workaround for cmd/mount tests locking up - see #3154
- GO111MODULE=off
- GITHUB_USER=ncw
- secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
- secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
addons:
apt:
packages:
- fuse
- libfuse-dev
- rpm
- pkg-config
cache:
directories:
- $HOME/.cache/go-build
matrix:
allow_failures:
- go: tip
include:
- go: 1.9.x
script:
- make quicktest
- go: 1.10.x
script:
- make quicktest
- go: 1.11.x
script:
- make quicktest
- go: 1.12.x
name: Linux
env:
- GOTAGS=cmount
- BUILD_FLAGS='-include "^linux/"'
- DEPLOY=true
script:
- make build_dep
- make check
- make quicktest
- go: 1.12.x
name: Go Modules / Race
env:
- GO111MODULE=on
- GOPROXY=https://proxy.golang.org
script:
- make quicktest
- make racequicktest
- go: 1.12.x
name: Other OS
env:
- DEPLOY=true
- BUILD_FLAGS='-exclude "^(windows|darwin|linux)/"'
script:
- make
- make compile_all
- go: 1.12.x
name: macOS
os: osx
env:
- GOTAGS= # cmount doesn't work on osx travis for some reason
- BUILD_FLAGS='-include "^darwin/" -cgo'
- DEPLOY=true
cache:
directories:
- $HOME/Library/Caches/go-build
script:
- make
- make quicktest
- make racequicktest
# - os: windows
# name: Windows
# go: 1.12.x
# env:
# - GOTAGS=cmount
# - CPATH='C:\Program Files (x86)\WinFsp\inc\fuse'
# - BUILD_FLAGS='-include "^windows/amd64" -cgo' # 386 doesn't build yet
# #filter_secrets: false # works around a problem with secrets under windows
# cache:
# directories:
# - ${LocalAppData}/go-build
# script:
# - make
# - make quicktest
# - make racequicktest
- go: tip
script:
- make quicktest
deploy:
provider: script
script: make travis_beta
skip_cleanup: true
on:
repo: rclone/rclone
all_branches: true
condition: $TRAVIS_PULL_REQUEST == false && $DEPLOY == true

Dockerfile

@@ -1,22 +0,0 @@
FROM golang AS builder
COPY . /go/src/github.com/rclone/rclone/
WORKDIR /go/src/github.com/rclone/rclone/
RUN make quicktest
RUN \
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
make
RUN ./rclone version
# Begin final image
FROM alpine:latest
RUN apk --no-cache add ca-certificates fuse
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
ENTRYPOINT [ "rclone" ]
WORKDIR /data
ENV XDG_CONFIG_HOME=/config

MAINTAINERS.md

@@ -12,7 +12,6 @@ Current active maintainers of rclone are:
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud & yandex backends |
| Ivan Andreev | @ivandeex | chunker & mailru backends |
**This is a work in progress Draft**

MANUAL.html generated (7690 lines changed)
File diff suppressed because it is too large

MANUAL.md generated (6745 lines changed)
File diff suppressed because it is too large

MANUAL.txt generated (6956 lines changed)
File diff suppressed because it is too large

Makefile

@@ -1,37 +1,23 @@
SHELL = bash
# Branch we are working on
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD))
# Tag of the current commit, if any. If this is not "" then we are building a release
RELEASE_TAG := $(shell git tag -l --points-at HEAD)
# Version of last release (may not be on this branch)
VERSION := $(shell cat VERSION)
# Last tag on this branch
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(shell git rev-parse --abbrev-ref HEAD))
LAST_TAG := $(shell git describe --tags --abbrev=0)
# If we are working on a release, override branch to master
ifdef RELEASE_TAG
ifeq ($(BRANCH),$(LAST_TAG))
BRANCH := master
endif
TAG_BRANCH := -$(BRANCH)
BRANCH_PATH := branch/
# If building HEAD or master then unset TAG_BRANCH and BRANCH_PATH
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
TAG_BRANCH :=
BRANCH_PATH :=
endif
# Make version suffix -DDD-gCCCCCCCC (D=commits since last release, C=Commit) or blank
VERSION_SUFFIX := $(shell git describe --abbrev=8 --tags | perl -lpe 's/^v\d+\.\d+\.\d+//; s/^-(\d+)/"-".sprintf("%03d",$$1)/e;')
# TAG is current version + number of commits since last release + branch
TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)
NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
ifndef RELEASE_TAG
TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
ifneq ($(TAG),$(LAST_TAG))
TAG := $(TAG)-beta
endif
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
ifdef BETA_SUBDIR
BETA_SUBDIR := /$(BETA_SUBDIR)
endif
BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
BETA_PATH := $(BRANCH_PATH)$(TAG)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
@@ -41,23 +27,19 @@ BUILDTAGS=-tags "$(GOTAGS)"
LINTTAGS=--build-tags "$(GOTAGS)"
endif
.PHONY: rclone test_all vars version
.PHONY: rclone vars version
rclone:
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
mkdir -p `go env GOPATH`/bin/
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
test_all:
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
touch fs/version.go
go install -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
cp -av `go env GOPATH`/bin/rclone .
vars:
@echo SHELL="'$(SHELL)'"
@echo BRANCH="'$(BRANCH)'"
@echo TAG="'$(TAG)'"
@echo VERSION="'$(VERSION)'"
@echo NEXT_VERSION="'$(NEXT_VERSION)'"
@echo LAST_TAG="'$(LAST_TAG)'"
@echo NEW_TAG="'$(NEW_TAG)'"
@echo GO_VERSION="'$(GO_VERSION)'"
@echo BETA_URL="'$(BETA_URL)'"
@@ -65,7 +47,8 @@ version:
@echo '$(TAG)'
# Full suite of integration tests
test: rclone test_all
test: rclone
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
-test_all 2>&1 | tee test_all.log
@echo "Written logs in test_all.log"
@@ -88,8 +71,8 @@ build_dep:
# Get the release dependencies
release_dep:
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
go run bin/get-github-release.go -extract github-release aktau/github-release 'linux-amd64-github-release.tar.bz2'
go get -u github.com/goreleaser/nfpm/...
go get -u github.com/aktau/github-release
# Update dependencies
update:
@@ -97,11 +80,6 @@ update:
GO111MODULE=on go mod tidy
GO111MODULE=on go mod vendor
# Tidy the module dependencies
tidy:
GO111MODULE=on go mod tidy
GO111MODULE=on go mod vendor
doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
rclone.1: MANUAL.md
@@ -183,9 +161,6 @@ endif
@echo Beta release ready at $(BETA_URL)
circleci_upload:
sudo chown -R $$USER build
find build -type l -delete
gzip -r9v build
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifndef BRANCH_PATH
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
@@ -193,14 +168,14 @@ endif
@echo Beta release ready at $(BETA_URL)/testbuilds
travis_beta:
ifeq (linux,$(filter linux,$(subst Linux,linux,$(TRAVIS_OS_NAME) $(AGENT_OS))))
ifeq ($(TRAVIS_OS_NAME),linux)
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
endif
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
@echo Beta release ready at $(BETA_URL)
@@ -212,25 +187,24 @@ serve: website
cd docs && hugo server -v -w
tag: doc
@echo "Old tag is $(VERSION)"
@echo "New tag is $(NEXT_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_VERSION)" > VERSION
git tag -s -m "Version $(NEXT_VERSION)" $(NEXT_VERSION)
bin/make_changelog.py $(LAST_TAG) $(NEXT_VERSION) > docs/content/changelog.md.new
@echo "Old tag is $(LAST_TAG)"
@echo "New tag is $(NEW_TAG)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
echo -n "$(NEW_TAG)" > docs/layouts/partials/version.html
git tag -s -m "Version $(NEW_TAG)" $(NEW_TAG)
bin/make_changelog.py $(LAST_TAG) $(NEW_TAG) > docs/content/changelog.md.new
mv docs/content/changelog.md.new docs/content/changelog.md
@echo "Edit the new changelog in docs/content/changelog.md"
@echo "Then commit all the changes"
@echo git commit -m \"Version $(NEXT_VERSION)\" -a -v
@echo git commit -m \"Version $(NEW_TAG)\" -a -v
@echo "And finally run make retag before make cross etc"
retag:
git tag -f -s -m "Version $(VERSION)" $(VERSION)
git tag -f -s -m "Version $(LAST_TAG)" $(LAST_TAG)
startdev:
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(VERSION)-DEV\"\n" | gofmt > fs/version.go
git commit -m "Start $(VERSION)-DEV development" fs/version.go
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(LAST_TAG)-DEV\"\n" | gofmt > fs/version.go
git commit -m "Start $(LAST_TAG)-DEV development" fs/version.go
winzip:
zip -9 rclone-$(TAG).zip rclone.exe

README.md

@@ -1,4 +1,4 @@
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
[![Logo](https://rclone.org/img/rclone-120x120.png)](https://rclone.org/)
[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
@@ -6,12 +6,13 @@
[Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |
[Forum](https://forum.rclone.org/)
[Forum](https://forum.rclone.org/) |
[![Build Status](https://github.com/rclone/rclone/workflows/build/badge.svg)](https://github.com/rclone/rclone/actions?query=workflow%3Abuild)
[![Build Status](https://travis-ci.org/rclone/rclone.svg?branch=master)](https://travis-ci.org/rclone/rclone)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/rclone/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/rclone/rclone)
[![CircleCI](https://circleci.com/gh/rclone/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/rclone/rclone/tree/master)
[![Go Report Card](https://goreportcard.com/badge/github.com/rclone/rclone)](https://goreportcard.com/report/github.com/rclone/rclone)
[![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone)
[![Docker Pulls](https://img.shields.io/docker/pulls/rclone/rclone)](https://hub.docker.com/r/rclone/rclone)
# Rclone
@@ -19,19 +20,17 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
## Storage providers
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
* 1Fichier [:page_facing_up:](https://rclone.org/ficher/)
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
@@ -40,10 +39,8 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Memory [:page_facing_up:](https://rclone.org/memory/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
@@ -54,14 +51,11 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* put.io [:page_facing_up:](https://rclone.org/webdav/#put-io)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
@@ -78,7 +72,6 @@ Please see [the full list of all storage providers and their features](https://r
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, e.g. two different cloud accounts
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))

RELEASE.md

@@ -1,28 +1,20 @@
# Release
This file describes how to make the various kinds of releases
## Extra required software for making a release
Extra required software for making a release
* [github-release](https://github.com/aktau/github-release) for uploading packages
* pandoc for making the html and man pages
## Making a release
* git checkout master
* git pull
Making a release
* git status - make sure everything is checked in
* Check GitHub actions build for master is Green
* Check travis & appveyor builds are green
* make check
* make test # see integration test server or run locally
* make tag
* edit docs/content/changelog.md # make sure to remove duplicate logs from point releases
* make tidy
* edit docs/content/changelog.md
* make doc
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
* make retag
* git push --tags origin master
* # Wait for the GitHub builds to complete then...
* # Wait for the appveyor and travis builds to complete then...
* make fetch_binaries
* make tarball
* make sign_upload
@@ -34,8 +26,8 @@ This file describes how to make the various kinds of releases
* # announce with forum post, twitter post, G+ post
Early in the next release cycle update the vendored dependencies
* Review any pinned packages in go.mod and remove if possible
* GO111MODULE=on go get -u github.com/spf13/cobra@master
* make update
* git status
* git add new files
@@ -56,56 +48,24 @@ Can be fixed with
* GO111MODULE=on go mod vendor
## Making a point release
If rclone needs a point release due to some horrendous bug:
First make the release branch. If this is a second point release then
this will be done already.
* BASE_TAG=v1.XX # eg v1.49
* NEW_TAG=${BASE_TAG}.Y # eg v1.49.1
* echo $BASE_TAG $NEW_TAG # v1.49 v1.49.1
* git branch ${BASE_TAG} ${BASE_TAG}-fixes
Now
* git co ${BASE_TAG}-fixes
Making a point release. If rclone needs a point release due to some
horrendous bug, then
* git branch v1.XX v1.XX-fixes
* git cherry-pick any fixes
* Test (see above)
* make NEXT_VERSION=${NEW_TAG} tag
* make NEW_TAG=v1.XX.1 tag
* edit docs/content/changelog.md
* make TAG=${NEW_TAG} doc
* git commit -a -v -m "Version ${NEW_TAG}"
* git tag -d ${NEW_TAG}
* git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
* git push --tags -u origin ${BASE_TAG}-fixes
* Wait for builds to complete
* make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
* make TAG=${NEW_TAG} tarball
* make TAG=${NEW_TAG} sign_upload
* make TAG=${NEW_TAG} check_sign
* make TAG=${NEW_TAG} upload
* make TAG=${NEW_TAG} upload_website
* make TAG=${NEW_TAG} upload_github
* NB this overwrites the current beta so we need to do this
* git co master
* make VERSION=${NEW_TAG} startdev
* # cherry pick the changes to the changelog and VERSION
* git checkout ${BASE_TAG}-fixes VERSION docs/content/changelog.md
* git commit --amend
* git push
* make TAG=v1.43.1 doc
* git commit -a -v -m "Version v1.XX.1"
* git tag -d -v1.XX.1
* git tag -s -m "Version v1.XX.1" v1.XX.1
* git push --tags -u origin v1.XX-fixes
* make BRANCH_PATH= TAG=v1.43.1 fetch_binaries
* make TAG=v1.43.1 tarball
* make TAG=v1.43.1 sign_upload
* make TAG=v1.43.1 check_sign
* make TAG=v1.43.1 upload
* make TAG=v1.43.1 upload_website
* make TAG=v1.43.1 upload_github
* NB this overwrites the current beta so after the release, rebuild the last travis build
* Announce!
## Making a manual build of docker
The rclone docker image should autobuild on docker hub. If it doesn't
or needs to be updated then rebuild like this.
```
docker build -t rclone/rclone:1.49.1 -t rclone/rclone:1.49 -t rclone/rclone:1 -t rclone/rclone:latest .
docker push rclone/rclone:1.49.1
docker push rclone/rclone:1.49
docker push rclone/rclone:1
docker push rclone/rclone:latest
```

VERSION

@@ -1 +0,0 @@
v1.51.0

azure-pipelines.yml Normal file (189 lines changed)

@@ -0,0 +1,189 @@
---
# Azure pipelines build for rclone
# Parts stolen shamelessly from all round the Internet, especially Caddy
trigger:
tags:
include:
- '*'
strategy:
matrix:
go1.9:
imageName: ubuntu-16.04
gorootDir: /usr/local
GO_VERSION: go1.9.7
MAKE_QUICKTEST: true
go1.10:
imageName: ubuntu-16.04
gorootDir: /usr/local
GO_VERSION: go1.10.8
MAKE_QUICKTEST: true
go1.11:
imageName: ubuntu-16.04
gorootDir: /usr/local
GO_VERSION: go1.11.8
MAKE_QUICKTEST: true
linux:
imageName: ubuntu-16.04
gorootDir: /usr/local
GO_VERSION: latest
GOTAGS: cmount
BUILD_FLAGS: '-include "^linux/"'
MAKE_CHECK: true
MAKE_QUICKTEST: true
DEPLOY: true
other_os:
imageName: ubuntu-16.04
gorootDir: /usr/local
GO_VERSION: latest
BUILD_FLAGS: '-exclude "^(windows|darwin|linux)/"'
MAKE_COMPILE_ALL: true
DEPLOY: true
modules_race:
imageName: ubuntu-16.04
gorootDir: /usr/local
GO_VERSION: latest
GO111MODULE: on
GOPROXY: https://proxy.golang.org
MAKE_QUICKTEST: true
RACEMAKE_QUICKTEST: true
mac:
imageName: macos-10.13
gorootDir: /usr/local
GO_VERSION: latest
GOTAGS: "" # cmount doesn't work on osx travis for some reason
BUILD_FLAGS: '-include "^darwin/" -cgo'
MAKE_QUICKTEST: true
RACEMAKE_QUICKTEST: true
DEPLOY: true
windows:
imageName: windows-2019
gorootDir: C:\
GO_VERSION: latest
BUILD_FLAGS: '-include "^windows/amd64" -cgo' # 386 doesn't build yet
MAKE_QUICKTEST: true
DEPLOY: true
pool:
vmImage: $(imageName)
variables:
GOROOT: $(gorootDir)/go
GOPATH: $(system.defaultWorkingDirectory)/gopath
GOBIN: $(GOPATH)/bin
modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)'
GO111MODULE: 'off'
GOTAGS: cmount
GO_LATEST: false
CPATH: ''
steps:
- bash: |
latestGo=$(curl "https://golang.org/VERSION?m=text")
echo "##vso[task.setvariable variable=GO_VERSION]$latestGo"
echo "##vso[task.setvariable variable=GO_LATEST]true"
echo "Latest Go version: $latestGo"
condition: eq( variables['GO_VERSION'], 'latest' )
displayName: "Get latest Go version"
- bash: |
sudo rm -f $(which go)
echo '##vso[task.prependpath]$(GOBIN)'
echo '##vso[task.prependpath]$(GOROOT)/bin'
mkdir -p '$(modulePath)'
shopt -s extglob
shopt -s dotglob
mv !(gopath) '$(modulePath)'
displayName: Remove old Go, set GOBIN/GOROOT, and move project into GOPATH
# Install Libraries (varies by platform)
- bash: |
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get install fuse libfuse-dev rpm pkg-config
condition: eq( variables['Agent.OS'], 'Linux' )
displayName: Install Libraries on Linux
- bash: |
brew update
brew tap caskroom/cask
brew cask install osxfuse
condition: eq( variables['Agent.OS'], 'Darwin' )
displayName: Install Libraries on macOS
- powershell: |
choco install -y winfsp zip make
Write-Host "##vso[task.setvariable variable=CPATH]C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
condition: eq( variables['Agent.OS'], 'Windows_NT' )
displayName: Install Libraries on Windows
# Install Go (this varies by platform)
- bash: |
wget "https://dl.google.com/go/$(GO_VERSION).linux-amd64.tar.gz"
sudo mkdir $(gorootDir)
sudo chown ${USER}:${USER} $(gorootDir)
tar -C $(gorootDir) -xzf "$(GO_VERSION).linux-amd64.tar.gz"
condition: eq( variables['Agent.OS'], 'Linux' )
displayName: Install Go on Linux
- bash: |
wget "https://dl.google.com/go/$(GO_VERSION).darwin-amd64.tar.gz"
sudo tar -C $(gorootDir) -xzf "$(GO_VERSION).darwin-amd64.tar.gz"
condition: eq( variables['Agent.OS'], 'Darwin' )
displayName: Install Go on macOS
- powershell: |
Write-Host "Downloading Go... (please be patient, I am very slow)"
(New-Object System.Net.WebClient).DownloadFile("https://dl.google.com/go/$(GO_VERSION).windows-amd64.zip", "$(GO_VERSION).windows-amd64.zip")
Write-Host "Extracting Go... (I'm slow too)"
Expand-Archive "$(GO_VERSION).windows-amd64.zip" -DestinationPath "$(gorootDir)"
condition: eq( variables['Agent.OS'], 'Windows_NT' )
displayName: Install Go on Windows
- bash: |
printf "Using go at: $(which go)\n"
printf "Go version: $(go version)\n"
printf "\n\nGo environment:\n\n"
go env
printf "\n\nSystem environment:\n\n"
env
printf "\n\nRclone environment:\n\n"
make vars
workingDirectory: '$(modulePath)'
displayName: Print Go version and environment
- script: |
make
make quicktest
workingDirectory: '$(modulePath)'
displayName: Run tests
condition: eq( variables['MAKE_QUICKTEST'], 'true' )
- bash: |
make racequicktest
workingDirectory: '$(modulePath)'
displayName: Race test
condition: eq( variables['RACEMAKE_QUICKTEST'], 'true' )
- bash: |
make build_dep
make check
workingDirectory: '$(modulePath)'
displayName: Code quality test
condition: eq( variables['MAKE_CHECK'], 'true' )
- bash: |
make compile_all
workingDirectory: '$(modulePath)'
displayName: Compile all architectures test
condition: eq( variables['MAKE_COMPILE_ALL'], 'true' )
- bash: |
make vars # FIXME travis_beta
workingDirectory: '$(modulePath)'
displayName: Deploy built binaries
condition: and( eq( variables['DEPLOY'], 'true' ), ne( variables['Build.Reason'], 'PullRequest' ) )

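The "Get latest Go version" step in the pipeline above just fetches a plain-text version string from golang.org. A hedged Go equivalent of that curl, for reference only:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Same endpoint the pipeline's "Get latest Go version" step curls.
	resp, err := http.Get("https://golang.org/VERSION?m=text")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	version, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("latest Go release: %s\n", version)
}
```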
backend/all/all.go

@@ -8,7 +8,6 @@ import (
_ "github.com/rclone/rclone/backend/b2"
_ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/chunker"
_ "github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/dropbox"
@@ -21,20 +20,13 @@ import (
_ "github.com/rclone/rclone/backend/jottacloud"
_ "github.com/rclone/rclone/backend/koofr"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/mailru"
_ "github.com/rclone/rclone/backend/mega"
_ "github.com/rclone/rclone/backend/memory"
_ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/press"
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/webdav"

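The blank imports in backend/all/all.go exist only for their init side effects: each backend package registers itself with `fs.Register` when imported, so adding or deleting one import line adds or drops that backend from the build. A sketch of how a consumer sees the result; `fs.Registry` as the exported slice of `*fs.RegInfo` is an assumption about the API of this era:

```go
package main

import (
	"fmt"

	_ "github.com/rclone/rclone/backend/all" // each backend's init() calls fs.Register

	"github.com/rclone/rclone/fs"
)

func main() {
	// Every backend pulled in by the blank import above is now in
	// the registry (assumed API: fs.Registry).
	for _, ri := range fs.Registry {
		fmt.Println(ri.Name)
	}
}
```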
backend/amazonclouddrive/amazonclouddrive.go

@@ -32,7 +32,6 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"golang.org/x/oauth2"
@@ -136,23 +135,15 @@ which downloads the file through a temporary URL directly from the
underlying S3 storage.`,
Default: defaultTempLinkThreshold,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: (encoder.Base |
encoder.EncodeInvalidUtf8),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Checkpoint string `config:"checkpoint"`
UploadWaitPerGB fs.Duration `config:"upload_wait_per_gb"`
TempLinkThreshold fs.SizeSuffix `config:"templink_threshold"`
Enc encoder.MultiEncoder `config:"encoding"`
Checkpoint string `config:"checkpoint"`
UploadWaitPerGB fs.Duration `config:"upload_wait_per_gb"`
TempLinkThreshold fs.SizeSuffix `config:"templink_threshold"`
}
// Fs represents a remote acd server
@@ -393,7 +384,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
var resp *http.Response
var subFolder *acd.Folder
err = f.pacer.Call(func() (bool, error) {
subFolder, resp, err = folder.GetFolder(f.opt.Enc.FromStandardName(leaf))
subFolder, resp, err = folder.GetFolder(leaf)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -420,7 +411,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
var resp *http.Response
var info *acd.Folder
err = f.pacer.Call(func() (bool, error) {
info, resp, err = folder.CreateFolder(f.opt.Enc.FromStandardName(leaf))
info, resp, err = folder.CreateFolder(leaf)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -488,7 +479,6 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
if !hasValidParent {
continue
}
*node.Name = f.opt.Enc.ToStandardName(*node.Name)
// Store the nodes up in case we have to retry the listing
out = append(out, node)
}
@@ -678,7 +668,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
err = f.pacer.CallNoRetry(func() (bool, error) {
start := time.Now()
f.tokenRenewer.Start()
info, resp, err = folder.Put(in, f.opt.Enc.FromStandardName(leaf))
info, resp, err = folder.Put(in, leaf)
f.tokenRenewer.Stop()
var ok bool
ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
@@ -1048,7 +1038,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
var resp *http.Response
var info *acd.File
err = o.fs.pacer.Call(func() (bool, error) {
info, resp, err = folder.GetFile(o.fs.opt.Enc.FromStandardName(leaf))
info, resp, err = folder.GetFile(leaf)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -1168,7 +1158,7 @@ func (f *Fs) restoreNode(info *acd.Node) (newInfo *acd.Node, err error) {
func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
newInfo, resp, err = info.Rename(f.opt.Enc.FromStandardName(newName))
newInfo, resp, err = info.Rename(newName)
return f.shouldRetry(resp, err)
})
return newInfo, err
@@ -1364,11 +1354,10 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoin
if len(node.Parents) > 0 {
if path, ok := f.dirCache.GetInv(node.Parents[0]); ok {
// and append the drive file name to compute the full file name
name := f.opt.Enc.ToStandardName(*node.Name)
if len(path) > 0 {
path = path + "/" + name
path = path + "/" + *node.Name
} else {
path = name
path = *node.Name
}
// this will now clear the actual file too
pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})

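The `f.opt.Enc` calls changing in the amazonclouddrive hunks above translate between rclone's standard path names and provider-safe names. A rough round-trip sketch using the same mask the backend declares as its default (`encoder.Base | encoder.EncodeInvalidUtf8`); the method names mirror the calls visible in this diff:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/encoder"
)

func main() {
	// The same mask the acd backend declares as its default encoding.
	enc := encoder.Base | encoder.EncodeInvalidUtf8

	name := "file\xbfname" // contains an invalid UTF-8 byte
	wire := enc.FromStandardName(name) // what gets sent to the provider
	fmt.Printf("to provider:    %q\n", wire)
	fmt.Printf("back to rclone: %q\n", enc.ToStandardName(wire))
}
```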
backend/azureblob/azureblob.go

@@ -16,6 +16,8 @@ import (
"net/http"
"net/url"
"path"
"regexp"
"strconv"
"strings"
"sync"
"time"
@@ -25,15 +27,12 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
)
@@ -125,50 +124,38 @@ If blobs are in "archive tier" at remote, trying to perform data transfer
operations from remote will not be allowed. User should first restore by
tiering blob to "Hot" or "Cool".`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.EncodeInvalidUtf8 |
encoder.EncodeSlash |
encoder.EncodeCtl |
encoder.EncodeDel |
encoder.EncodeBackSlash |
encoder.EncodeRightPeriod),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Account string `config:"account"`
Key string `config:"key"`
Endpoint string `config:"endpoint"`
SASURL string `config:"sas_url"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
ListChunkSize uint `config:"list_chunk"`
AccessTier string `config:"access_tier"`
UseEmulator bool `config:"use_emulator"`
Enc encoder.MultiEncoder `config:"encoding"`
Account string `config:"account"`
Key string `config:"key"`
Endpoint string `config:"endpoint"`
SASURL string `config:"sas_url"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
ListChunkSize uint `config:"list_chunk"`
AccessTier string `config:"access_tier"`
UseEmulator bool `config:"use_emulator"`
}
// Fs represents a remote azure server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
features *fs.Features // optional features
client *http.Client // http client we are using
svcURL *azblob.ServiceURL // reference to serviceURL
cntURLcacheMu sync.Mutex // mutex to protect cntURLcache
cntURLcache map[string]*azblob.ContainerURL // reference to containerURL per container
rootContainer string // container part of root (if any)
rootDirectory string // directory part of root (if any)
isLimited bool // if limited to one container
cache *bucket.Cache // cache for container creation status
pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
features *fs.Features // optional features
client *http.Client // http client we are using
svcURL *azblob.ServiceURL // reference to serviceURL
cntURL *azblob.ContainerURL // reference to containerURL
container string // the container we are working on
containerOKMu sync.Mutex // mutex to protect container OK
containerOK bool // true if we have created the container
containerDeleted bool // true if we have deleted the container
pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency
}
// Object describes a azure object
@@ -192,18 +179,18 @@ func (f *Fs) Name() string {
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
if f.root == "" {
return f.container
}
return f.container + "/" + f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootContainer == "" {
return "Azure root"
if f.root == "" {
return fmt.Sprintf("Azure container %s", f.container)
}
if f.rootDirectory == "" {
return fmt.Sprintf("Azure container %s", f.rootContainer)
}
return fmt.Sprintf("Azure container %s path %s", f.rootContainer, f.rootDirectory)
return fmt.Sprintf("Azure container %s path %s", f.container, f.root)
}
// Features returns the optional features of this Fs
@@ -211,24 +198,21 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
// Pattern to match a azure path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
// parsePath parses an azure 'url'
func parsePath(path string) (container, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("couldn't find container in azure path %q", path)
} else {
container, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
return
}
// split returns container and containerPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) {
containerName, containerPath = bucket.Split(path.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(containerName), f.opt.Enc.FromStandardPath(containerPath)
}
// split returns container and containerPath from the object
func (o *Object) split() (container, containerPath string) {
return o.fs.split(o.remote)
}
// validateAccessTier checks if azureblob supports user supplied tier
func validateAccessTier(tier string) bool {
switch tier {
@@ -321,9 +305,6 @@ func httpClientFactory(client *http.Client) pipeline.Factory {
//
// this code was copied from azblob.NewPipeline
func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline.Pipeline {
// Don't log stuff to syslog/Windows Event log
pipeline.SetForceLogEnabled(false)
// Closest to API goes first; closest to the wire goes last
factories := []pipeline.Factory{
azblob.NewTelemetryPolicyFactory(o.Telemetry),
@@ -336,12 +317,6 @@ func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline
return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log})
}
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootContainer, f.rootDirectory = bucket.Split(f.root)
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
@@ -363,6 +338,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if opt.ListChunkSize > maxListChunkSize {
return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
}
container, directory, err := parsePath(root)
if err != nil {
return nil, err
}
if opt.Endpoint == "" {
opt.Endpoint = storageDefaultBaseURL
}
@@ -377,25 +356,24 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f := &Fs{
name: name,
opt: *opt,
container: container,
root: directory,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
client: fshttp.NewClient(fs.Config),
cache: bucket.NewCache(),
cntURLcache: make(map[string]*azblob.ContainerURL, 1),
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
SetTier: true,
GetTier: true,
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
SetTier: true,
GetTier: true,
}).Fill(f)
var (
u *url.URL
serviceURL azblob.ServiceURL
u *url.URL
serviceURL azblob.ServiceURL
containerURL azblob.ContainerURL
)
switch {
case opt.UseEmulator:
@@ -409,6 +387,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
serviceURL = azblob.NewServiceURL(*u, pipeline)
containerURL = serviceURL.NewContainerURL(container)
case opt.Account != "" && opt.Key != "":
credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
if err != nil {
@@ -421,6 +400,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
serviceURL = azblob.NewServiceURL(*u, pipeline)
containerURL = serviceURL.NewContainerURL(container)
case opt.SASURL != "":
u, err = url.Parse(opt.SASURL)
if err != nil {
@@ -431,30 +411,38 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Check if we have container level SAS or account level sas
parts := azblob.NewBlobURLParts(*u)
if parts.ContainerName != "" {
if f.rootContainer != "" && parts.ContainerName != f.rootContainer {
if container != "" && parts.ContainerName != container {
return nil, errors.New("Container name in SAS URL and container provided in command do not match")
}
containerURL := azblob.NewContainerURL(*u, pipeline)
f.cntURLcache[parts.ContainerName] = &containerURL
f.isLimited = true
f.container = parts.ContainerName
containerURL = azblob.NewContainerURL(*u, pipeline)
} else {
serviceURL = azblob.NewServiceURL(*u, pipeline)
containerURL = serviceURL.NewContainerURL(container)
}
default:
return nil, errors.New("Need account+key or connectionString or sasURL")
}
f.svcURL = &serviceURL
f.cntURL = &containerURL
if f.rootContainer != "" && f.rootDirectory != "" {
if f.root != "" {
f.root += "/"
// Check to see if the (container,directory) is actually an existing file
oldRoot := f.root
newRoot, leaf := path.Split(oldRoot)
f.setRoot(newRoot)
_, err := f.NewObject(ctx, leaf)
remote := path.Base(directory)
f.root = path.Dir(directory)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
_, err := f.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
// File doesn't exist or is a directory so return old f
f.setRoot(oldRoot)
f.root = oldRoot
return f, nil
}
return nil, err
@@ -465,20 +453,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return f, nil
}
// return the container URL for the container passed in
func (f *Fs) cntURL(container string) (containerURL *azblob.ContainerURL) {
f.cntURLcacheMu.Lock()
defer f.cntURLcacheMu.Unlock()
var ok bool
if containerURL, ok = f.cntURLcache[container]; !ok {
cntURL := f.svcURL.NewContainerURL(container)
containerURL = &cntURL
f.cntURLcache[container] = containerURL
}
return containerURL
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
@@ -508,8 +482,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
}
// getBlobReference creates an empty blob reference with no metadata
func (f *Fs) getBlobReference(container, containerPath string) azblob.BlobURL {
return f.cntURL(container).NewBlobURL(containerPath)
func (f *Fs) getBlobReference(remote string) azblob.BlobURL {
return f.cntURL.NewBlobURL(f.root + remote)
}
// updateMetadataWithModTime adds the modTime passed in to o.meta.
@@ -545,18 +519,16 @@ type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
// the container and root supplied
//
// dir is the starting directory, "" for root
//
// The remote has prefix removed from it and if addContainer is set then
// it adds the container to the start.
func (f *Fs) list(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, maxResults uint, fn listFn) error {
if f.cache.IsDeleted(container) {
func (f *Fs) list(ctx context.Context, dir string, recurse bool, maxResults uint, fn listFn) error {
f.containerOKMu.Lock()
deleted := f.containerDeleted
f.containerOKMu.Unlock()
if deleted {
return fs.ErrorDirNotFound
}
if prefix != "" {
prefix += "/"
}
if directory != "" {
directory += "/"
root := f.root
if dir != "" {
root += dir + "/"
}
delimiter := ""
if !recurse {
@@ -571,14 +543,15 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
UncommittedBlobs: false,
Deleted: false,
},
Prefix: directory,
Prefix: root,
MaxResults: int32(maxResults),
}
directoryMarkers := map[string]struct{}{}
for marker := (azblob.Marker{}); marker.NotDone(); {
var response *azblob.ListBlobsHierarchySegmentResponse
err := f.pacer.Call(func() (bool, error) {
var err error
response, err = f.cntURL(container).ListBlobsHierarchySegment(ctx, marker, delimiter, options)
response, err = f.cntURL.ListBlobsHierarchySegment(ctx, marker, delimiter, options)
return f.shouldRetry(err)
})
@@ -591,24 +564,33 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
}
// Advance marker to next
marker = response.NextMarker
for i := range response.Segment.BlobItems {
file := &response.Segment.BlobItems[i]
// Finish if file name no longer has prefix
// if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
// return nil
// }
remote := f.opt.Enc.ToStandardPath(file.Name)
if !strings.HasPrefix(remote, prefix) {
fs.Debugf(f, "Odd name received %q", remote)
if !strings.HasPrefix(file.Name, f.root) {
fs.Debugf(f, "Odd name received %q", file.Name)
continue
}
remote = remote[len(prefix):]
remote := file.Name[len(f.root):]
if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) {
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
err = fn(remote, file, true)
if err != nil {
return err
}
// Keep track of directory markers. If recursing then
// there will be no Prefixes, so there is no need to keep track
if !recurse {
directoryMarkers[remote] = struct{}{}
}
continue // skip directory marker
}
if addContainer {
remote = path.Join(container, remote)
}
// Send object
err = fn(remote, file, false)
if err != nil {
@@ -618,14 +600,14 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
// Send the subdirectories
for _, remote := range response.Segment.BlobPrefixes {
remote := strings.TrimRight(remote.Name, "/")
remote = f.opt.Enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
if !strings.HasPrefix(remote, f.root) {
fs.Debugf(f, "Odd directory name received %q", remote)
continue
}
remote = remote[len(prefix):]
if addContainer {
remote = path.Join(container, remote)
remote = remote[len(f.root):]
// Don't send if already sent as a directory marker
if _, found := directoryMarkers[remote]; found {
continue
}
// Send object
err = fn(remote, nil, true)
@@ -650,9 +632,19 @@ func (f *Fs) itemToDirEntry(remote string, object *azblob.BlobItem, isDirectory
return o, nil
}
// mark the container as being OK
func (f *Fs) markContainerOK() {
if f.container != "" {
f.containerOKMu.Lock()
f.containerOK = true
f.containerDeleted = false
f.containerOKMu.Unlock()
}
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
err = f.list(ctx, container, directory, prefix, addContainer, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.list(ctx, dir, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
@@ -666,24 +658,17 @@ func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, a
return nil, err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
f.markContainerOK()
return entries, nil
}
// listContainers returns all the containers to out
func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) {
if f.isLimited {
f.cntURLcacheMu.Lock()
for container := range f.cntURLcache {
d := fs.NewDir(container, time.Time{})
entries = append(entries, d)
}
f.cntURLcacheMu.Unlock()
return entries, nil
func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
err = f.listContainersToFn(func(container *azblob.ContainerItem) error {
d := fs.NewDir(f.opt.Enc.ToStandardName(container.Name), container.Properties.LastModified)
f.cache.MarkOK(container.Name)
d := fs.NewDir(container.Name, container.Properties.LastModified)
entries = append(entries, d)
return nil
})
@@ -703,14 +688,10 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
container, directory := f.split(dir)
if container == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listContainers(ctx)
if f.container == "" {
return f.listContainers(dir)
}
return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
return f.listDir(ctx, dir)
}
// ListR lists the objects and directories of the Fs starting
@@ -730,43 +711,22 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
container, directory := f.split(dir)
if f.container == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
listR := func(container, directory, prefix string, addContainer bool) error {
return f.list(ctx, container, directory, prefix, addContainer, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
}
if container == "" {
entries, err := f.listContainers(ctx)
err = f.list(ctx, dir, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
container := entry.Remote()
err = listR(container, "", f.rootDirectory, true)
if err != nil {
return err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
}
} else {
err = listR(container, directory, f.rootDirectory, f.rootContainer == "")
if err != nil {
return err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
return list.Add(entry)
})
if err != nil {
return err
}
// container must be present if listing succeeded
f.markContainerOK()
return list.Flush()
}
@@ -816,43 +776,86 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
return fs, fs.Update(ctx, in, src, options...)
}
// Check if the container exists
//
// NB this can return incorrect results if called immediately after container deletion
func (f *Fs) dirExists() (bool, error) {
options := azblob.ListBlobsSegmentOptions{
Details: azblob.BlobListingDetails{
Copy: false,
Metadata: false,
Snapshots: false,
UncommittedBlobs: false,
Deleted: false,
},
MaxResults: 1,
}
err := f.pacer.Call(func() (bool, error) {
ctx := context.Background()
_, err := f.cntURL.ListBlobsHierarchySegment(ctx, azblob.Marker{}, "", options)
return f.shouldRetry(err)
})
if err == nil {
return true, nil
}
// Check the HTTP error code along with the service code; the current SDK sometimes doesn't populate the service code correctly
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
return false, nil
}
return false, err
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
container, _ := f.split(dir)
return f.makeContainer(ctx, container)
}
f.containerOKMu.Lock()
defer f.containerOKMu.Unlock()
if f.containerOK {
return nil
}
if !f.containerDeleted {
exists, err := f.dirExists()
if err == nil {
f.containerOK = exists
}
if err != nil || exists {
return err
}
}
// makeContainer creates the container if it doesn't exist
func (f *Fs) makeContainer(ctx context.Context, container string) error {
return f.cache.Create(container, func() error {
// now try to create the container
return f.pacer.Call(func() (bool, error) {
_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
if err != nil {
if storageErr, ok := err.(azblob.StorageError); ok {
switch storageErr.ServiceCode() {
case azblob.ServiceCodeContainerAlreadyExists:
return false, nil
case azblob.ServiceCodeContainerBeingDeleted:
// From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container
// When a container is deleted, a container with the same name cannot be created
// for at least 30 seconds; the container may not be available for more than 30
// seconds if the service is still processing the request.
time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds
f.cache.MarkDeleted(container)
return true, err
}
// now try to create the container
err := f.pacer.Call(func() (bool, error) {
ctx := context.Background()
_, err := f.cntURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
if err != nil {
if storageErr, ok := err.(azblob.StorageError); ok {
switch storageErr.ServiceCode() {
case azblob.ServiceCodeContainerAlreadyExists:
f.containerOK = true
return false, nil
case azblob.ServiceCodeContainerBeingDeleted:
// From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container
// When a container is deleted, a container with the same name cannot be created
// for at least 30 seconds; the container may not be available for more than 30
// seconds if the service is still processing the request.
time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds
f.containerDeleted = true
return true, err
}
}
return f.shouldRetry(err)
})
}, nil)
}
return f.shouldRetry(err)
})
if err == nil {
f.containerOK = true
f.containerDeleted = false
}
return errors.Wrap(err, "failed to make container")
}
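The 6-second sleep above exists because, per the Azure documentation quoted in the comment, a just-deleted container name stays unavailable for around 30 seconds, and with the pacer's default 10 retries that gives roughly 60 seconds of headroom. A generic sketch of the same retry-with-fixed-backoff shape; the sentinel error and limits are illustrative, not the real pacer API:

package main

import (
	"errors"
	"fmt"
	"time"
)

// errBeingDeleted stands in for azblob.ServiceCodeContainerBeingDeleted.
var errBeingDeleted = errors.New("container is being deleted")

// retryCreate retries op while it reports "being deleted", sleeping a
// fixed interval between attempts, mirroring the pacer loop above.
func retryCreate(op func() error, attempts int, wait time.Duration) error {
	var err error
	for i := 0; i < attempts; i++ {
		err = op()
		if err != errBeingDeleted {
			return err // success, or an error we shouldn't retry
		}
		time.Sleep(wait)
	}
	return err
}

func main() {
	tries := 0
	err := retryCreate(func() error {
		tries++
		if tries < 3 {
			return errBeingDeleted // container name still cooling off
		}
		return nil
	}, 10, 10*time.Millisecond)
	fmt.Println(tries, err) // 3 <nil>
}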
// isEmpty checks to see if a given (container, directory) is empty and returns an error if not
func (f *Fs) isEmpty(ctx context.Context, container, directory string) (err error) {
// isEmpty checks to see if a given directory is empty and returns an error if not
func (f *Fs) isEmpty(ctx context.Context, dir string) (err error) {
empty := true
err = f.list(ctx, container, directory, f.rootDirectory, f.rootContainer == "", true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
err = f.list(ctx, dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
empty = false
return nil
})
@@ -867,42 +870,47 @@ func (f *Fs) isEmpty(ctx context.Context, container, directory string) (err erro
// deleteContainer deletes the container. It can delete a full
// container, so use isEmpty if you don't want that.
func (f *Fs) deleteContainer(ctx context.Context, container string) error {
return f.cache.Remove(container, func() error {
options := azblob.ContainerAccessConditions{}
return f.pacer.Call(func() (bool, error) {
_, err := f.cntURL(container).GetProperties(ctx, azblob.LeaseAccessConditions{})
if err == nil {
_, err = f.cntURL(container).Delete(ctx, options)
}
func (f *Fs) deleteContainer() error {
f.containerOKMu.Lock()
defer f.containerOKMu.Unlock()
options := azblob.ContainerAccessConditions{}
ctx := context.Background()
err := f.pacer.Call(func() (bool, error) {
_, err := f.cntURL.GetProperties(ctx, azblob.LeaseAccessConditions{})
if err == nil {
_, err = f.cntURL.Delete(ctx, options)
}
if err != nil {
// Check the HTTP error code along with the service code; the current SDK sometimes doesn't populate the service code correctly
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
return false, fs.ErrorDirNotFound
}
return f.shouldRetry(err)
if err != nil {
// Check the HTTP error code along with the service code; the current SDK sometimes doesn't populate the service code correctly
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
return false, fs.ErrorDirNotFound
}
return f.shouldRetry(err)
})
}
return f.shouldRetry(err)
})
if err == nil {
f.containerOK = false
f.containerDeleted = true
}
return errors.Wrap(err, "failed to delete container")
}
// Rmdir deletes the container if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
container, directory := f.split(dir)
if container == "" || directory != "" {
return nil
}
err := f.isEmpty(ctx, container, directory)
err := f.isEmpty(ctx, dir)
if err != nil {
return err
}
return f.deleteContainer(ctx, container)
if f.root != "" || dir != "" {
return nil
}
return f.deleteContainer()
}
// Precision of the remote
@@ -918,12 +926,11 @@ func (f *Fs) Hashes() hash.Set {
// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context) error {
dir := "" // forward compat!
container, directory := f.split(dir)
if container == "" || directory != "" {
// Delegate to caller if not root of a container
if f.root != "" || dir != "" {
// Delegate to caller if not root container
return fs.ErrorCantPurge
}
return f.deleteContainer(ctx, container)
return f.deleteContainer()
}
// Copy src to this remote using server side copy operations.
@@ -936,8 +943,7 @@ func (f *Fs) Purge(ctx context.Context) error {
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstContainer, dstPath := f.split(remote)
err := f.makeContainer(ctx, dstContainer)
err := f.Mkdir(ctx, "")
if err != nil {
return nil, err
}
@@ -946,7 +952,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
dstBlobURL := f.getBlobReference(dstContainer, dstPath)
dstBlobURL := f.getBlobReference(remote)
srcBlobURL := srcObj.getBlobReference()
source, err := url.Parse(srcBlobURL.String())
@@ -1079,8 +1085,7 @@ func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
// getBlobReference creates an empty blob reference with no metadata
func (o *Object) getBlobReference() azblob.BlobURL {
container, directory := o.split()
return o.fs.getBlobReference(container, directory)
return o.fs.getBlobReference(o.remote)
}
// clearMetaData clears enough metadata so readMetaData will re-read it
@@ -1120,6 +1125,22 @@ func (o *Object) readMetaData() (err error) {
return o.decodeMetaDataFromPropertiesResponse(blobProperties)
}
// parseTimeString converts a decimal string number of milliseconds
// elapsed since January 1, 1970 UTC into a time.Time and stores it in
// the modTime variable.
func (o *Object) parseTimeString(timeString string) (err error) {
if timeString == "" {
return nil
}
unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
if err != nil {
fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
return err
}
o.modTime = time.Unix(unixMilliseconds/1E3, (unixMilliseconds%1E3)*1E6).UTC()
return nil
}
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
@@ -1164,7 +1185,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if o.AccessTier() == azblob.AccessTierArchive {
return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
}
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
@@ -1370,8 +1391,7 @@ outer:
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
container, _ := o.split()
err = o.fs.makeContainer(ctx, container)
err = o.fs.Mkdir(ctx, "")
if err != nil {
return err
}
@@ -1509,6 +1529,4 @@ var (
_ fs.ListRer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.GetTierer = &Object{}
_ fs.SetTierer = &Object{}
)

View File

@@ -50,7 +50,7 @@ type Timestamp time.Time
// MarshalJSON turns a Timestamp into JSON (in UTC)
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
timestamp := (*time.Time)(t).UTC().UnixNano()
return []byte(strconv.FormatInt(timestamp/1e6, 10)), nil
return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
}
// UnmarshalJSON turns JSON into a Timestamp
@@ -59,7 +59,7 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
if err != nil {
return err
}
*t = Timestamp(time.Unix(timestamp/1e3, (timestamp%1e3)*1e6).UTC())
*t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
return nil
}
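Both this hunk and the Azure mod-time code earlier store times as decimal milliseconds since the Unix epoch; the only change in the diff is the case of the exponent, and 1E6 and 1e6 are identical constants to the Go compiler. A self-contained round trip of the conversion:

package main

import (
	"fmt"
	"strconv"
	"time"
)

// toMillis renders t as decimal milliseconds since the Unix epoch.
func toMillis(t time.Time) string {
	return strconv.FormatInt(t.UTC().UnixNano()/1e6, 10)
}

// fromMillis parses decimal milliseconds back into a UTC time.Time.
func fromMillis(s string) (time.Time, error) {
	ms, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return time.Time{}, err
	}
	return time.Unix(ms/1e3, (ms%1e3)*1e6).UTC(), nil
}

func main() {
	t := time.Date(2019, 8, 4, 16, 11, 24, 500e6, time.UTC)
	s := toMillis(t)
	back, _ := fromMillis(s)
	fmt.Println(s, back.Equal(t)) // 1564935084500 true
}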

File diff suppressed because it is too large

View File

@@ -104,14 +104,13 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
Method: "POST",
Path: "/b2_start_large_file",
}
bucket, bucketPath := o.split()
bucketID, err := f.getBucketID(ctx, bucket)
bucketID, err := f.getBucketID()
if err != nil {
return nil, err
}
var request = api.StartLargeFileRequest{
BucketID: bucketID,
Name: f.opt.Enc.FromStandardPath(bucketPath),
Name: o.fs.root + remote,
ContentType: fs.MimeType(ctx, src),
Info: map[string]string{
timeKey: timeString(modTime),
@@ -125,8 +124,8 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
}
var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
return f.shouldRetry(ctx, resp, err)
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -150,7 +149,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
up.uploadMu.Lock()
defer up.uploadMu.Unlock()
if len(up.uploads) == 0 {
@@ -162,8 +161,8 @@ func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadP
ID: up.id,
}
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err)
resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
return up.f.shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get upload URL")
@@ -192,12 +191,12 @@ func (up *largeUpload) clearUploadURL() {
}
// Transfer a chunk
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
func (up *largeUpload) transferChunk(part int64, body []byte) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
// Get upload URL
upload, err := up.getUploadURL(ctx)
upload, err := up.getUploadURL()
if err != nil {
return false, err
}
@@ -241,8 +240,8 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
var response api.UploadPartResponse
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
retry, err := up.f.shouldRetry(ctx, resp, err)
resp, err := up.f.srv.CallJSON(&opts, nil, &response)
retry, err := up.f.shouldRetry(resp, err)
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
}
@@ -264,7 +263,7 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
}
// finish closes off the large upload
func (up *largeUpload) finish(ctx context.Context) error {
func (up *largeUpload) finish() error {
fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
opts := rest.Opts{
Method: "POST",
@@ -276,8 +275,8 @@ func (up *largeUpload) finish(ctx context.Context) error {
}
var response api.FileInfo
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
return up.f.shouldRetry(ctx, resp, err)
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
return up.f.shouldRetry(resp, err)
})
if err != nil {
return err
@@ -286,7 +285,7 @@ func (up *largeUpload) finish(ctx context.Context) error {
}
// cancel aborts the large upload
func (up *largeUpload) cancel(ctx context.Context) error {
func (up *largeUpload) cancel() error {
opts := rest.Opts{
Method: "POST",
Path: "/b2_cancel_large_file",
@@ -296,18 +295,18 @@ func (up *largeUpload) cancel(ctx context.Context) error {
}
var response api.CancelLargeFileResponse
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
return up.f.shouldRetry(ctx, resp, err)
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
return up.f.shouldRetry(resp, err)
})
return err
}
func (up *largeUpload) managedTransferChunk(ctx context.Context, wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
wg.Add(1)
go func(part int64, buf []byte) {
defer wg.Done()
defer up.f.putUploadBlock(buf)
err := up.transferChunk(ctx, part, buf)
err := up.transferChunk(part, buf)
if err != nil {
select {
case errs <- err:
@@ -317,7 +316,7 @@ func (up *largeUpload) managedTransferChunk(ctx context.Context, wg *sync.WaitGr
}(part, buf)
}
func (up *largeUpload) finishOrCancelOnError(ctx context.Context, err error, errs chan error) error {
func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
if err == nil {
select {
case err = <-errs:
@@ -326,19 +325,19 @@ func (up *largeUpload) finishOrCancelOnError(ctx context.Context, err error, err
}
if err != nil {
fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
cancelErr := up.cancel(ctx)
cancelErr := up.cancel()
if cancelErr != nil {
fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
}
return err
}
return up.finish(ctx)
return up.finish()
}
// Stream uploads the chunks from the input, starting with a required initial
// chunk. Assumes the file size is unknown and will upload until the input
// reaches EOF.
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
errs := make(chan error, 1)
hasMoreParts := true
@@ -346,7 +345,7 @@ func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (e
// Transfer initial chunk
up.size = int64(len(initialUploadBlock))
up.managedTransferChunk(ctx, &wg, errs, 1, initialUploadBlock)
up.managedTransferChunk(&wg, errs, 1, initialUploadBlock)
outer:
for part := int64(2); hasMoreParts; part++ {
@@ -388,16 +387,16 @@ outer:
}
// Transfer the chunk
up.managedTransferChunk(ctx, &wg, errs, part, buf)
up.managedTransferChunk(&wg, errs, part, buf)
}
wg.Wait()
up.sha1s = up.sha1s[:up.parts]
return up.finishOrCancelOnError(ctx, err, errs)
return up.finishOrCancelOnError(err, errs)
}
// Upload uploads the chunks from the input
func (up *largeUpload) Upload(ctx context.Context) error {
func (up *largeUpload) Upload() error {
fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
remaining := up.size
errs := make(chan error, 1)
@@ -428,10 +427,10 @@ outer:
}
// Transfer the chunk
up.managedTransferChunk(ctx, &wg, errs, part, buf)
up.managedTransferChunk(&wg, errs, part, buf)
remaining -= reqSize
}
wg.Wait()
return up.finishOrCancelOnError(ctx, err, errs)
return up.finishOrCancelOnError(err, errs)
}
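Stream and Upload above share a common Go fan-out shape: each chunk goes up in its own goroutine, a sync.WaitGroup tracks completion, and a buffered channel of size one records only the first error — the non-blocking select in managedTransferChunk silently drops the rest. A minimal sketch of that error-collection pattern, with the transfer replaced by a stub:

package main

import (
	"errors"
	"fmt"
	"sync"
)

func main() {
	errs := make(chan error, 1) // room for exactly one error
	var wg sync.WaitGroup

	upload := func(part int) error { // stub standing in for transferChunk
		if part == 3 {
			return errors.New("part 3 failed")
		}
		return nil
	}

	for part := 1; part <= 5; part++ {
		wg.Add(1)
		go func(part int) {
			defer wg.Done()
			if err := upload(part); err != nil {
				select {
				case errs <- err: // first error wins
				default: // later errors are dropped
				}
			}
		}(part)
	}

	wg.Wait()
	select {
	case err := <-errs:
		fmt.Println("upload failed:", err)
	default:
		fmt.Println("all parts uploaded")
	}
}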

View File

@@ -202,23 +202,3 @@ type CommitUpload struct {
ContentModifiedAt Time `json:"content_modified_at"`
} `json:"attributes"`
}
// ConfigJSON defines the shape of a box config.json
type ConfigJSON struct {
BoxAppSettings AppSettings `json:"boxAppSettings"`
EnterpriseID string `json:"enterpriseID"`
}
// AppSettings defines the shape of the boxAppSettings within box config.json
type AppSettings struct {
ClientID string `json:"clientID"`
ClientSecret string `json:"clientSecret"`
AppAuth AppAuth `json:"appAuth"`
}
// AppAuth defines the shape of the appAuth within boxAppSettings in config.json
type AppAuth struct {
PublicKeyID string `json:"publicKeyID"`
PrivateKey string `json:"privateKey"`
Passphrase string `json:"passphrase"`
}
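The three structs above map directly onto the config.json that Box issues for JWT applications. A sketch of decoding one, with the structs re-declared locally and an illustrative JSON literal:

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the shapes declared above.
type appAuth struct {
	PublicKeyID string `json:"publicKeyID"`
	PrivateKey  string `json:"privateKey"`
	Passphrase  string `json:"passphrase"`
}

type appSettings struct {
	ClientID     string  `json:"clientID"`
	ClientSecret string  `json:"clientSecret"`
	AppAuth      appAuth `json:"appAuth"`
}

type configJSON struct {
	BoxAppSettings appSettings `json:"boxAppSettings"`
	EnterpriseID   string      `json:"enterpriseID"`
}

func main() {
	raw := []byte(`{
		"boxAppSettings": {
			"clientID": "abc",
			"clientSecret": "xyz",
			"appAuth": {
				"publicKeyID": "kid1",
				"privateKey": "-----BEGIN ENCRYPTED PRIVATE KEY-----...",
				"passphrase": "pw"
			}
		},
		"enterpriseID": "12345"
	}`)
	var cfg configJSON
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.EnterpriseID, cfg.BoxAppSettings.AppAuth.PublicKeyID) // 12345 kid1
}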

View File

@@ -11,12 +11,8 @@ package box
import (
"context"
"crypto/rsa"
"encoding/json"
"encoding/pem"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
@@ -25,11 +21,6 @@ import (
"strings"
"time"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/jwtutil"
"github.com/youmark/pkcs8"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
@@ -38,14 +29,12 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jws"
)
const (
@@ -60,7 +49,6 @@ const (
listChunks = 1000 // chunk size to read directory listings
minUploadCutoff = 50000000 // upload cutoff can be no lower than this
defaultUploadCutoff = 50 * 1024 * 1024
tokenURL = "https://api.box.com/oauth2/token"
)
// Globals
@@ -85,34 +73,9 @@ func init() {
Description: "Box",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
var err error
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
privateKey, err := getDecryptedPrivateKey(boxConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
claims, err := getClaims(boxConfig, boxSubType)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(fs.Config)
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
if err != nil {
log.Fatalf("Failed to configure token with jwt authentication: %v", err)
}
} else {
err = oauthutil.Config("box", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token with oauth authentication: %v", err)
}
err := oauthutil.Config("box", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
@@ -121,19 +84,6 @@ func init() {
}, {
Name: config.ConfigClientSecret,
Help: "Box App Client Secret\nLeave blank normally.",
}, {
Name: "box_config_file",
Help: "Box App config.json location\nLeave blank normally.",
}, {
Name: "box_sub_type",
Default: "user",
Examples: []fs.OptionExample{{
Value: "user",
Help: "Rclone should act on behalf of a user",
}, {
Value: "enterprise",
Help: "Rclone should act on behalf of a service account",
}},
}, {
Name: "upload_cutoff",
Help: "Cutoff for switching to multipart upload (>= 50MB).",
@@ -144,98 +94,14 @@ func init() {
Help: "Max number of times to try committing a multipart file.",
Default: 100,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// From https://developer.box.com/docs/error-codes#section-400-bad-request :
// > Box only supports file or folder names that are 255 characters or less.
// > File names containing non-printable ascii, "/" or "\", names with leading
// > or trailing spaces, and the special names “.” and “..” are also unsupported.
//
// Testing revealed names with leading spaces work fine.
// Also encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8),
}},
})
}
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, errors.Wrap(err, "box: failed to read Box config")
}
err = json.Unmarshal(file, &boxConfig)
if err != nil {
return nil, errors.Wrap(err, "box: failed to parse Box config")
}
return boxConfig, nil
}
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
val, err := jwtutil.RandomHex(20)
if err != nil {
return nil, errors.Wrap(err, "box: failed to generate random string for jti")
}
claims = &jws.ClaimSet{
Iss: boxConfig.BoxAppSettings.ClientID,
Sub: boxConfig.EnterpriseID,
Aud: tokenURL,
Iat: time.Now().Unix(),
Exp: time.Now().Add(time.Second * 45).Unix(),
PrivateClaims: map[string]interface{}{
"box_sub_type": boxSubType,
"aud": tokenURL,
"jti": val,
},
}
return claims, nil
}
func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
signingHeaders := &jws.Header{
Algorithm: "RS256",
Typ: "JWT",
KeyID: boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
}
return signingHeaders
}
func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
queryParams := map[string]string{
"client_id": boxConfig.BoxAppSettings.ClientID,
"client_secret": boxConfig.BoxAppSettings.ClientSecret,
}
return queryParams
}
func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
if len(rest) > 0 {
return nil, errors.Wrap(err, "box: extra data included in private key")
}
rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase))
if err != nil {
return nil, errors.Wrap(err, "box: failed to decrypt private key")
}
return rsaKey.(*rsa.PrivateKey), nil
}
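getDecryptedPrivateKey pairs pem.Decode with the third-party pkcs8 package because Box ships passphrase-encrypted keys, which the standard library will not decrypt. For an unencrypted key the same flow works with x509 alone; a sketch that generates a throwaway key rather than reading one from config:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

func main() {
	// Make a throwaway key and PEM-encode it as PKCS#8.
	key, _ := rsa.GenerateKey(rand.Reader, 2048)
	der, _ := x509.MarshalPKCS8PrivateKey(key)
	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: der})

	// The decode path mirrored from the diff: pem.Decode, reject
	// trailing data, then parse the DER body.
	block, rest := pem.Decode(pemBytes)
	if block == nil || len(rest) > 0 {
		panic("extra data included in private key")
	}
	parsed, err := x509.ParsePKCS8PrivateKey(block.Bytes)
	if err != nil {
		panic(err)
	}
	rsaKey, ok := parsed.(*rsa.PrivateKey)
	fmt.Println(ok, rsaKey.Size()) // true 256 (bytes in the modulus)
}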
// Options defines the configuration for this backend
type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CommitRetries int `config:"commit_retries"`
Enc encoder.MultiEncoder `config:"encoding"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CommitRetries int `config:"commit_retries"`
}
// Fs represents a remote box
@@ -315,6 +181,18 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// substitute reserved characters for box
func replaceReservedChars(x string) string {
// Backslash for FULLWIDTH REVERSE SOLIDUS
return strings.Replace(x, "\\", "", -1)
}
// restore reserved characters for box
func restoreReservedChars(x string) string {
// FULLWIDTH REVERSE SOLIDUS for Backslash
return strings.Replace(x, "", "\\", -1)
}
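These helpers were Box's pre-encoder workaround: a real backslash is swapped for U+FF3C FULLWIDTH REVERSE SOLIDUS on the way to the API and restored on the way back, since Box rejects names containing a backslash (the newer code delegates this to lib/encoder). A round-trip sketch:

package main

import (
	"fmt"
	"strings"
)

const fullwidthBackslash = "\uFF3C" // ＼ FULLWIDTH REVERSE SOLIDUS

func replaceReserved(s string) string {
	return strings.Replace(s, `\`, fullwidthBackslash, -1)
}

func restoreReserved(s string) string {
	return strings.Replace(s, fullwidthBackslash, `\`, -1)
}

func main() {
	name := `dir\file.txt`
	sent := replaceReserved(name)
	fmt.Println(sent)                          // dir＼file.txt
	fmt.Println(restoreReserved(sent) == name) // true
}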
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
@@ -326,7 +204,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
return nil, err
}
found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
found, err := f.listAll(directoryID, false, true, func(item *api.Item) bool {
if item.Name == leaf {
info = item
return true
@@ -474,7 +352,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
if item.Name == leaf {
pathIDOut = item.ID
return true
@@ -502,13 +380,13 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
Parameters: fieldsValue(),
}
mkdir := api.CreateFolder{
Name: f.opt.Enc.FromStandardName(leaf),
Name: replaceReservedChars(leaf),
Parent: api.Parent{
ID: pathID,
},
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -530,7 +408,7 @@ type listAllFn func(*api.Item) bool
// Lists the required directory, calling the user function on each item found
//
// If the user fn ever returns true then it exits early with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/folders/" + dirID + "/items",
@@ -545,7 +423,7 @@ OUTER:
var result api.FolderItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -568,7 +446,7 @@ OUTER:
if item.ItemStatus != api.ItemStatusActive {
continue
}
item.Name = f.opt.Enc.ToStandardName(item.Name)
item.Name = restoreReservedChars(item.Name)
if fn(item) {
found = true
break OUTER
@@ -601,7 +479,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
remote := path.Join(dir, info.Name)
if info.Type == api.ItemTypeFolder {
// cache the directory ID for later lookups
@@ -703,14 +581,14 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
}
// deleteObject removes an object by ID
func (f *Fs) deleteObject(ctx context.Context, id string) error {
func (f *Fs) deleteObject(id string) error {
opts := rest.Opts{
Method: "DELETE",
Path: "/files/" + id,
NoResponse: true,
}
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
resp, err := f.srv.Call(&opts)
return shouldRetry(resp, err)
})
}
@@ -741,7 +619,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
opts.Parameters.Set("recursive", strconv.FormatBool(!check))
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -804,8 +682,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
Path: "/files/" + srcObj.id + "/copy",
Parameters: fieldsValue(),
}
replacedLeaf := replaceReservedChars(leaf)
copyFile := api.CopyFile{
Name: f.opt.Enc.FromStandardName(leaf),
Name: replacedLeaf,
Parent: api.Parent{
ID: directoryID,
},
@@ -813,7 +692,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var resp *http.Response
var info *api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &copyFile, &info)
resp, err = f.srv.CallJSON(&opts, &copyFile, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -836,7 +715,7 @@ func (f *Fs) Purge(ctx context.Context) error {
}
// move a file or folder
func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
// Move the object
opts := rest.Opts{
Method: "PUT",
@@ -844,14 +723,14 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
Parameters: fieldsValue(),
}
move := api.UpdateFileMove{
Name: f.opt.Enc.FromStandardName(leaf),
Name: replaceReservedChars(leaf),
Parent: api.Parent{
ID: directoryID,
},
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
resp, err = f.srv.CallJSON(&opts, &move, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -883,7 +762,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// Do the move
info, err := f.move(ctx, "/files/", srcObj.id, leaf, directoryID)
info, err := f.move("/files/", srcObj.id, leaf, directoryID)
if err != nil {
return nil, err
}
@@ -966,7 +845,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// Do the move
_, err = f.move(ctx, "/folders/", srcID, leaf, directoryID)
_, err = f.move("/folders/", srcID, leaf, directoryID)
if err != nil {
return err
}
@@ -1008,7 +887,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
var info api.Item
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &shareLink, &info)
resp, err = f.srv.CallJSON(&opts, &shareLink, &info)
return shouldRetry(resp, err)
})
return info.SharedLink.URL, err
@@ -1045,6 +924,11 @@ func (o *Object) Remote() string {
return o.remote
}
// srvPath returns a path for use on the server
func (o *Object) srvPath() string {
return replaceReservedChars(o.fs.rootSlash() + o.remote)
}
// Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.SHA1 {
@@ -1122,7 +1006,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
}
var info *api.Item
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
return shouldRetry(resp, err)
})
return info, err
@@ -1155,7 +1039,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1167,9 +1051,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// upload does a single non-multipart upload
//
// This is recommended for less than 50 MB of content
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
upload := api.UploadFile{
Name: o.fs.opt.Enc.FromStandardName(leaf),
Name: replaceReservedChars(leaf),
ContentModifiedAt: api.Time(modTime),
ContentCreatedAt: api.Time(modTime),
Parent: api.Parent{
@@ -1194,7 +1078,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
opts.Path = "/files/content"
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &upload, &result)
resp, err = o.fs.srv.CallJSON(&opts, &upload, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1227,16 +1111,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Upload with simple or multipart
if size <= int64(o.fs.opt.UploadCutoff) {
err = o.upload(ctx, in, leaf, directoryID, modTime)
err = o.upload(in, leaf, directoryID, modTime)
} else {
err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime)
err = o.uploadMultipart(in, leaf, directoryID, size, modTime)
}
return err
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.deleteObject(ctx, o.id)
return o.fs.deleteObject(o.id)
}
// ID returns the ID of the Object if known, or "" if not

View File

@@ -4,7 +4,6 @@ package box
import (
"bytes"
"context"
"crypto/sha1"
"encoding/base64"
"encoding/json"
@@ -23,7 +22,7 @@ import (
)
// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
func (o *Object) createUploadSession(leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/files/upload_sessions",
@@ -38,11 +37,11 @@ func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID stri
} else {
opts.Path = "/files/upload_sessions"
request.FolderID = directoryID
request.FileName = o.fs.opt.Enc.FromStandardName(leaf)
request.FileName = replaceReservedChars(leaf)
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, &response)
resp, err = o.fs.srv.CallJSON(&opts, &request, &response)
return shouldRetry(resp, err)
})
return
@@ -54,7 +53,7 @@ func sha1Digest(digest []byte) string {
}
// uploadPart uploads a part in an upload session
func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
chunkSize := int64(len(chunk))
sha1sum := sha1.Sum(chunk)
opts := rest.Opts{
@@ -71,7 +70,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
opts.Body = wrap(bytes.NewReader(chunk))
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response)
resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
return shouldRetry(resp, err)
})
if err != nil {
@@ -81,7 +80,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
}
// commitUpload finishes an upload session
func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/files/upload_sessions/" + SessionID + "/commit",
@@ -105,7 +104,7 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
outer:
for tries = 0; tries < maxTries; tries++ {
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
if err != nil {
return shouldRetry(resp, err)
}
@@ -155,7 +154,7 @@ outer:
}
// abortUpload cancels an upload session
func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error) {
func (o *Object) abortUpload(SessionID string) (err error) {
opts := rest.Opts{
Method: "DELETE",
Path: "/files/upload_sessions/" + SessionID,
@@ -164,16 +163,16 @@ func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error)
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
return err
}
// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
// Create upload session
session, err := o.createUploadSession(ctx, leaf, directoryID, size)
session, err := o.createUploadSession(leaf, directoryID, size)
if err != nil {
return errors.Wrap(err, "multipart upload create session failed")
}
@@ -184,7 +183,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
defer func() {
if err != nil {
fs.Debugf(o, "Cancelling multipart upload: %v", err)
cancelErr := o.abortUpload(ctx, session.ID)
cancelErr := o.abortUpload(session.ID)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", err)
}
@@ -236,7 +235,7 @@ outer:
defer wg.Done()
defer o.fs.uploadToken.Put()
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap)
partResponse, err := o.uploadPart(session.ID, position, size, buf, wrap)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to upload part")
select {
@@ -264,7 +263,7 @@ outer:
}
// Finalise the upload session
result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
result, err := o.commitUpload(session.ID, parts, modTime, hash.Sum(nil))
if err != nil {
return errors.Wrap(err, "multipart upload failed to finalize")
}
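commitUpload needs the SHA-1 of the whole file even though the body was consumed chunk by chunk, which the code above manages by keeping a running hash that is summed at commit time (the hash.Sum(nil) argument). The same effect in miniature with io.TeeReader:

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("pretend this is a large upload body")
	hasher := sha1.New()
	// Every byte read through tee is also written into the hash.
	tee := io.TeeReader(src, hasher)

	buf := make([]byte, 8) // tiny "chunk size" for the demo
	for {
		n, err := tee.Read(buf)
		if n > 0 {
			_ = buf[:n] // a real uploader would send this chunk here
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
	}
	// Digest of the full body, ready for the commit step.
	fmt.Println(hex.EncodeToString(hasher.Sum(nil)))
}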

View File

@@ -1864,24 +1864,6 @@ func cleanPath(p string) string {
return p
}
// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
do := f.Fs.Features().UserInfo
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx)
}
// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
do := f.Fs.Features().Disconnect
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx)
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -1897,6 +1879,4 @@ var (
_ fs.ListRer = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
)
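UserInfo and Disconnect show how a wrapping backend like cache surfaces optional capabilities: the features struct carries nil-able function fields, and a nil field degrades to fs.ErrorNotImplemented instead of panicking. A stripped-down sketch of the same delegation shape with illustrative types:

package main

import (
	"errors"
	"fmt"
)

var errNotImplemented = errors.New("optional feature not implemented")

// features mimics fs.Features: optional methods are nil-able fields.
type features struct {
	UserInfo func() (map[string]string, error)
}

type wrapper struct {
	feat features
}

// UserInfo delegates when the wrapped backend provides the feature and
// returns a sentinel error when it does not.
func (w *wrapper) UserInfo() (map[string]string, error) {
	if w.feat.UserInfo == nil {
		return nil, errNotImplemented
	}
	return w.feat.UserInfo()
}

func main() {
	with := &wrapper{feat: features{
		UserInfo: func() (map[string]string, error) {
			return map[string]string{"user": "nick"}, nil
		},
	}}
	without := &wrapper{}
	fmt.Println(with.UserInfo())
	fmt.Println(without.UserInfo())
}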

View File

@@ -1,5 +1,4 @@
// +build !plan9
// +build !race
package cache_test
@@ -18,6 +17,7 @@ import (
"path/filepath"
"runtime"
"runtime/debug"
"strconv"
"strings"
"testing"
"time"
@@ -33,7 +33,6 @@ import (
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/stretchr/testify/assert"
@@ -356,8 +355,8 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
testData2, err = base64.StdEncoding.DecodeString(cryptedText2Base64)
require.NoError(t, err)
} else {
testData1 = []byte(random.String(100))
testData2 = []byte(random.String(200))
testData1 = []byte(fstest.RandomString(100))
testData2 = []byte(fstest.RandomString(200))
}
// write the object
@@ -1139,6 +1138,23 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
return f
}
func (r *run) writeRemoteRandomBytes(t *testing.T, f fs.Fs, p string, size int64) string {
remote := path.Join(p, strconv.Itoa(rand.Int())+".bin")
// create some rand test data
testData := randStringBytes(int(size))
r.writeRemoteBytes(t, f, remote, testData)
return remote
}
func (r *run) writeObjectRandomBytes(t *testing.T, f fs.Fs, p string, size int64) fs.Object {
remote := path.Join(p, strconv.Itoa(rand.Int())+".bin")
// create some rand test data
testData := randStringBytes(int(size))
return r.writeObjectBytes(t, f, remote, testData)
}
func (r *run) writeRemoteString(t *testing.T, f fs.Fs, remote, content string) {
r.writeRemoteBytes(t, f, remote, []byte(content))
}
@@ -1327,6 +1343,26 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
return l, err
}
// listPath lists the names under remote, going via the mount when
// r.useMount is set and via the Fs API otherwise
func (r *run) listPath(t *testing.T, f fs.Fs, remote string) []string {
var err error
var l []string
if r.useMount {
var list []os.FileInfo
list, err = ioutil.ReadDir(path.Join(r.mntDir, remote))
for _, ll := range list {
l = append(l, ll.Name())
}
} else {
var list fs.DirEntries
list, err = f.List(context.Background(), remote)
for _, ll := range list {
l = append(l, ll.Remote())
}
}
require.NoError(t, err)
return l
}
func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
in, err := os.Open(src)
if err != nil {

View File

@@ -1,21 +0,0 @@
// +build !linux !go1.13
// +build !darwin !go1.13
// +build !freebsd !go1.13
// +build !windows
// +build !race
package cache_test
import (
"testing"
"github.com/rclone/rclone/fs"
)
func (r *run) mountFs(t *testing.T, f fs.Fs) {
panic("mountFs not defined for this platform")
}
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
panic("unmountFs not defined for this platform")
}
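The constraint stack at the top of this file ANDs across lines and ORs within a line, so +build !linux !go1.13 reads "not Linux, or Go older than 1.13", and the file as a whole builds only where the real mount tests cannot. A tiny illustration of the old +build syntax (the tags here are examples):

// +build linux darwin
// +build go1.13

// The two lines above AND together; space-separated terms within a
// line OR together, so this file builds only on (linux OR darwin)
// AND go1.13+. A blank line must separate the constraints from the
// package clause.

package main

import "fmt"

func main() {
	fmt.Println("built on linux/darwin with go1.13 or newer")
}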

View File

@@ -1,5 +1,4 @@
// +build linux,go1.13 darwin,go1.13 freebsd,go1.13
// +build !race
// +build !plan9,!windows
package cache_test

View File

@@ -1,5 +1,4 @@
// +build windows
// +build !race
package cache_test

View File

@@ -1,7 +1,6 @@
// Test Cache filesystem interface
// +build !plan9
// +build !race
package cache_test
@@ -20,6 +19,5 @@ func TestIntegration(t *testing.T) {
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "MergeDirs", "OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
})
}

View File

@@ -1,5 +1,4 @@
// +build !plan9
// +build !race
package cache_test

View File

@@ -101,6 +101,15 @@ func (d *Directory) abs() string {
return cleanPath(path.Join(d.Dir, d.Name))
}
// parentRemote returns the parent remote of the absolute path
func (d *Directory) parentRemote() string {
absPath := d.abs()
if absPath == "" {
return ""
}
return cleanPath(path.Dir(absPath))
}
// ModTime returns the cached ModTime
func (d *Directory) ModTime(ctx context.Context) time.Time {
return time.Unix(0, d.CacheModTime)

View File

@@ -24,16 +24,15 @@ const (
type Object struct {
fs.Object `json:"-"`
ParentFs fs.Fs `json:"-"` // parent fs
CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory
Dir string `json:"dir"` // abs path of the object
CacheModTime int64 `json:"modTime"` // modification or creation time - IsZero for unknown
CacheSize int64 `json:"size"` // size of directory and contents or -1 if unknown
CacheStorable bool `json:"storable"` // says whether this object can be stored
CacheType string `json:"cacheType"`
CacheTs time.Time `json:"cacheTs"`
cacheHashesMu sync.Mutex
ParentFs fs.Fs `json:"-"` // parent fs
CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory
Dir string `json:"dir"` // abs path of the object
CacheModTime int64 `json:"modTime"` // modification or creation time - IsZero for unknown
CacheSize int64 `json:"size"` // size of directory and contents or -1 if unknown
CacheStorable bool `json:"storable"` // says whether this object can be stored
CacheType string `json:"cacheType"`
CacheTs time.Time `json:"cacheTs"`
CacheHashes map[hash.Type]string // all supported hashes cached
refreshMutex sync.Mutex
@@ -104,9 +103,7 @@ func (o *Object) updateData(ctx context.Context, source fs.Object) {
o.CacheSize = source.Size()
o.CacheStorable = source.Storable()
o.CacheTs = time.Now()
o.cacheHashesMu.Lock()
o.CacheHashes = make(map[hash.Type]string)
o.cacheHashesMu.Unlock()
}
// Fs returns its FS info
@@ -271,9 +268,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
o.CacheModTime = src.ModTime(ctx).UnixNano()
o.CacheSize = src.Size()
o.cacheHashesMu.Lock()
o.CacheHashes = make(map[hash.Type]string)
o.cacheHashesMu.Unlock()
o.CacheTs = time.Now()
o.persist()
@@ -314,12 +309,11 @@ func (o *Object) Remove(ctx context.Context) error {
// since it might or might not be called, this is lazily loaded
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
_ = o.refresh(ctx)
o.cacheHashesMu.Lock()
if o.CacheHashes == nil {
o.CacheHashes = make(map[hash.Type]string)
}
cachedHash, found := o.CacheHashes[ht]
o.cacheHashesMu.Unlock()
if found {
return cachedHash, nil
}
@@ -330,9 +324,7 @@ func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
if err != nil {
return "", err
}
o.cacheHashesMu.Lock()
o.CacheHashes[ht] = liveHash
o.cacheHashesMu.Unlock()
o.persist()
fs.Debugf(o, "object hash cached: %v", liveHash)

View File

@@ -16,10 +16,10 @@ import (
"sync"
"time"
bolt "github.com/coreos/bbolt"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
bolt "go.etcd.io/bbolt"
)
// Constants
@@ -767,6 +767,31 @@ func (b *Persistent) iterateBuckets(buk *bolt.Bucket, bucketFn func(name string)
return err
}
// dumpRoot renders the whole bucket tree under RootBucket as indented
// JSON, for debugging
func (b *Persistent) dumpRoot() string {
var itBuckets func(buk *bolt.Bucket) map[string]interface{}
itBuckets = func(buk *bolt.Bucket) map[string]interface{} {
m := make(map[string]interface{})
c := buk.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
if v == nil {
buk2 := buk.Bucket(k)
m[string(k)] = itBuckets(buk2)
} else {
m[string(k)] = "-"
}
}
return m
}
var mm map[string]interface{}
_ = b.db.View(func(tx *bolt.Tx) error {
mm = itBuckets(tx.Bucket([]byte(RootBucket)))
return nil
})
raw, _ := json.MarshalIndent(mm, "", " ")
return string(raw)
}
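dumpRoot walks nested bolt buckets recursively with a cursor, using a nil value to detect a sub-bucket. For context, a minimal bbolt session showing the transaction shape that both dumpRoot and addPendingUpload rely on; the path and bucket names are illustrative:

package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("/tmp/demo.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Writes happen inside Update, a read-write transaction.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("root"))
		if err != nil {
			return err
		}
		return b.Put([]byte("key"), []byte("value"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// Reads happen inside View; the cursor walks keys in order, and a
	// nil value marks a nested bucket, which is what dumpRoot exploits.
	_ = db.View(func(tx *bolt.Tx) error {
		c := tx.Bucket([]byte("root")).Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			fmt.Printf("%s = %s\n", k, v)
		}
		return nil
	})
}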
// addPendingUpload adds a new file to the pending queue of uploads
func (b *Persistent) addPendingUpload(destPath string, started bool) error {
return b.db.Update(func(tx *bolt.Tx) error {

File diff suppressed because it is too large

View File

@@ -1,691 +0,0 @@
package chunker
import (
"bytes"
"context"
"flag"
"fmt"
"io/ioutil"
"path"
"regexp"
"strings"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Command line flags
var (
UploadKilobytes = flag.Int("upload-kilobytes", 0, "Upload size in Kilobytes, set this to test large uploads")
)
// test that chunking does not break large uploads
func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
t.Run(fmt.Sprintf("PutLarge%dk", kilobytes), func(t *testing.T) {
fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
Path: fmt.Sprintf("chunker-upload-%dk", kilobytes),
Size: int64(kilobytes) * int64(fs.KibiByte),
})
})
}
// test chunk name parser
func testChunkNameFormat(t *testing.T, f *Fs) {
saveOpt := f.opt
defer func() {
// restore original settings (f is pointer, f.opt is struct)
f.opt = saveOpt
_ = f.setChunkNameFormat(f.opt.NameFormat)
}()
assertFormat := func(pattern, wantDataFormat, wantCtrlFormat, wantNameRegexp string) {
err := f.setChunkNameFormat(pattern)
assert.NoError(t, err)
assert.Equal(t, wantDataFormat, f.dataNameFmt)
assert.Equal(t, wantCtrlFormat, f.ctrlNameFmt)
assert.Equal(t, wantNameRegexp, f.nameRegexp.String())
}
assertFormatValid := func(pattern string) {
err := f.setChunkNameFormat(pattern)
assert.NoError(t, err)
}
assertFormatInvalid := func(pattern string) {
err := f.setChunkNameFormat(pattern)
assert.Error(t, err)
}
assertMakeName := func(wantChunkName, mainName string, chunkNo int, ctrlType, xactID string) {
gotChunkName := ""
assert.NotPanics(t, func() {
gotChunkName = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
}, "makeChunkName(%q,%d,%q,%q) must not panic", mainName, chunkNo, ctrlType, xactID)
if gotChunkName != "" {
assert.Equal(t, wantChunkName, gotChunkName)
}
}
assertMakeNamePanics := func(mainName string, chunkNo int, ctrlType, xactID string) {
assert.Panics(t, func() {
_ = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
}, "makeChunkName(%q,%d,%q,%q) should panic", mainName, chunkNo, ctrlType, xactID)
}
assertParseName := func(fileName, wantMainName string, wantChunkNo int, wantCtrlType, wantXactID string) {
gotMainName, gotChunkNo, gotCtrlType, gotXactID := f.parseChunkName(fileName)
assert.Equal(t, wantMainName, gotMainName)
assert.Equal(t, wantChunkNo, gotChunkNo)
assert.Equal(t, wantCtrlType, gotCtrlType)
assert.Equal(t, wantXactID, gotXactID)
}
const newFormatSupported = false // support for patterns not starting with base name (*)
// valid formats
assertFormat(`*.rclone_chunk.###`, `%s.rclone_chunk.%03d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*.rclone_chunk.#`, `%s.rclone_chunk.%d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*_chunk_#####`, `%s_chunk_%05d`, `%s_chunk__%s`, `^(.+?)_chunk_(?:([0-9]{5,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*-chunk-#`, `%s-chunk-%d`, `%s-chunk-_%s`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*-chunk-#-%^$()[]{}.+-!?:\`, `%s-chunk-%d-%%^$()[]{}.+-!?:\`, `%s-chunk-_%s-%%^$()[]{}.+-!?:\`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))-%\^\$\(\)\[\]\{\}\.\+-!\?:\\(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
if newFormatSupported {
assertFormat(`_*-chunk-##,`, `_%s-chunk-%02d,`, `_%s-chunk-_%s,`, `^_(.+?)-chunk-(?:([0-9]{2,})|_([a-z][a-z0-9]{2,6})),(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
}
// invalid formats
assertFormatInvalid(`chunk-#`)
assertFormatInvalid(`*-chunk`)
assertFormatInvalid(`*-*-chunk-#`)
assertFormatInvalid(`*-chunk-#-#`)
assertFormatInvalid(`#-chunk-*`)
assertFormatInvalid(`*/#`)
assertFormatValid(`*#`)
assertFormatInvalid(`**#`)
assertFormatInvalid(`#*`)
assertFormatInvalid(``)
assertFormatInvalid(`-`)
// quick tests
if newFormatSupported {
assertFormat(`part_*_#`, `part_%s_%d`, `part_%s__%s`, `^part_(.+?)_(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9][0-9a-z]{3,8})\.\.tmp_([0-9]{10,13}))?$`)
f.opt.StartFrom = 1
assertMakeName(`part_fish_1`, "fish", 0, "", "")
assertParseName(`part_fish_43`, "fish", 42, "", "")
assertMakeName(`part_fish__locks`, "fish", -2, "locks", "")
assertParseName(`part_fish__locks`, "fish", -1, "locks", "")
assertMakeName(`part_fish__x2y`, "fish", -2, "x2y", "")
assertParseName(`part_fish__x2y`, "fish", -1, "x2y", "")
assertMakeName(`part_fish_3_0004`, "fish", 2, "", "4")
assertParseName(`part_fish_4_0005`, "fish", 3, "", "0005")
assertMakeName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -3, "blkinfo", "jj5fvo3wr")
assertParseName(`part_fish__blkinfo_zz9fvo3wr`, "fish", -1, "blkinfo", "zz9fvo3wr")
// old-style temporary suffix (parse only)
assertParseName(`part_fish_4..tmp_0000000011`, "fish", 3, "", "000b")
assertParseName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -1, "blkinfo", "jj5fvo3wr")
}
// prepare format for long tests
assertFormat(`*.chunk.###`, `%s.chunk.%03d`, `%s.chunk._%s`, `^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
f.opt.StartFrom = 2
// valid data chunks
assertMakeName(`fish.chunk.003`, "fish", 1, "", "")
assertParseName(`fish.chunk.003`, "fish", 1, "", "")
assertMakeName(`fish.chunk.021`, "fish", 19, "", "")
assertParseName(`fish.chunk.021`, "fish", 19, "", "")
// valid temporary data chunks
assertMakeName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
assertParseName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
assertMakeName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
assertParseName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
assertMakeName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
assertParseName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
assertMakeName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")
assertParseName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")
// valid temporary data chunks (old temporary suffix, only parse)
assertParseName(`fish.chunk.004..tmp_0000000047`, "fish", 2, "", "001b")
assertParseName(`fish.chunk.323..tmp_9994567890123`, "fish", 321, "", "3jjfvo3wr")
// parsing invalid data chunk names
assertParseName(`fish.chunk.3`, "", -1, "", "")
assertParseName(`fish.chunk.001`, "", -1, "", "")
assertParseName(`fish.chunk.21`, "", -1, "", "")
assertParseName(`fish.chunk.-21`, "", -1, "", "")
assertParseName(`fish.chunk.004abcd`, "", -1, "", "") // missing underscore delimiter
assertParseName(`fish.chunk.004__1234`, "", -1, "", "") // extra underscore delimiter
assertParseName(`fish.chunk.004_123`, "", -1, "", "") // too short temporary suffix
assertParseName(`fish.chunk.004_1234567890`, "", -1, "", "") // too long temporary suffix
assertParseName(`fish.chunk.004_-1234`, "", -1, "", "") // temporary suffix must be positive
assertParseName(`fish.chunk.004_123E`, "", -1, "", "") // uppercase not allowed
assertParseName(`fish.chunk.004_12.3`, "", -1, "", "") // punctuation not allowed
// parsing invalid data chunk names (old temporary suffix)
assertParseName(`fish.chunk.004.tmp_0000000021`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_123456789`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_012345678901234567890123456789`, "", -1, "", "")
assertParseName(`fish.chunk.323..tmp_12345678901234`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_-1`, "", -1, "", "")
// valid control chunks
assertMakeName(`fish.chunk._info`, "fish", -1, "info", "")
assertMakeName(`fish.chunk._locks`, "fish", -2, "locks", "")
assertMakeName(`fish.chunk._blkinfo`, "fish", -3, "blkinfo", "")
assertMakeName(`fish.chunk._x2y`, "fish", -4, "x2y", "")
assertParseName(`fish.chunk._info`, "fish", -1, "info", "")
assertParseName(`fish.chunk._locks`, "fish", -1, "locks", "")
assertParseName(`fish.chunk._blkinfo`, "fish", -1, "blkinfo", "")
assertParseName(`fish.chunk._x2y`, "fish", -1, "x2y", "")
// valid temporary control chunks
assertMakeName(`fish.chunk._info_0001`, "fish", -1, "info", "1")
assertMakeName(`fish.chunk._locks_4321`, "fish", -2, "locks", "4321")
assertMakeName(`fish.chunk._uploads_abcd`, "fish", -3, "uploads", "abcd")
assertMakeName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -4, "blkinfo", "xyzabcdef")
assertMakeName(`fish.chunk._x2y_1aaa`, "fish", -5, "x2y", "1aaa")
assertParseName(`fish.chunk._info_0001`, "fish", -1, "info", "0001")
assertParseName(`fish.chunk._locks_4321`, "fish", -1, "locks", "4321")
assertParseName(`fish.chunk._uploads_9abc`, "fish", -1, "uploads", "9abc")
assertParseName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -1, "blkinfo", "xyzabcdef")
assertParseName(`fish.chunk._x2y_1aaa`, "fish", -1, "x2y", "1aaa")
// valid temporary control chunks (old temporary suffix, parse only)
assertParseName(`fish.chunk._info..tmp_0000000047`, "fish", -1, "info", "001b")
assertParseName(`fish.chunk._locks..tmp_0000054321`, "fish", -1, "locks", "15wx")
assertParseName(`fish.chunk._uploads..tmp_0000000000`, "fish", -1, "uploads", "0000")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123`, "fish", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._x2y..tmp_0000000000`, "fish", -1, "x2y", "0000")
// parsing invalid control chunk names
assertParseName(`fish.chunk.metadata`, "", -1, "", "") // must be prepended by underscore
assertParseName(`fish.chunk.info`, "", -1, "", "")
assertParseName(`fish.chunk.locks`, "", -1, "", "")
assertParseName(`fish.chunk.uploads`, "", -1, "", "")
assertParseName(`fish.chunk._os`, "", -1, "", "") // too short
assertParseName(`fish.chunk._metadata`, "", -1, "", "") // too long
assertParseName(`fish.chunk._blockinfo`, "", -1, "", "") // way too long
assertParseName(`fish.chunk._4me`, "", -1, "", "") // cannot start with digit
assertParseName(`fish.chunk._567`, "", -1, "", "") // cannot be all digits
assertParseName(`fish.chunk._me_ta`, "", -1, "", "") // punctuation not allowed
assertParseName(`fish.chunk._in-fo`, "", -1, "", "")
assertParseName(`fish.chunk._.bin`, "", -1, "", "")
assertParseName(`fish.chunk._.2xy`, "", -1, "", "")
// parsing invalid temporary control chunks
assertParseName(`fish.chunk._blkinfo1234`, "", -1, "", "") // missing underscore delimiter
assertParseName(`fish.chunk._info__1234`, "", -1, "", "") // extra underscore delimiter
assertParseName(`fish.chunk._info_123`, "", -1, "", "") // too short temporary suffix
assertParseName(`fish.chunk._info_1234567890`, "", -1, "", "") // too long temporary suffix
assertParseName(`fish.chunk._info_-1234`, "", -1, "", "") // temporary suffix must be positive
assertParseName(`fish.chunk._info_123E`, "", -1, "", "") // uppercase not allowed
assertParseName(`fish.chunk._info_12.3`, "", -1, "", "") // punctuation not allowed
assertParseName(`fish.chunk._locks..tmp_123456789`, "", -1, "", "")
assertParseName(`fish.chunk._meta..tmp_-1`, "", -1, "", "")
assertParseName(`fish.chunk._blockinfo..tmp_012345678901234567890123456789`, "", -1, "", "")
// short control chunk names: 3 letters ok, 1-2 letters not allowed
assertMakeName(`fish.chunk._ext`, "fish", -1, "ext", "")
assertParseName(`fish.chunk._int`, "fish", -1, "int", "")
assertMakeNamePanics("fish", -1, "in", "")
assertMakeNamePanics("fish", -1, "up", "4")
assertMakeNamePanics("fish", -1, "x", "")
assertMakeNamePanics("fish", -1, "c", "1z")
assertMakeName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0")
assertMakeName(`fish.chunk._ext_0026`, "fish", -1, "ext", "26")
assertMakeName(`fish.chunk._int_0abc`, "fish", -1, "int", "abc")
assertMakeName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertParseName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0000")
assertParseName(`fish.chunk._ext_0026`, "fish", -1, "ext", "0026")
assertParseName(`fish.chunk._int_0abc`, "fish", -1, "int", "0abc")
assertParseName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
// base file name can sometimes look like a valid chunk name
assertParseName(`fish.chunk.003.chunk.004`, "fish.chunk.003", 2, "", "")
assertParseName(`fish.chunk.003.chunk._info`, "fish.chunk.003", -1, "info", "")
assertParseName(`fish.chunk.003.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._info.chunk.004`, "fish.chunk._info", 2, "", "")
assertParseName(`fish.chunk._info.chunk._info`, "fish.chunk._info", -1, "info", "")
assertParseName(`fish.chunk._info.chunk._info.chunk._Meta`, "", -1, "", "")
// base file name looking like a valid chunk name (old temporary suffix)
assertParseName(`fish.chunk.003.chunk.005..tmp_0000000022`, "fish.chunk.003", 3, "", "000m")
assertParseName(`fish.chunk.003.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._info.chunk.005..tmp_0000000023`, "fish.chunk._info", 3, "", "000n")
assertParseName(`fish.chunk._info.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk.003.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.003", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._info.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._info", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.004`, "fish.chunk.004..tmp_0000000021", 2, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.005..tmp_0000000025`, "fish.chunk.004..tmp_0000000021", 3, "", "000p")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._info`, "fish.chunk.004..tmp_0000000021", -1, "info", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.004..tmp_0000000021", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.004`, "fish.chunk._blkinfo..tmp_9994567890123", 2, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.005..tmp_0000000026`, "fish.chunk._blkinfo..tmp_9994567890123", 3, "", "000q")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "info", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.004`, "fish.chunk._blkinfo..tmp_1234567890123456789", 2, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.005..tmp_0000000022`, "fish.chunk._blkinfo..tmp_1234567890123456789", 3, "", "000m")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "info", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
// attempts to make invalid chunk names
assertMakeNamePanics("fish", -1, "", "") // neither data nor control
assertMakeNamePanics("fish", 0, "info", "") // both data and control
assertMakeNamePanics("fish", -1, "metadata", "") // control type too long
assertMakeNamePanics("fish", -1, "blockinfo", "") // control type way too long
assertMakeNamePanics("fish", -1, "2xy", "") // first digit not allowed
assertMakeNamePanics("fish", -1, "123", "") // all digits not allowed
assertMakeNamePanics("fish", -1, "Meta", "") // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", "") // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", "")
assertMakeNamePanics("fish", -1, "info_", "")
assertMakeNamePanics("fish", -2, ".bind", "")
assertMakeNamePanics("fish", -2, "bind.", "")
assertMakeNamePanics("fish", -1, "", "1") // neither data nor control
assertMakeNamePanics("fish", 0, "info", "23") // both data and control
assertMakeNamePanics("fish", -1, "metadata", "45") // control type too long
assertMakeNamePanics("fish", -1, "blockinfo", "7") // control type way too long
assertMakeNamePanics("fish", -1, "2xy", "abc") // first digit not allowed
assertMakeNamePanics("fish", -1, "123", "def") // all digits not allowed
assertMakeNamePanics("fish", -1, "Meta", "mnk") // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", "xyz") // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", "5678")
assertMakeNamePanics("fish", -1, "info_", "999")
assertMakeNamePanics("fish", -2, ".bind", "0")
assertMakeNamePanics("fish", -2, "bind.", "0")
assertMakeNamePanics("fish", 0, "", "1234567890") // temporary suffix too long
assertMakeNamePanics("fish", 0, "", "123F4") // uppercase not allowed
assertMakeNamePanics("fish", 0, "", "123.") // punctuation not allowed
assertMakeNamePanics("fish", 0, "", "_123")
}
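// Standalone sketch (not part of the original file) of the naming scheme the
// assertions above exercise, assuming the default `*.chunk.###` pattern with
// start_from=1: data chunks append a zero-padded ordinal, control chunks an
// underscore plus a short lowercase type, and temporary chunks a transaction
// ID suffix.
func exampleChunkNaming() {
	const dataFmt = "%s.chunk.%03d" // derived from pattern *.chunk.###
	const ctrlFmt = "%s.chunk._%s"
	startFrom := 1
	fmt.Println(fmt.Sprintf(dataFmt, "fish", 0+startFrom))           // fish.chunk.001
	fmt.Println(fmt.Sprintf(ctrlFmt, "fish", "info"))                // fish.chunk._info
	fmt.Println(fmt.Sprintf(dataFmt, "fish", 0+startFrom) + "_0001") // temporary chunk for xactID "1"
}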
func testSmallFileInternals(t *testing.T, f *Fs) {
const dir = "small"
ctx := context.Background()
saveOpt := f.opt
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
f.opt.FailHard = false
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
checkSmallFileInternals := func(obj fs.Object) {
assert.NotNil(t, obj)
o, ok := obj.(*Object)
assert.True(t, ok)
assert.NotNil(t, o)
if o == nil {
return
}
switch {
case !f.useMeta:
// If meta format is "none", non-chunked file (even empty)
// internally is a single chunk without meta object.
assert.Nil(t, o.main)
assert.True(t, o.isComposite()) // sorry, sometimes a name is misleading
assert.Equal(t, 1, len(o.chunks))
case f.hashAll:
// Consistent hashing forces meta object on small files too
assert.NotNil(t, o.main)
assert.True(t, o.isComposite())
assert.Equal(t, 1, len(o.chunks))
default:
// normally non-chunked file is kept in the Object's main field
assert.NotNil(t, o.main)
assert.False(t, o.isComposite())
assert.Equal(t, 0, len(o.chunks))
}
}
checkContents := func(obj fs.Object, contents string) {
assert.NotNil(t, obj)
assert.Equal(t, int64(len(contents)), obj.Size())
r, err := obj.Open(ctx)
assert.NoError(t, err)
assert.NotNil(t, r)
if r == nil {
return
}
data, err := ioutil.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()
}
checkHashsum := func(obj fs.Object) {
var ht hash.Type
switch {
case !f.hashAll:
return
case f.useMD5:
ht = hash.MD5
case f.useSHA1:
ht = hash.SHA1
default:
return
}
// even empty files must have hashsum in consistent mode
sum, err := obj.Hash(ctx, ht)
assert.NoError(t, err)
assert.NotEqual(t, sum, "")
}
checkSmallFile := func(name, contents string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
assert.NotNil(t, put)
checkSmallFileInternals(put)
checkContents(put, contents)
checkHashsum(put)
// objects returned by Put and NewObject must have similar structure
obj, err := f.NewObject(ctx, filename)
assert.NoError(t, err)
assert.NotNil(t, obj)
checkSmallFileInternals(obj)
checkContents(obj, contents)
checkHashsum(obj)
_ = obj.Remove(ctx)
_ = put.Remove(ctx) // for good
}
checkSmallFile("emptyfile", "")
checkSmallFile("smallfile", "Ok")
}
func testPreventCorruption(t *testing.T, f *Fs) {
if f.opt.ChunkSize > 50 {
t.Skip("this test requires small chunks")
}
const dir = "corrupted"
ctx := context.Background()
saveOpt := f.opt
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
f.opt.FailHard = true
contents := random.String(250)
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
const overlapMessage = "chunk overlap"
assertOverlapError := func(err error) {
assert.Error(t, err)
if err != nil {
assert.Contains(t, err.Error(), overlapMessage)
}
}
newFile := func(name string) fs.Object {
item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj
}
billyObj := newFile("billy")
billyChunkName := func(chunkNo int) string {
return f.makeChunkName(billyObj.Remote(), chunkNo, "", "")
}
err := f.Mkdir(ctx, billyChunkName(1))
assertOverlapError(err)
_, err = f.Move(ctx, newFile("silly1"), billyChunkName(2))
assert.Error(t, err)
assert.True(t, err == fs.ErrorCantMove || (err != nil && strings.Contains(err.Error(), overlapMessage)))
_, err = f.Copy(ctx, newFile("silly2"), billyChunkName(3))
assert.Error(t, err)
assert.True(t, err == fs.ErrorCantCopy || (err != nil && strings.Contains(err.Error(), overlapMessage)))
// accessing chunks in strict mode is prohibited
f.opt.FailHard = true
billyChunk4Name := billyChunkName(4)
billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
assertOverlapError(err)
f.opt.FailHard = false
billyChunk4, err = f.NewObject(ctx, billyChunk4Name)
assert.NoError(t, err)
require.NotNil(t, billyChunk4)
f.opt.FailHard = true
_, err = f.Put(ctx, bytes.NewBufferString(contents), billyChunk4)
assertOverlapError(err)
// you can freely read chunks (if you have an object)
r, err := billyChunk4.Open(ctx)
assert.NoError(t, err)
var chunkContents []byte
assert.NotPanics(t, func() {
chunkContents, err = ioutil.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
assert.NotEqual(t, contents, string(chunkContents))
// but you can't change them
err = billyChunk4.Update(ctx, bytes.NewBufferString(contents), newFile("silly3"))
assertOverlapError(err)
// Remove isn't special, you can't corrupt files even if you have an object
err = billyChunk4.Remove(ctx)
assertOverlapError(err)
// recreate billy in case it was somehow corrupted
willyObj := newFile("willy")
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", "")
f.opt.FailHard = false
willyChunk, err := f.NewObject(ctx, willyChunkName)
f.opt.FailHard = true
assert.NoError(t, err)
require.NotNil(t, willyChunk)
_, err = operations.Copy(ctx, f, willyChunk, willyChunkName, newFile("silly4"))
assertOverlapError(err)
// operations.Move will return error when chunker's Move refused
// to corrupt target file, but reverts to copy/delete method
// still trying to delete target chunk. Chunker must come to rescue.
_, err = operations.Move(ctx, f, willyChunk, willyChunkName, newFile("silly5"))
assertOverlapError(err)
r, err = willyChunk.Open(ctx)
assert.NoError(t, err)
assert.NotPanics(t, func() {
_, err = ioutil.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
}
func testChunkNumberOverflow(t *testing.T, f *Fs) {
if f.opt.ChunkSize > 50 {
t.Skip("this test requires small chunks")
}
const dir = "wreaked"
const wreakNumber = 10200300
ctx := context.Background()
saveOpt := f.opt
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
contents := random.String(100)
newFile := func(f fs.Fs, name string) (fs.Object, string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj, filename
}
f.opt.FailHard = false
file, fileName := newFile(f, "wreaker")
wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", ""))
f.opt.FailHard = false
fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
_, err := f.NewObject(ctx, fileName)
assert.Error(t, err)
f.opt.FailHard = true
_, err = f.List(ctx, dir)
assert.Error(t, err)
_, err = f.NewObject(ctx, fileName)
assert.Error(t, err)
f.opt.FailHard = false
_ = wreak.Remove(ctx)
_ = file.Remove(ctx)
}
func testMetadataInput(t *testing.T, f *Fs) {
const minChunkForTest = 50
if f.opt.ChunkSize < minChunkForTest {
t.Skip("this test requires chunks that fit metadata")
}
const dir = "usermeta"
ctx := context.Background()
saveOpt := f.opt
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
f.opt.FailHard = false
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
putFile := func(f fs.Fs, name, contents, message string, check bool) fs.Object {
item := fstest.Item{Path: name, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
assert.NotNil(t, obj, message)
return obj
}
runSubtest := func(contents, name string) {
description := fmt.Sprintf("file with %s metadata", name)
filename := path.Join(dir, name)
require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")
part := putFile(f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
_ = putFile(f, filename, contents, "upload "+description, false)
obj, err := f.NewObject(ctx, filename)
assert.NoError(t, err, "access "+description)
assert.NotNil(t, obj)
assert.Equal(t, int64(len(contents)), obj.Size(), "size "+description)
o, ok := obj.(*Object)
assert.True(t, ok)
if o != nil {
assert.True(t, o.isComposite() && len(o.chunks) == 1, description+" is forced composite")
o = nil
}
defer func() {
_ = obj.Remove(ctx)
_ = part.Remove(ctx)
}()
r, err := obj.Open(ctx)
assert.NoError(t, err, "open "+description)
assert.NotNil(t, r, "open stream of "+description)
if err == nil && r != nil {
data, err := ioutil.ReadAll(r)
assert.NoError(t, err, "read all of "+description)
assert.Equal(t, contents, string(data), description+" contents is ok")
_ = r.Close()
}
}
metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "")
require.NoError(t, err)
todaysMeta := string(metaData)
runSubtest(todaysMeta, "today")
pastMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":1`)
pastMeta = regexp.MustCompile(`"size":[0-9]+`).ReplaceAllLiteralString(pastMeta, `"size":0`)
runSubtest(pastMeta, "past")
futureMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":999`)
futureMeta = regexp.MustCompile(`"nchunks":[0-9]+`).ReplaceAllLiteralString(futureMeta, `"nchunks":0,"x":"y"`)
runSubtest(futureMeta, "future")
}
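// For orientation (a sketch, not part of the original file): judging from the
// regexp rewrites above, the simple JSON metadata carries at least a format
// version, the composite size and the chunk count, roughly
//
//	{"ver":<version>,"size":3,"nchunks":1}
//
// with the exact field set defined by marshalSimpleJSON.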
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("PutLarge", func(t *testing.T) {
if *UploadKilobytes <= 0 {
t.Skip("-upload-kilobytes is not set")
}
testPutLarge(t, f, *UploadKilobytes)
})
t.Run("ChunkNameFormat", func(t *testing.T) {
testChunkNameFormat(t, f)
})
t.Run("SmallFileInternals", func(t *testing.T) {
testSmallFileInternals(t, f)
})
t.Run("PreventCorruption", func(t *testing.T) {
testPreventCorruption(t, f)
})
t.Run("ChunkNumberOverflow", func(t *testing.T) {
testChunkNumberOverflow(t, f)
})
t.Run("MetadataInput", func(t *testing.T) {
testMetadataInput(t, f)
})
}
var _ fstests.InternalTester = (*Fs)(nil)
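// The blank assignment above is a compile-time assertion that *Fs implements
// fstests.InternalTester, which is how the integration test harness discovers
// and runs the InternalTest dispatcher defined above.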


@@ -1,58 +0,0 @@
// Test the Chunker filesystem interface
package chunker_test
import (
"flag"
"os"
"path/filepath"
"testing"
_ "github.com/rclone/rclone/backend/all" // for integration tests
"github.com/rclone/rclone/backend/chunker"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
// Command line flags
var (
// Invalid characters are not supported by some remotes, eg. Mailru.
// We enable testing with invalid characters when -remote is not set, so
// chunker overlays a local directory, but invalid characters are disabled
// by default when -remote is set, eg. when test_all runs backend tests.
// You can still test with invalid characters using the below flag.
UseBadChars = flag.Bool("bad-chars", false, "Set to test bad characters in file names when -remote is set")
)
// TestIntegration runs integration tests against a concrete remote
// set by the -remote flag. If the flag is not set, it creates a
// dynamic chunker overlay wrapping a local temporary directory.
func TestIntegration(t *testing.T) {
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*chunker.Object)(nil),
SkipBadWindowsCharacters: !*UseBadChars,
UnimplementableObjectMethods: []string{
"MimeType",
"GetTier",
"SetTier",
},
UnimplementableFsMethods: []string{
"PublicLink",
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"UserInfo",
"Disconnect",
},
}
if *fstest.RemoteName == "" {
name := "TestChunker"
opt.RemoteName = name + ":"
tempDir := filepath.Join(os.TempDir(), "rclone-chunker-test-standard")
opt.ExtraConfig = []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "chunker"},
{Name: name, Key: "remote", Value: tempDir},
}
}
fstests.Run(t, &opt)
}
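// Typical invocations (assumed, following rclone's usual test conventions):
//
//	go test -v ./backend/chunker                       // chunker over a local temp dir
//	go test -v ./backend/chunker -remote TestChunker:  // against a configured remote
//
// The -bad-chars flag declared above re-enables invalid-character tests when
// -remote is set.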


@@ -208,6 +208,21 @@ func (c *cipher) putBlock(buf []byte) {
c.buffers.Put(buf)
}
// checkValidString returns an error if the byte string contains control
// characters (0x00-0x1F or 0x7F) or is not a valid UTF-8 string
func checkValidString(buf []byte) error {
for i := range buf {
c := buf[i]
if c >= 0x00 && c < 0x20 || c == 0x7F {
return ErrorBadDecryptControlChar
}
}
if !utf8.Valid(buf) {
return ErrorBadDecryptUTF8
}
return nil
}
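// For example (sketch): checkValidString([]byte("ok")) returns nil,
// checkValidString([]byte("a\x01b")) returns ErrorBadDecryptControlChar, and
// checkValidString([]byte{0xc3, 0x28}) returns ErrorBadDecryptUTF8; the
// TestValidString table in the accompanying test diff walks these cases
// exhaustively.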
// encodeFileName encodes a filename using a modified version of
// standard base32 as described in RFC4648
//
@@ -279,6 +294,10 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
if err != nil {
return "", err
}
err = checkValidString(plaintext)
if err != nil {
return "", err
}
return string(plaintext), err
}
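// Net effect (sketch): a ciphertext segment that decrypts to something like
// "hello\x01", or to bytes that are not valid UTF-8, now fails with a typed
// error instead of producing an unusable file name; presumably this also
// catches decryption with a wrong key early, since such output is rarely
// valid UTF-8.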


@@ -44,6 +44,69 @@ func TestNewNameEncryptionModeString(t *testing.T) {
assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3")
}
func TestValidString(t *testing.T) {
for _, test := range []struct {
in string
expected error
}{
{"", nil},
{"\x01", ErrorBadDecryptControlChar},
{"a\x02", ErrorBadDecryptControlChar},
{"abc\x03", ErrorBadDecryptControlChar},
{"abc\x04def", ErrorBadDecryptControlChar},
{"\x05d", ErrorBadDecryptControlChar},
{"\x06def", ErrorBadDecryptControlChar},
{"\x07", ErrorBadDecryptControlChar},
{"\x08", ErrorBadDecryptControlChar},
{"\x09", ErrorBadDecryptControlChar},
{"\x0A", ErrorBadDecryptControlChar},
{"\x0B", ErrorBadDecryptControlChar},
{"\x0C", ErrorBadDecryptControlChar},
{"\x0D", ErrorBadDecryptControlChar},
{"\x0E", ErrorBadDecryptControlChar},
{"\x0F", ErrorBadDecryptControlChar},
{"\x10", ErrorBadDecryptControlChar},
{"\x11", ErrorBadDecryptControlChar},
{"\x12", ErrorBadDecryptControlChar},
{"\x13", ErrorBadDecryptControlChar},
{"\x14", ErrorBadDecryptControlChar},
{"\x15", ErrorBadDecryptControlChar},
{"\x16", ErrorBadDecryptControlChar},
{"\x17", ErrorBadDecryptControlChar},
{"\x18", ErrorBadDecryptControlChar},
{"\x19", ErrorBadDecryptControlChar},
{"\x1A", ErrorBadDecryptControlChar},
{"\x1B", ErrorBadDecryptControlChar},
{"\x1C", ErrorBadDecryptControlChar},
{"\x1D", ErrorBadDecryptControlChar},
{"\x1E", ErrorBadDecryptControlChar},
{"\x1F", ErrorBadDecryptControlChar},
{"\x20", nil},
{"\x7E", nil},
{"\x7F", ErrorBadDecryptControlChar},
{"£100", nil},
{`hello? sausage/êé/Hello, 世界/ " ' @ < > & ?/z.txt`, nil},
{"£100", nil},
// Following tests from https://secure.php.net/manual/en/reference.pcre.pattern.modifiers.php#54805
{"a", nil}, // Valid ASCII
{"\xc3\xb1", nil}, // Valid 2 Octet Sequence
{"\xc3\x28", ErrorBadDecryptUTF8}, // Invalid 2 Octet Sequence
{"\xa0\xa1", ErrorBadDecryptUTF8}, // Invalid Sequence Identifier
{"\xe2\x82\xa1", nil}, // Valid 3 Octet Sequence
{"\xe2\x28\xa1", ErrorBadDecryptUTF8}, // Invalid 3 Octet Sequence (in 2nd Octet)
{"\xe2\x82\x28", ErrorBadDecryptUTF8}, // Invalid 3 Octet Sequence (in 3rd Octet)
{"\xf0\x90\x8c\xbc", nil}, // Valid 4 Octet Sequence
{"\xf0\x28\x8c\xbc", ErrorBadDecryptUTF8}, // Invalid 4 Octet Sequence (in 2nd Octet)
{"\xf0\x90\x28\xbc", ErrorBadDecryptUTF8}, // Invalid 4 Octet Sequence (in 3rd Octet)
{"\xf0\x28\x8c\x28", ErrorBadDecryptUTF8}, // Invalid 4 Octet Sequence (in 4th Octet)
{"\xf8\xa1\xa1\xa1\xa1", ErrorBadDecryptUTF8}, // Valid 5 Octet Sequence (but not Unicode!)
{"\xfc\xa1\xa1\xa1\xa1\xa1", ErrorBadDecryptUTF8}, // Valid 6 Octet Sequence (but not Unicode!)
} {
actual := checkValidString([]byte(test.in))
assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
}
}
func TestEncodeFileName(t *testing.T) {
for _, test := range []struct {
in string
@@ -147,6 +210,8 @@ func TestDecryptSegment(t *testing.T) {
{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
{c.encryptSegment("\x01"), ErrorBadDecryptControlChar},
{c.encryptSegment("\xc3\x28"), ErrorBadDecryptUTF8},
} {
actual, actualErr := c.decryptSegment(test.in)
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
@@ -640,16 +705,16 @@ var (
// Test test infrastructure first!
func TestRandomSource(t *testing.T) {
source := newRandomSource(1e8)
sink := newRandomSource(1e8)
source := newRandomSource(1E8)
sink := newRandomSource(1E8)
n, err := io.Copy(sink, source)
assert.NoError(t, err)
assert.Equal(t, int64(1e8), n)
assert.Equal(t, int64(1E8), n)
source = newRandomSource(1e8)
source = newRandomSource(1E8)
buf := make([]byte, 16)
_, _ = source.Read(buf)
sink = newRandomSource(1e8)
sink = newRandomSource(1E8)
_, err = io.Copy(sink, source)
assert.Error(t, err, "Error in stream")
}
@@ -689,23 +754,23 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
}
func TestEncryptDecrypt1(t *testing.T) {
testEncryptDecrypt(t, 1, 1e7)
testEncryptDecrypt(t, 1, 1E7)
}
func TestEncryptDecrypt32(t *testing.T) {
testEncryptDecrypt(t, 32, 1e8)
testEncryptDecrypt(t, 32, 1E8)
}
func TestEncryptDecrypt4096(t *testing.T) {
testEncryptDecrypt(t, 4096, 1e8)
testEncryptDecrypt(t, 4096, 1E8)
}
func TestEncryptDecrypt65536(t *testing.T) {
testEncryptDecrypt(t, 65536, 1e8)
testEncryptDecrypt(t, 65536, 1E8)
}
func TestEncryptDecrypt65537(t *testing.T) {
testEncryptDecrypt(t, 65537, 1e8)
testEncryptDecrypt(t, 65537, 1E8)
}
var (
@@ -738,7 +803,7 @@ func TestEncryptData(t *testing.T) {
} {
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nudge the crypto rand generator
c.cryptoRand = newRandomSource(1E8) // nudge the crypto rand generator
// Check encode works
buf := bytes.NewBuffer(test.in)
@@ -761,7 +826,7 @@ func TestEncryptData(t *testing.T) {
func TestNewEncrypter(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nudge the crypto rand generator
c.cryptoRand = newRandomSource(1E8) // nudge the crypto rand generator
z := &zeroes{}
@@ -788,7 +853,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
fh, err := c.newEncrypter(in, nil)
assert.NoError(t, err)
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
n, err := io.CopyN(ioutil.Discard, fh, 1E6)
assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(32), n)
}
@@ -820,7 +885,7 @@ func (c *closeDetector) Close() error {
func TestNewDecrypter(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nudge the crypto rand generator
c.cryptoRand = newRandomSource(1E8) // nudge the crypto rand generator
cd := newCloseDetector(bytes.NewBuffer(file0))
fh, err := c.newDecrypter(cd)
@@ -871,7 +936,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
fh, err := c.newDecrypter(in)
assert.NoError(t, err)
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
n, err := io.CopyN(ioutil.Discard, fh, 1E6)
assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(16), n)
}


@@ -5,7 +5,6 @@ import (
"context"
"fmt"
"io"
"path"
"strings"
"time"
@@ -36,21 +35,19 @@ func init() {
Default: "standard",
Examples: []fs.OptionExample{
{
Value: "off",
Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
}, {
Value: "standard",
Help: "Encrypt the filenames see the docs for the details.",
}, {
Value: "obfuscate",
Help: "Very simple filename obfuscation.",
}, {
Value: "off",
Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
},
},
}, {
Name: "directory_name_encryption",
Help: `Option to either encrypt directory names or leave them intact.
NB If filename_encryption is "off" then this option will do nothing.`,
Name: "directory_name_encryption",
Help: "Option to either encrypt directory names or leave them intact.",
Default: true,
Examples: []fs.OptionExample{
{
@@ -66,7 +63,6 @@ NB If filename_encryption is "off" then this option will do nothing.`,
Name: "password",
Help: "Password or pass phrase for encryption.",
IsPassword: true,
Required: true,
}, {
Name: "password2",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
@@ -147,10 +143,6 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
}
// Make sure to remove trailing . referring to the current dir
if path.Base(rpath) == "." {
rpath = strings.TrimSuffix(rpath, ".")
}
// Look for a file first
remotePath := fspath.JoinRootPath(wPath, cipher.EncryptFileName(rpath))
wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
@@ -810,24 +802,6 @@ func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
return newDir
}
// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
do := f.Fs.Features().UserInfo
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx)
}
// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
do := f.Fs.Features().Disconnect
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx)
}
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
//
// This encrypts the remote name and adjusts the size
@@ -914,8 +888,6 @@ var (
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)


@@ -10,7 +10,6 @@ package drive
import (
"bytes"
"context"
"crypto/tls"
"fmt"
"io"
"io/ioutil"
@@ -38,7 +37,6 @@ import (
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
@@ -158,7 +156,6 @@ func init() {
Description: "Google Drive",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -180,7 +177,7 @@ func init() {
log.Fatalf("Failed to configure token: %v", err)
}
}
err = configTeamDrive(ctx, opt, m, name)
err = configTeamDrive(opt, m, name)
if err != nil {
log.Fatalf("Failed to configure team drive: %v", err)
}
@@ -212,15 +209,7 @@ func init() {
}},
}, {
Name: "root_folder_id",
Help: `ID of the root folder
Leave blank normally.
Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point.
Note that if this is blank, the first time rclone runs it will fill it
in with the ID of the root folder.
`,
Help: "ID of the root folder\nLeave blank normally.\nFill in to access \"Computers\" folders. (see docs).",
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
@@ -324,19 +313,6 @@ Photos folder" option in your google drive settings. You can then copy
or move the photos locally and use the date the image was taken
(created) set as the modification date.`,
Advanced: true,
Hide: fs.OptionHideConfigurator,
}, {
Name: "use_shared_date",
Default: false,
Help: `Use date file was shared instead of modified date.
Note that, as with "--drive-use-created-date", this flag may have
unexpected consequences when uploading/downloading files.
If both this flag and "--drive-use-created-date" are set, the created
date is used.`,
Advanced: true,
Hide: fs.OptionHideConfigurator,
}, {
Name: "list_chunk",
Default: 1000,
@@ -395,22 +371,11 @@ will download it anyway.`,
}, {
Name: "size_as_quota",
Default: false,
Help: `Show sizes as storage quota usage, not actual size.
Help: `Show storage quota usage for file size.
Show the size of a file as the storage quota used. This is the
current version plus any older versions that have been set to keep
forever.
**WARNING**: This flag may have some unexpected consequences.
It is not recommended to set this flag in your config - the
recommended usage is using the flag form --drive-size-as-quota when
doing rclone ls/lsl/lsf/lsjson/etc only.
If you do use this flag for syncing (not recommended) then you will
need to use --ignore size also.`,
The storage used by a file is the size of the current version plus any
older versions that have been set to keep forever.`,
Advanced: true,
Hide: fs.OptionHideConfigurator,
}, {
Name: "v2_download_min_size",
Default: fs.SizeSuffix(-1),
@@ -433,47 +398,9 @@ need to use --ignore size also.`,
This can be useful if you wish to do a server side copy between two
different Google drives. Note that this isn't enabled by default
because it isn't easy to tell if it will work between any two
because it isn't easy to tell if it will work beween any two
configurations.`,
Advanced: true,
}, {
Name: "disable_http2",
Default: true,
Help: `Disable drive using http2
There is currently an unsolved issue with the google drive backend and
HTTP/2. HTTP/2 is therefore disabled by default for the drive backend
but can be re-enabled here. When the issue is solved this flag will
be removed.
See: https://github.com/rclone/rclone/issues/3631
`,
Advanced: true,
}, {
Name: "stop_on_upload_limit",
Default: false,
Help: `Make upload limit errors be fatal
At the time of writing it is only possible to upload 750GB of data to
Google Drive a day (this is an undocumented limit). When this limit is
reached Google Drive produces a slightly different error message. When
this flag is set it causes these errors to be fatal. These will stop
the in-progress sync.
Note that this detection is relying on error message strings which
Google don't document so it may break in the future.
See: https://github.com/rclone/rclone/issues/3857
`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
// Don't encode / as it's a valid name character in drive.
Default: encoder.EncodeInvalidUtf8,
}},
})
@@ -493,38 +420,34 @@ See: https://github.com/rclone/rclone/issues/3857
// Options defines the configuration for this backend
type Options struct {
Scope string `config:"scope"`
RootFolderID string `config:"root_folder_id"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
TeamDriveID string `config:"team_drive"`
AuthOwnerOnly bool `config:"auth_owner_only"`
UseTrash bool `config:"use_trash"`
SkipGdocs bool `config:"skip_gdocs"`
SkipChecksumGphotos bool `config:"skip_checksum_gphotos"`
SharedWithMe bool `config:"shared_with_me"`
TrashedOnly bool `config:"trashed_only"`
Extensions string `config:"formats"`
ExportExtensions string `config:"export_formats"`
ImportExtensions string `config:"import_formats"`
AllowImportNameChange bool `config:"allow_import_name_change"`
UseCreatedDate bool `config:"use_created_date"`
UseSharedDate bool `config:"use_shared_date"`
ListChunk int64 `config:"list_chunk"`
Impersonate string `config:"impersonate"`
AlternateExport bool `config:"alternate_export"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
AcknowledgeAbuse bool `config:"acknowledge_abuse"`
KeepRevisionForever bool `config:"keep_revision_forever"`
SizeAsQuota bool `config:"size_as_quota"`
V2DownloadMinSize fs.SizeSuffix `config:"v2_download_min_size"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
PacerBurst int `config:"pacer_burst"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
DisableHTTP2 bool `config:"disable_http2"`
StopOnUploadLimit bool `config:"stop_on_upload_limit"`
Enc encoder.MultiEncoder `config:"encoding"`
Scope string `config:"scope"`
RootFolderID string `config:"root_folder_id"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
TeamDriveID string `config:"team_drive"`
AuthOwnerOnly bool `config:"auth_owner_only"`
UseTrash bool `config:"use_trash"`
SkipGdocs bool `config:"skip_gdocs"`
SkipChecksumGphotos bool `config:"skip_checksum_gphotos"`
SharedWithMe bool `config:"shared_with_me"`
TrashedOnly bool `config:"trashed_only"`
Extensions string `config:"formats"`
ExportExtensions string `config:"export_formats"`
ImportExtensions string `config:"import_formats"`
AllowImportNameChange bool `config:"allow_import_name_change"`
UseCreatedDate bool `config:"use_created_date"`
ListChunk int64 `config:"list_chunk"`
Impersonate string `config:"impersonate"`
AlternateExport bool `config:"alternate_export"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
AcknowledgeAbuse bool `config:"acknowledge_abuse"`
KeepRevisionForever bool `config:"keep_revision_forever"`
SizeAsQuota bool `config:"size_as_quota"`
V2DownloadMinSize fs.SizeSuffix `config:"v2_download_min_size"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
PacerBurst int `config:"pacer_burst"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
}
// Fs represents a remote drive server
@@ -595,7 +518,7 @@ func (f *Fs) Features() *fs.Features {
}
// shouldRetry determines whether a given err rates being retried
func (f *Fs) shouldRetry(err error) (bool, error) {
func shouldRetry(err error) (bool, error) {
if err == nil {
return false, nil
}
@@ -611,10 +534,6 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
if len(gerr.Errors) > 0 {
reason := gerr.Errors[0].Reason
if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
if f.opt.StopOnUploadLimit && gerr.Errors[0].Message == "User rate limit exceeded." {
fs.Errorf(f, "Received upload limit error: %v", err)
return false, fserrors.FatalError(err)
}
return true, err
}
}
@@ -642,23 +561,6 @@ func containsString(slice []string, s string) bool {
return false
}
// getRootID returns the canonical ID for the "root" ID
func (f *Fs) getRootID() (string, error) {
var info *drive.File
var err error
err = f.pacer.CallNoRetry(func() (bool, error) {
info, err = f.svc.Files.Get("root").
Fields("id").
SupportsAllDrives(true).
Do()
return f.shouldRetry(err)
})
if err != nil {
return "", errors.Wrap(err, "couldn't find root directory ID")
}
return info.Id, nil
}
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
@@ -696,10 +598,11 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
}
var stems []string
if title != "" {
searchTitle := f.opt.Enc.FromStandardName(title)
// Escaping the backslash isn't documented but seems to work
searchTitle = strings.Replace(searchTitle, `\`, `\\`, -1)
searchTitle := strings.Replace(title, `\`, `\\`, -1)
searchTitle = strings.Replace(searchTitle, `'`, `\'`, -1)
// Convert ／ to / for search
searchTitle = strings.Replace(searchTitle, "／", "/", -1)
var titleQuery bytes.Buffer
_, _ = fmt.Fprintf(&titleQuery, "(name='%s'", searchTitle)
@@ -747,9 +650,6 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
if f.opt.AuthOwnerOnly {
fields += ",owners"
}
if f.opt.UseSharedDate {
fields += ",sharedWithMeTime"
}
if f.opt.SkipChecksumGphotos {
fields += ",spaces"
}
@@ -757,25 +657,24 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
fields += ",quotaBytesUsed"
}
fields = fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", fields)
fields = fmt.Sprintf("files(%s),nextPageToken", fields)
OUTER:
for {
var files *drive.FileList
err = f.pacer.Call(func() (bool, error) {
files, err = list.Fields(googleapi.Field(fields)).Context(ctx).Do()
return f.shouldRetry(err)
files, err = list.Fields(googleapi.Field(fields)).Do()
return shouldRetry(err)
})
if err != nil {
return false, errors.Wrap(err, "couldn't list directory")
}
if files.IncompleteSearch {
fs.Errorf(f, "search result INCOMPLETE")
}
for _, item := range files.Files {
item.Name = f.opt.Enc.ToStandardName(item.Name)
// Convert / to ／ for listing purposes
item.Name = strings.Replace(item.Name, "/", "／", -1)
// Check the case of items is correct since
// the `=` operator is case insensitive.
if title != "" && title != item.Name {
found := false
for _, stem := range stems {
@@ -879,7 +778,7 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
}
// Figure out if the user wants to use a team drive
func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name string) error {
func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
// Stop if we are running non-interactive config
if fs.Config.AutoConfirm {
return nil
@@ -889,7 +788,7 @@ func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name
} else {
fmt.Printf("Change current team drive ID %q?\n", opt.TeamDriveID)
}
if !config.Confirm(false) {
if !config.Confirm() {
return nil
}
client, err := createOAuthClient(opt, name, m)
@@ -904,12 +803,11 @@ func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name
var driveIDs, driveNames []string
listTeamDrives := svc.Teamdrives.List().PageSize(100)
listFailed := false
var defaultFs Fs // default Fs with default Options
for {
var teamDrives *drive.TeamDriveList
err = newPacer(opt).Call(func() (bool, error) {
teamDrives, err = listTeamDrives.Context(ctx).Do()
return defaultFs.shouldRetry(err)
teamDrives, err = listTeamDrives.Do()
return shouldRetry(err)
})
if err != nil {
fmt.Printf("Listing team drives failed: %v\n", err)
@@ -941,18 +839,6 @@ func newPacer(opt *Options) *fs.Pacer {
return fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst)))
}
// getClient makes an http client according to the options
func getClient(opt *Options) *http.Client {
t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) {
if opt.DisableHTTP2 {
t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
}
})
return &http.Client{
Transport: t,
}
}
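// Design note: assigning an empty, non-nil TLSNextProto map is the standard
// library's supported way to turn off automatic HTTP/2 on an http.Transport,
// which is all the disable_http2 option above does.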
func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
scopes := driveScopes(opt.Scope)
conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
@@ -962,7 +848,7 @@ func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client
if opt.Impersonate != "" {
conf.Subject = opt.Impersonate
}
ctxWithSpecialClient := oauthutil.Context(getClient(opt))
ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}
@@ -984,7 +870,7 @@ func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Cli
return nil, errors.Wrap(err, "failed to create oauth client from service account")
}
} else {
oAuthClient, _, err = oauthutil.NewClientWithBaseClient(name, m, driveConfig, getClient(opt))
oAuthClient, _, err = oauthutil.NewClient(name, m, driveConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to create oauth client")
}
@@ -1081,25 +967,15 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
}
// set root folder for a team drive or query the user root folder
if opt.RootFolderID != "" {
// override root folder if set or cached in the config
f.rootFolderID = opt.RootFolderID
} else if f.isTeamDrive {
if f.isTeamDrive {
f.rootFolderID = f.opt.TeamDriveID
} else {
// Look up the root ID and cache it in the config
rootID, err := f.getRootID()
if err != nil {
if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
// 404 means that this scope does not have permission to get the
// root so just use "root"
rootID = "root"
} else {
return nil, err
}
}
f.rootFolderID = rootID
m.Set("root_folder_id", rootID)
f.rootFolderID = "root"
}
// override root folder if set in the config
if opt.RootFolderID != "" {
f.rootFolderID = opt.RootFolderID
}
f.dirCache = dircache.New(root, f.rootFolderID, f)
@@ -1155,8 +1031,6 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
modifiedDate := info.ModifiedTime
if f.opt.UseCreatedDate {
modifiedDate = info.CreatedTime
} else if f.opt.UseSharedDate && info.SharedWithMeTime != "" {
modifiedDate = info.SharedWithMeTime
}
size := info.Size
if f.opt.SizeAsQuota {
@@ -1335,7 +1209,6 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
leaf = f.opt.Enc.FromStandardName(leaf)
// fmt.Println("Making", path)
// Define the metadata for the directory we are going to create.
createInfo := &drive.File{
@@ -1350,7 +1223,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
Fields("id").
SupportsAllDrives(true).
Do()
return f.shouldRetry(err)
return shouldRetry(err)
})
if err != nil {
return "", err
@@ -1393,7 +1266,7 @@ func (f *Fs) fetchFormats() {
about, err = f.svc.About.Get().
Fields("exportFormats,importFormats").
Do()
return f.shouldRetry(err)
return shouldRetry(err)
})
if err != nil {
fs.Errorf(f, "Failed to get Drive exportFormats and importFormats: %v", err)
@@ -1525,14 +1398,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if iErr != nil {
return nil, iErr
}
// If listing the root of a teamdrive and got no entries,
// double check we have access
if f.isTeamDrive && len(entries) == 0 && f.root == "" && dir == "" {
err = f.teamDriveOK(ctx)
if err != nil {
return nil, err
}
}
return entries, nil
}
@@ -1592,22 +1457,10 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan list
var iErr error
_, err := f.list(ctx, dirs, "", false, false, false, func(item *drive.File) bool {
for _, parent := range item.Parents {
var i int
// If only one item in paths then no need to search for the ID
// assuming google drive is doing its job properly.
//
// Note that we are at the root when len(paths) == 1 && paths[0] == ""
if len(paths) == 1 {
// don't check parents at root because
// - shared with me items have no parents at the root
// - if using a root alias, eg "root" or "appDataFolder" the ID won't match
i = 0
} else {
// only handle parents that are in the requested dirs list if not at root
i = sort.SearchStrings(dirs, parent)
if i == len(dirs) || dirs[i] != parent {
continue
}
// only handle parents that are in the requested dirs list
i := sort.SearchStrings(dirs, parent)
if i == len(dirs) || dirs[i] != parent {
continue
}
remote := path.Join(paths[i], item.Name)
entry, err := f.itemToDirEntry(remote, item)
@@ -1671,6 +1524,20 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
if err != nil {
return err
}
if directoryID == "root" {
var info *drive.File
err = f.pacer.CallNoRetry(func() (bool, error) {
info, err = f.svc.Files.Get("root").
Fields("id").
SupportsAllDrives(true).
Do()
return shouldRetry(err)
})
if err != nil {
return err
}
directoryID = info.Id
}
mu := sync.Mutex{} // protects in and overflow
wg := sync.WaitGroup{}
@@ -1678,7 +1545,6 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
out := make(chan error, fs.Config.Checkers)
list := walk.NewListRHelper(callback)
overflow := []listREntry{}
listed := 0
cb := func(entry fs.DirEntry) error {
mu.Lock()
@@ -1691,7 +1557,6 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
overflow = append(overflow, listREntry{d.ID(), d.Remote()})
}
}
listed++
return list.Add(entry)
}
@@ -1748,21 +1613,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return err
}
err = list.Flush()
if err != nil {
return err
}
// If listing the root of a teamdrive and got no entries,
// double check we have access
if f.isTeamDrive && listed == 0 && f.root == "" && dir == "" {
err = f.teamDriveOK(ctx)
if err != nil {
return err
}
}
return nil
return list.Flush()
}
// itemToDirEntry converts a drive.File to a fs.DirEntry.
@@ -1793,7 +1644,6 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
return nil, err
}
leaf = f.opt.Enc.FromStandardName(leaf)
// Define the metadata for the file we are going to create.
createInfo := &drive.File{
Name: leaf,
@@ -1867,7 +1717,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
}
var info *drive.File
if size >= 0 && size < int64(f.opt.UploadCutoff) {
if size == 0 || size < int64(f.opt.UploadCutoff) {
// Make the API request to upload metadata and file data.
// Don't retry, return a retry error instead
err = f.pacer.CallNoRetry(func() (bool, error) {
@@ -1877,14 +1727,14 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
SupportsAllDrives(true).
KeepRevisionForever(f.opt.KeepRevisionForever).
Do()
return f.shouldRetry(err)
return shouldRetry(err)
})
if err != nil {
return nil, err
}
} else {
// Upload the file in chunks
info, err = f.Upload(ctx, in, size, srcMimeType, "", remote, createInfo)
info, err = f.Upload(in, size, srcMimeType, "", remote, createInfo)
if err != nil {
return nil, err
}
@@ -1920,7 +1770,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
Fields("").
SupportsAllDrives(true).
Do()
return f.shouldRetry(err)
return shouldRetry(err)
})
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir)
@@ -1966,7 +1816,7 @@ func (f *Fs) rmdir(ctx context.Context, directoryID string, useTrash bool) error
SupportsAllDrives(true).
Do()
}
return f.shouldRetry(err)
return shouldRetry(err)
})
}
@@ -2064,7 +1914,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
SupportsAllDrives(true).
KeepRevisionForever(f.opt.KeepRevisionForever).
Do()
return f.shouldRetry(err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -2091,9 +1941,6 @@ func (f *Fs) Purge(ctx context.Context) error {
if f.root == "" {
return errors.New("can't purge root directory")
}
if f.opt.TrashedOnly {
return errors.New("Can't purge with --drive-trashed-only. Use delete if you want to selectively delete files")
}
err := f.dirCache.FindRoot(ctx, false)
if err != nil {
return err
@@ -2113,7 +1960,7 @@ func (f *Fs) Purge(ctx context.Context) error {
SupportsAllDrives(true).
Do()
}
return f.shouldRetry(err)
return shouldRetry(err)
})
f.dirCache.ResetRoot()
if err != nil {
@@ -2125,8 +1972,8 @@ func (f *Fs) Purge(ctx context.Context) error {
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) error {
err := f.pacer.Call(func() (bool, error) {
err := f.svc.Files.EmptyTrash().Context(ctx).Do()
return f.shouldRetry(err)
err := f.svc.Files.EmptyTrash().Do()
return shouldRetry(err)
})
if err != nil {
@@ -2135,38 +1982,17 @@ func (f *Fs) CleanUp(ctx context.Context) error {
return nil
}
// teamDriveOK checks to see if we can access the team drive
func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
if !f.isTeamDrive {
return nil
}
var td *drive.Drive
err = f.pacer.Call(func() (bool, error) {
td, err = f.svc.Drives.Get(f.opt.TeamDriveID).Fields("name,id,capabilities,createdTime,restrictions").Context(ctx).Do()
return f.shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "failed to get Team/Shared Drive info")
}
fs.Debugf(f, "read info from team drive %q", td.Name)
return err
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if f.isTeamDrive {
err := f.teamDriveOK(ctx)
if err != nil {
return nil, err
}
// Teamdrives don't appear to have a usage API so just return empty
return &fs.Usage{}, nil
}
var about *drive.About
var err error
err = f.pacer.Call(func() (bool, error) {
about, err = f.svc.About.Get().Fields("storageQuota").Context(ctx).Do()
return f.shouldRetry(err)
about, err = f.svc.About.Get().Fields("storageQuota").Do()
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get Drive storageQuota")
@@ -2238,7 +2064,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
Fields(partialFields).
SupportsAllDrives(true).
Do()
return f.shouldRetry(err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -2274,7 +2100,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
Fields("").
SupportsAllDrives(true).
Do()
return f.shouldRetry(err)
return shouldRetry(err)
})
if err != nil {
return "", err
@@ -2374,7 +2200,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
Fields("").
SupportsAllDrives(true).
Do()
return f.shouldRetry(err)
return shouldRetry(err)
})
if err != nil {
return err
@@ -2424,7 +2250,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
}
}
fs.Debugf(f, "Checking for changes on remote")
startPageToken, err = f.changeNotifyRunner(ctx, notifyFunc, startPageToken)
startPageToken, err = f.changeNotifyRunner(notifyFunc, startPageToken)
if err != nil {
fs.Infof(f, "Change notify listener failure: %s", err)
}
@@ -2435,12 +2261,10 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
func (f *Fs) changeNotifyStartPageToken() (pageToken string, err error) {
var startPageToken *drive.StartPageToken
err = f.pacer.Call(func() (bool, error) {
changes := f.svc.Changes.GetStartPageToken().SupportsAllDrives(true)
if f.isTeamDrive {
changes.DriveId(f.opt.TeamDriveID)
}
startPageToken, err = changes.Do()
return f.shouldRetry(err)
startPageToken, err = f.svc.Changes.GetStartPageToken().
SupportsAllDrives(true).
Do()
return shouldRetry(err)
})
if err != nil {
return
@@ -2448,7 +2272,7 @@ func (f *Fs) changeNotifyStartPageToken() (pageToken string, err error) {
return startPageToken.StartPageToken, nil
}
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), startPageToken string) (newStartPageToken string, err error) {
func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), startPageToken string) (newStartPageToken string, err error) {
pageToken := startPageToken
for {
var changeList *drive.ChangeList
@@ -2462,14 +2286,10 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
changesCall.SupportsAllDrives(true)
changesCall.IncludeItemsFromAllDrives(true)
if f.isTeamDrive {
changesCall.DriveId(f.opt.TeamDriveID)
changesCall.TeamDriveId(f.opt.TeamDriveID)
}
// If using appDataFolder then need to add Spaces
if f.rootFolderID == "appDataFolder" {
changesCall.Spaces("appDataFolder")
}
changeList, err = changesCall.Context(ctx).Do()
return f.shouldRetry(err)
changeList, err = changesCall.Do()
return shouldRetry(err)
})
if err != nil {
return
@@ -2492,7 +2312,6 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
// find the new path
if change.File != nil {
change.File.Name = f.opt.Enc.ToStandardName(change.File.Name)
changeType := fs.EntryDirectory
if change.File.MimeType != driveFolderType {
changeType = fs.EntryObject
@@ -2660,7 +2479,7 @@ func (o *baseObject) SetModTime(ctx context.Context, modTime time.Time) error {
Fields(partialFields).
SupportsAllDrives(true).
Do()
return o.fs.shouldRetry(err)
return shouldRetry(err)
})
if err != nil {
return err
@@ -2677,7 +2496,7 @@ func (o *baseObject) Storable() bool {
// httpResponse gets an http.Response object for the object
// using the url and method passed in
func (o *baseObject) httpResponse(ctx context.Context, url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
func (o *baseObject) httpResponse(url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
if url == "" {
return nil, nil, errors.New("forbidden to download - check sharing permission")
}
@@ -2685,7 +2504,6 @@ func (o *baseObject) httpResponse(ctx context.Context, url, method string, optio
if err != nil {
return req, nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
fs.OpenOptionAddHTTPHeaders(req.Header, options)
if o.bytes == 0 {
// Don't supply range requests for 0 length objects as they always fail
@@ -2699,7 +2517,7 @@ func (o *baseObject) httpResponse(ctx context.Context, url, method string, optio
_ = res.Body.Close() // ignore error
}
}
return o.fs.shouldRetry(err)
return shouldRetry(err)
})
if err != nil {
return req, nil, err
@@ -2756,8 +2574,8 @@ func isGoogleError(err error, what string) bool {
}
// open a url for reading
func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
_, res, err := o.httpResponse(ctx, url, "GET", options)
func (o *baseObject) open(url string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
_, res, err := o.httpResponse(url, "GET", options)
if err != nil {
if isGoogleError(err, "cannotDownloadAbusiveFile") {
if o.fs.opt.AcknowledgeAbuse {
@@ -2768,7 +2586,7 @@ func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOpt
url += "?"
}
url += "acknowledgeAbuse=true"
_, res, err = o.httpResponse(ctx, url, "GET", options)
_, res, err = o.httpResponse(url, "GET", options)
} else {
err = errors.Wrap(err, "Use the --drive-acknowledge-abuse flag to download this file")
}
@@ -2789,7 +2607,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Fields("downloadUrl").
SupportsAllDrives(true).
Do()
return o.fs.shouldRetry(err)
return shouldRetry(err)
})
if err == nil {
fs.Debugf(o, "Using v2 download: %v", v2File.DownloadUrl)
@@ -2797,7 +2615,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
o.v2Download = false
}
}
return o.baseObject.open(ctx, o.url, options...)
return o.baseObject.open(o.url, options...)
}
func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Update the size with what we are reading as it can change from
@@ -2822,7 +2640,7 @@ func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in
if offset != 0 {
return nil, errors.New("partial downloads are not supported while exporting Google Documents")
}
in, err = o.baseObject.open(ctx, o.url, options...)
in, err = o.baseObject.open(o.url, options...)
if in != nil {
in = &openDocumentFile{o: o, in: in}
}
@@ -2857,11 +2675,11 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
return ioutil.NopCloser(bytes.NewReader(data)), nil
}
func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
func (o *baseObject) update(updateInfo *drive.File, uploadMimeType string, in io.Reader,
src fs.ObjectInfo) (info *drive.File, err error) {
// Make the API request to upload metadata and file data.
size := src.Size()
if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
if size == 0 || size < int64(o.fs.opt.UploadCutoff) {
// Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
info, err = o.fs.svc.Files.Update(o.id, updateInfo).
@@ -2870,12 +2688,12 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
SupportsAllDrives(true).
KeepRevisionForever(o.fs.opt.KeepRevisionForever).
Do()
return o.fs.shouldRetry(err)
return shouldRetry(err)
})
return
}
// Upload the file in chunks
return o.fs.Upload(ctx, in, size, uploadMimeType, o.id, o.remote, updateInfo)
return o.fs.Upload(in, size, uploadMimeType, o.id, o.remote, updateInfo)
}
// Update the already existing object
@@ -2889,7 +2707,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
MimeType: srcMimeType,
ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
}
info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
info, err := o.baseObject.update(updateInfo, srcMimeType, in, src)
if err != nil {
return err
}
@@ -2926,7 +2744,7 @@ func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.Object
}
updateInfo.MimeType = importMimeType
info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
info, err := o.baseObject.update(updateInfo, srcMimeType, in, src)
if err != nil {
return err
}
@@ -2970,7 +2788,7 @@ func (o *baseObject) Remove(ctx context.Context) error {
SupportsAllDrives(true).
Do()
}
return o.fs.shouldRetry(err)
return shouldRetry(err)
})
return err
}


@@ -11,15 +11,15 @@
package drive
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"regexp"
"strconv"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/readers"
@@ -50,7 +50,7 @@ type resumableUpload struct {
}
// Upload the io.Reader in of size bytes with contentType and info
func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
params := url.Values{
"alt": {"json"},
"uploadType": {"resumable"},
@@ -81,21 +81,18 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
if err != nil {
return false, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
googleapi.Expand(req.URL, map[string]string{
"fileId": fileID,
})
req.Header.Set("Content-Type", "application/json; charset=UTF-8")
req.Header.Set("X-Upload-Content-Type", contentType)
if size >= 0 {
req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
}
req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
res, err = f.client.Do(req)
if err == nil {
defer googleapi.CloseBody(res)
err = googleapi.CheckResponse(res)
}
return f.shouldRetry(err)
return shouldRetry(err)
})
if err != nil {
return nil, err
@@ -109,31 +106,60 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
MediaType: contentType,
ContentLength: size,
}
return rx.Upload(ctx)
return rx.Upload()
}
// Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request {
func (rx *resumableUpload) makeRequest(start int64, body io.ReadSeeker, reqSize int64) *http.Request {
req, _ := http.NewRequest("POST", rx.URI, body)
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.ContentLength = reqSize
totalSize := "*"
if rx.ContentLength >= 0 {
totalSize = strconv.FormatInt(rx.ContentLength, 10)
}
if reqSize != 0 {
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, totalSize))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
} else {
req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", totalSize))
req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
}
req.Header.Set("Content-Type", rx.MediaType)
return req
}
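
A minimal sketch of the header forms the totalSize variant above produces, with made-up offsets and an assumed 8 MiB chunk size: a chunk names its inclusive byte range, the total stays "*" until the stream's size is known, and an empty body asks the server for status:

package main

import "fmt"

// contentRange mirrors the header logic of makeRequest above
// (an illustration only, not the rclone implementation).
func contentRange(start, reqSize, contentLength int64) string {
	totalSize := "*" // total unknown until the final chunk
	if contentLength >= 0 {
		totalSize = fmt.Sprint(contentLength)
	}
	if reqSize != 0 {
		// A chunk of data: inclusive first and last byte index.
		return fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, totalSize)
	}
	// No body: a probe asking how much the server has stored.
	return fmt.Sprintf("bytes */%v", totalSize)
}

func main() {
	fmt.Println(contentRange(0, 8<<20, 100<<20)) // bytes 0-8388607/104857600
	fmt.Println(contentRange(0, 8<<20, -1))      // bytes 0-8388607/* (size unknown)
	fmt.Println(contentRange(0, 0, 100<<20))     // bytes */104857600 (status probe)
}
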
// rangeRE matches the transfer status response from the server. $1 is
// the last byte index uploaded.
var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
// Query drive for the amount transferred so far
//
// If error is nil, then start should be valid
func (rx *resumableUpload) transferStatus() (start int64, err error) {
req := rx.makeRequest(0, nil, 0)
res, err := rx.f.client.Do(req)
if err != nil {
return 0, err
}
defer googleapi.CloseBody(res)
if res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
return rx.ContentLength, nil
}
if res.StatusCode != statusResumeIncomplete {
err = googleapi.CheckResponse(res)
if err != nil {
return 0, err
}
return 0, errors.Errorf("unexpected http return code %v", res.StatusCode)
}
Range := res.Header.Get("Range")
if m := rangeRE.FindStringSubmatch(Range); len(m) == 2 {
start, err = strconv.ParseInt(m[1], 10, 64)
if err == nil {
return start, nil
}
}
return 0, errors.Errorf("unable to parse range %q", Range)
}
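
Concretely: a resume-incomplete reply carrying "Range: 0-8388607" means the server holds bytes 0 through 8388607 inclusive, and rangeRE captures that final index. A runnable sketch with an assumed header value:

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// Same pattern as above: $1 is the last byte index stored so far.
var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)

func main() {
	// Assumed status reply from the server.
	if m := rangeRE.FindStringSubmatch("0-8388607"); len(m) == 2 {
		last, err := strconv.ParseInt(m[1], 10, 64)
		fmt.Println(last, err) // 8388607 <nil>
	}
}
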
// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
_, _ = chunk.Seek(0, io.SeekStart)
req := rx.makeRequest(ctx, start, chunk, chunkSize)
req := rx.makeRequest(start, chunk, chunkSize)
res, err := rx.f.client.Do(req)
if err != nil {
return 599, err
@@ -166,45 +192,23 @@ func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk
// Upload uploads the chunks from the input
// It retries each chunk using the pacer and --low-level-retries
func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
func (rx *resumableUpload) Upload() (*drive.File, error) {
start := int64(0)
var StatusCode int
var err error
buf := make([]byte, int(rx.f.opt.ChunkSize))
for finished := false; !finished; {
var reqSize int64
var chunk io.ReadSeeker
if rx.ContentLength >= 0 {
// If size known use repeatable reader for smoother bwlimit
if start >= rx.ContentLength {
break
}
reqSize = rx.ContentLength - start
if reqSize >= int64(rx.f.opt.ChunkSize) {
reqSize = int64(rx.f.opt.ChunkSize)
}
chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
} else {
// If size unknown read into buffer
var n int
n, err = readers.ReadFill(rx.Media, buf)
if err == io.EOF {
// Send the last chunk with the correct ContentLength
// otherwise Google doesn't know we've finished
rx.ContentLength = start + int64(n)
finished = true
} else if err != nil {
return nil, err
}
reqSize = int64(n)
chunk = bytes.NewReader(buf[:reqSize])
for start < rx.ContentLength {
reqSize := rx.ContentLength - start
if reqSize >= int64(rx.f.opt.ChunkSize) {
reqSize = int64(rx.f.opt.ChunkSize)
}
chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
// Transfer the chunk
err = rx.f.pacer.Call(func() (bool, error) {
fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
again, err := rx.f.shouldRetry(err)
StatusCode, err = rx.transferChunk(start, chunk, reqSize)
again, err := shouldRetry(err)
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
again = false
err = nil


@@ -39,7 +39,6 @@ import (
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/dropbox/dbhash"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -47,7 +46,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
@@ -104,14 +102,10 @@ var (
// A regexp matching path names for files Dropbox ignores
// See https://www.dropbox.com/en/help/145 - Ignored files
ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
// DbHashType is the hash.Type for Dropbox
DbHashType hash.Type
)
// Register with Fs
func init() {
DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
fs.Register(&fs.RegInfo{
Name: "dropbox",
Description: "Dropbox",
@@ -145,28 +139,14 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
Help: "Impersonate this user when using a business account.",
Default: "",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// https://www.dropbox.com/help/syncing-uploads/files-not-syncing lists / and \
// as invalid characters.
// Testing revealed names with trailing spaces and the DEL character don't work.
// Also encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: (encoder.Base |
encoder.EncodeBackSlash |
encoder.EncodeDel |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
Enc encoder.MultiEncoder `config:"encoding"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
}
// Fs represents a remote dropbox server
@@ -392,15 +372,14 @@ func (f *Fs) setRoot(root string) {
// getMetadata gets the metadata for a file or directory
func (f *Fs) getMetadata(objPath string) (entry files.IsMetadata, notFound bool, err error) {
err = f.pacer.Call(func() (bool, error) {
entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
Path: f.opt.Enc.FromStandardPath(objPath),
})
entry, err = f.srv.GetMetadata(&files.GetMetadataArg{Path: objPath})
return shouldRetry(err)
})
if err != nil {
switch e := err.(type) {
case files.GetMetadataAPIError:
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
switch e.EndpointError.Path.Tag {
case files.LookupErrorNotFound:
notFound = true
err = nil
}
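
One side of this hunk reads e.EndpointError.Path.Tag unconditionally, which panics if either pointer is nil; the other guards every step before dereferencing. A minimal sketch of the guarded pattern, using assumed stand-in types rather than the Dropbox SDK's real ones:

package main

import "fmt"

// Assumed stand-ins for the SDK's nested error shape.
type lookupError struct{ Tag string }
type endpointError struct{ Path *lookupError }
type apiError struct{ EndpointError *endpointError }

func (apiError) Error() string { return "api error" }

// isNotFound dereferences nothing it has not first checked for nil.
func isNotFound(err error) bool {
	e, ok := err.(apiError)
	return ok && e.EndpointError != nil && e.EndpointError.Path != nil &&
		e.EndpointError.Path.Tag == "not_found"
}

func main() {
	fmt.Println(isNotFound(apiError{}))                                          // false, and no panic
	fmt.Println(isNotFound(apiError{&endpointError{&lookupError{"not_found"}}})) // true
}
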
@@ -487,7 +466,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for {
if !started {
arg := files.ListFolderArg{
Path: f.opt.Enc.FromStandardPath(root),
Path: root,
Recursive: false,
}
if root == "/" {
@@ -500,7 +479,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if err != nil {
switch e := err.(type) {
case files.ListFolderAPIError:
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
switch e.EndpointError.Path.Tag {
case files.LookupErrorNotFound:
err = fs.ErrorDirNotFound
}
}
@@ -537,7 +517,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Only the last element is reliably cased in PathDisplay
entryPath := metadata.PathDisplay
leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
leaf := path.Base(entryPath)
remote := path.Join(dir, leaf)
if folderInfo != nil {
d := fs.NewDir(remote, time.Now())
@@ -595,7 +575,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// create it
arg2 := files.CreateFolderArg{
Path: f.opt.Enc.FromStandardPath(root),
Path: root,
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.CreateFolderV2(&arg2)
@@ -621,7 +601,6 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return errors.Wrap(err, "Rmdir")
}
root = f.opt.Enc.FromStandardPath(root)
// check directory empty
arg := files.ListFolderArg{
Path: root,
@@ -678,12 +657,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// Copy
arg := files.RelocationArg{
RelocationPath: files.RelocationPath{
FromPath: f.opt.Enc.FromStandardPath(srcObj.remotePath()),
ToPath: f.opt.Enc.FromStandardPath(dstObj.remotePath()),
},
}
arg := files.RelocationArg{}
arg.FromPath = srcObj.remotePath()
arg.ToPath = dstObj.remotePath()
var err error
var result *files.RelocationResult
err = f.pacer.Call(func() (bool, error) {
@@ -715,9 +691,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
func (f *Fs) Purge(ctx context.Context) (err error) {
// Let dropbox delete the filesystem tree
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.DeleteV2(&files.DeleteArg{
Path: f.opt.Enc.FromStandardPath(f.slashRoot),
})
_, err = f.srv.DeleteV2(&files.DeleteArg{Path: f.slashRoot})
return shouldRetry(err)
})
return err
@@ -746,12 +720,9 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// Do the move
arg := files.RelocationArg{
RelocationPath: files.RelocationPath{
FromPath: f.opt.Enc.FromStandardPath(srcObj.remotePath()),
ToPath: f.opt.Enc.FromStandardPath(dstObj.remotePath()),
},
}
arg := files.RelocationArg{}
arg.FromPath = srcObj.remotePath()
arg.ToPath = dstObj.remotePath()
var err error
var result *files.RelocationResult
err = f.pacer.Call(func() (bool, error) {
@@ -776,7 +747,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
absPath := f.opt.Enc.FromStandardPath(path.Join(f.slashRoot, remote))
absPath := "/" + path.Join(f.Root(), remote)
fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
createArg := sharing.CreateSharedLinkWithSettingsArg{
Path: absPath,
@@ -787,8 +758,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
return shouldRetry(err)
})
if err != nil && strings.Contains(err.Error(),
sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
if err != nil && strings.Contains(err.Error(), sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
listArg := sharing.ListSharedLinksArg{
Path: absPath,
@@ -850,12 +820,9 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// ...apparently not necessary
// Do the move
arg := files.RelocationArg{
RelocationPath: files.RelocationPath{
FromPath: f.opt.Enc.FromStandardPath(srcPath),
ToPath: f.opt.Enc.FromStandardPath(dstPath),
},
}
arg := files.RelocationArg{}
arg.FromPath = srcPath
arg.ToPath = dstPath
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.MoveV2(&arg)
return shouldRetry(err)
@@ -896,7 +863,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(DbHashType)
return hash.Set(hash.Dropbox)
}
// ------------------------------------------------------------
@@ -921,7 +888,7 @@ func (o *Object) Remote() string {
// Hash returns the dropbox special hash
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != DbHashType {
if t != hash.Dropbox {
return "", hash.ErrUnsupported
}
err := o.readMetaData()
@@ -1008,12 +975,8 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.bytes)
headers := fs.OpenOptionHeaders(options)
arg := files.DownloadArg{
Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
ExtraHeaders: headers,
}
arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers}
err = o.fs.pacer.Call(func() (bool, error) {
_, in, err = o.fs.srv.Download(&arg)
return shouldRetry(err)
@@ -1022,7 +985,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
switch e := err.(type) {
case files.DownloadAPIError:
// Don't attempt to retry copyright violation errors
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
if e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
return nil, fserrors.NoRetryError(err)
}
}
@@ -1123,13 +1086,6 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
return false, nil
}
entry, err = o.fs.srv.UploadSessionFinish(args, chunk)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
}
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
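
The pattern on the longer side of this hunk is: classify the error, wrap the fatal case so the retry loop gives up at once, and hand everything else back for normal retries. A rough sketch under assumed names (noRetryError standing in for fserrors.NoRetryError, isFatal for the insufficient-space check):

package main

import (
	"errors"
	"fmt"
)

// noRetryError marks an error the retry loop must not retry.
type noRetryError struct{ error }

// isFatal is an assumed classifier for the non-retryable case.
func isFatal(err error) bool {
	return err != nil && err.Error() == "insufficient_space"
}

func classify(err error) error {
	if isFatal(err) {
		return noRetryError{err} // stop retrying immediately
	}
	return err // everything else retries as usual
}

func main() {
	err := classify(errors.New("insufficient_space"))
	var nr noRetryError
	fmt.Println(errors.As(err, &nr)) // true: do not retry
}
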
@@ -1147,9 +1103,10 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
remote := o.remotePath()
if ignoredFiles.MatchString(remote) {
return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
fs.Logf(o, "File name disallowed - not uploading")
return nil
}
commitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath()))
commitInfo := files.NewCommitInfo(o.remotePath())
commitInfo.Mode.Tag = "overwrite"
// The Dropbox API only accepts timestamps in UTC with second precision.
commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
@@ -1174,9 +1131,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
})
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{Path: o.remotePath()})
return shouldRetry(err)
})
return err


@@ -32,7 +32,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString
func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
func (f *Fs) getDownloadToken(url string) (*GetTokenResponse, error) {
request := DownloadRequest{
URL: url,
Single: 1,
@@ -44,7 +44,7 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
var token GetTokenResponse
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
resp, err := f.rest.CallJSON(&opts, &request, &token)
return shouldRetry(resp, err)
})
if err != nil {
@@ -72,7 +72,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr
var sharedFiles SharedFolderResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, &sharedFiles)
resp, err := f.rest.CallJSON(&opts, nil, &sharedFiles)
return shouldRetry(resp, err)
})
if err != nil {
@@ -88,7 +88,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr
return entries, nil
}
func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesList, err error) {
func (f *Fs) listFiles(directoryID int) (filesList *FilesList, err error) {
// fs.Debugf(f, "Requesting files for dir `%s`", directoryID)
request := ListFilesRequest{
FolderID: directoryID,
@@ -101,21 +101,17 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
filesList = &FilesList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, filesList)
resp, err := f.rest.CallJSON(&opts, &request, filesList)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
}
for i := range filesList.Items {
item := &filesList.Items[i]
item.Filename = f.opt.Enc.ToStandardName(item.Filename)
}
return filesList, nil
}
func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *FoldersList, err error) {
func (f *Fs) listFolders(directoryID int) (foldersList *FoldersList, err error) {
// fs.Debugf(f, "Requesting folders for id `%s`", directoryID)
request := ListFolderRequest{
@@ -129,17 +125,12 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
foldersList = &FoldersList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, foldersList)
resp, err := f.rest.CallJSON(&opts, &request, foldersList)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list folders")
}
foldersList.Name = f.opt.Enc.ToStandardName(foldersList.Name)
for i := range foldersList.SubFolders {
folder := &foldersList.SubFolders[i]
folder.Name = f.opt.Enc.ToStandardName(folder.Name)
}
// fs.Debugf(f, "Got FoldersList for id `%s`", directoryID)
@@ -162,12 +153,12 @@ func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, er
return nil, err
}
files, err := f.listFiles(ctx, folderID)
files, err := f.listFiles(folderID)
if err != nil {
return nil, err
}
folders, err := f.listFolders(ctx, folderID)
folders, err := f.listFolders(folderID)
if err != nil {
return nil, err
}
@@ -184,6 +175,7 @@ func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, er
return nil, err
}
folder.Name = restoreReservedChars(folder.Name)
fullPath := getRemote(dir, folder.Name)
folderID := strconv.Itoa(folder.ID)
@@ -212,8 +204,8 @@ func getRemote(dir, fileName string) string {
return dir + "/" + fileName
}
func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (response *MakeFolderResponse, err error) {
name := f.opt.Enc.FromStandardName(leaf)
func (f *Fs) makeFolder(leaf string, folderID int) (response *MakeFolderResponse, err error) {
name := replaceReservedChars(leaf)
// fs.Debugf(f, "Creating folder `%s` in id `%s`", name, directoryID)
request := MakeFolderRequest{
@@ -228,7 +220,7 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
response = &MakeFolderResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, response)
resp, err := f.rest.CallJSON(&opts, &request, response)
return shouldRetry(resp, err)
})
if err != nil {
@@ -240,7 +232,7 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
return response, err
}
func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (response *GenericOKResponse, err error) {
func (f *Fs) removeFolder(name string, folderID int) (response *GenericOKResponse, err error) {
// fs.Debugf(f, "Removing folder with id `%s`", directoryID)
request := &RemoveFolderRequest{
@@ -255,7 +247,7 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
response = &GenericOKResponse{}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rest.CallJSON(ctx, &opts, request, response)
resp, err = f.rest.CallJSON(&opts, request, response)
return shouldRetry(resp, err)
})
if err != nil {
@@ -270,7 +262,7 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
return response, nil
}
func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKResponse, err error) {
func (f *Fs) deleteFile(url string) (response *GenericOKResponse, err error) {
request := &RemoveFileRequest{
Files: []RmFile{
{url},
@@ -284,7 +276,7 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
response = &GenericOKResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
resp, err := f.rest.CallJSON(&opts, request, response)
return shouldRetry(resp, err)
})
@@ -297,7 +289,7 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
return response, nil
}
func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
func (f *Fs) getUploadNode() (response *GetUploadNodeResponse, err error) {
// fs.Debugf(f, "Requesting Upload node")
opts := rest.Opts{
@@ -308,7 +300,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
response = &GetUploadNodeResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
resp, err := f.rest.CallJSON(&opts, nil, response)
return shouldRetry(resp, err)
})
if err != nil {
@@ -320,11 +312,9 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return response, err
}
func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
func (f *Fs) uploadFile(in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
// fs.Debugf(f, "Uploading File `%s`", fileName)
fileName = f.opt.Enc.FromStandardName(fileName)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
}
@@ -350,7 +340,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
}
err = f.pacer.CallNoRetry(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, nil)
resp, err := f.rest.CallJSON(&opts, nil, nil)
return shouldRetry(resp, err)
})
@@ -363,7 +353,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
return response, err
}
func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
func (f *Fs) endUpload(uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
// fs.Debugf(f, "Ending File Upload `%s`", uploadID)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
@@ -384,7 +374,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
response = &EndFileUploadResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
resp, err := f.rest.CallJSON(&opts, nil, response)
return shouldRetry(resp, err)
})


@@ -11,13 +11,11 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
@@ -37,48 +35,25 @@ func init() {
Config: func(name string, config configmap.Mapper) {
},
NewFs: NewFs,
Options: []fs.Option{{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
Name: "api_key",
}, {
Help: "If you want to download a shared folder, add this parameter",
Name: "shared_folder",
Required: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Characters that need escaping
//
// '\\': '\', // FULLWIDTH REVERSE SOLIDUS
// '<': '<', // FULLWIDTH LESS-THAN SIGN
// '>': '>', // FULLWIDTH GREATER-THAN SIGN
// '"': '"', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
// '\'': ''', // FULLWIDTH APOSTROPHE
// '$': '$', // FULLWIDTH DOLLAR SIGN
// '`': '`', // FULLWIDTH GRAVE ACCENT
//
// Leading space and trailing space
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeSingleQuote |
encoder.EncodeBackQuote |
encoder.EncodeDoubleQuote |
encoder.EncodeLtGt |
encoder.EncodeDollar |
encoder.EncodeLeftSpace |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8),
}},
Options: []fs.Option{
{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
Name: "api_key",
},
{
Help: "If you want to download a shared folder, add this parameter",
Name: "shared_folder",
Required: false,
Advanced: true,
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
Enc encoder.MultiEncoder `config:"encoding"`
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
}
// Fs is the interface a cloud storage system must provide
@@ -86,9 +61,9 @@ type Fs struct {
root string
name string
features *fs.Features
opt Options
dirCache *dircache.DirCache
baseClient *http.Client
options *Options
pacer *fs.Pacer
rest *rest.Client
}
@@ -99,7 +74,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
if err != nil {
return "", false, err
}
folders, err := f.listFolders(ctx, folderID)
folders, err := f.listFolders(folderID)
if err != nil {
return "", false, err
}
@@ -120,7 +95,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
if err != nil {
return "", err
}
resp, err := f.makeFolder(ctx, leaf, folderID)
resp, err := f.makeFolder(leaf, folderID)
if err != nil {
return "", err
}
@@ -166,7 +141,8 @@ func (f *Fs) Features() *fs.Features {
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
func NewFs(name string, rootleaf string, config configmap.Mapper) (fs.Fs, error) {
root := replaceReservedChars(rootleaf)
opt := new(Options)
err := configstruct.Set(config, opt)
if err != nil {
@@ -184,7 +160,7 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
f := &Fs{
name: name,
root: root,
opt: *opt,
options: opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
baseClient: &http.Client{},
}
@@ -198,7 +174,7 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
f.rest = rest.NewClient(client).SetRoot(apiBaseURL)
f.rest.SetHeader("Authorization", "Bearer "+f.opt.APIKey)
f.rest.SetHeader("Authorization", "Bearer "+f.options.APIKey)
f.dirCache = dircache.New(root, rootID, f)
@@ -248,8 +224,8 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.opt.SharedFolder != "" {
return f.listSharedFiles(ctx, f.opt.SharedFolder)
if f.options.SharedFolder != "" {
return f.listSharedFiles(ctx, f.options.SharedFolder)
}
dirContent, err := f.listDir(ctx, dir)
@@ -275,7 +251,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if err != nil {
return nil, err
}
files, err := f.listFiles(ctx, folderID)
files, err := f.listFiles(folderID)
if err != nil {
return nil, err
}
@@ -322,13 +298,13 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(100e9) {
if size > int64(100E9) {
return nil, errors.New("File too big, can't upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
nodeResponse, err := f.getUploadNode(ctx)
nodeResponse, err := f.getUploadNode()
if err != nil {
return nil, err
}
@@ -338,12 +314,12 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
return nil, err
}
_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
_, err = f.uploadFile(in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
if err != nil {
return nil, err
}
fileUploadResponse, err := f.endUpload(ctx, nodeResponse.ID, nodeResponse.URL)
fileUploadResponse, err := f.endUpload(nodeResponse.ID, nodeResponse.URL)
if err != nil {
return nil, err
}
@@ -370,7 +346,7 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
Date: time.Now().Format("2006-01-02 15:04:05"),
Filename: link.Filename,
Pass: 0,
Size: fileSize,
Size: int(fileSize),
URL: link.Download,
},
}, nil
@@ -417,7 +393,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return err
}
_, err = f.removeFolder(ctx, dir, folderID)
_, err = f.removeFolder(dir, folderID)
if err != nil {
return err
}


@@ -43,7 +43,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// Size returns the size of the file
func (o *Object) Size() int64 {
return o.file.Size
return int64(o.file.Size)
}
// Fs returns read only access to the Fs that this object is part of
@@ -74,8 +74,8 @@ func (o *Object) SetModTime(context.Context, time.Time) error {
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
fs.FixRangeOption(options, o.file.Size)
downloadToken, err := o.fs.getDownloadToken(ctx, o.file.URL)
fs.FixRangeOption(options, int64(o.file.Size))
downloadToken, err := o.fs.getDownloadToken(o.file.URL)
if err != nil {
return nil, err
@@ -89,7 +89,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.rest.Call(ctx, &opts)
resp, err = o.fs.rest.Call(&opts)
return shouldRetry(resp, err)
})
@@ -131,7 +131,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
func (o *Object) Remove(ctx context.Context) error {
// fs.Debugf(f, "Removing file `%s` with url `%s`", o.file.Filename, o.file.URL)
_, err := o.fs.deleteFile(ctx, o.file.URL)
_, err := o.fs.deleteFile(o.file.URL)
if err != nil {
return err


@@ -0,0 +1,71 @@
/*
Translate file names for 1fichier
1Fichier reserved characters
The following characters are 1Fichier reserved characters, and can't
be used in 1Fichier folder and file names.
*/
package fichier
import (
"regexp"
"strings"
)
// charMap holds replacements for characters
//
// 1Fichier has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '\', // FULLWIDTH REVERSE SOLIDUS
'<': '<', // FULLWIDTH LESS-THAN SIGN
'>': '>', // FULLWIDTH GREATER-THAN SIGN
'"': '"', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
'\'': ''', // FULLWIDTH APOSTROPHE
'$': '$', // FULLWIDTH DOLLAR SIGN
'`': '`', // FULLWIDTH GRAVE ACCENT
' ': '␠', // SYMBOL FOR SPACE
}
invCharMap map[rune]rune
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
)
func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}
// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// file names can't start with space either
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Replace reserved characters
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != ' ' {
return replacement
}
return c
}, in)
}
// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}
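
One subtlety: because fixStartingWithSpace anchors on either a slash or the start of the string, the leading-space substitution applies to every path segment, not only the first; the round-trip test in the file that follows only exercises the simple case. A standalone sketch with assumed inputs:

package main

import (
	"fmt"
	"regexp"
)

var fixStartingWithSpace = regexp.MustCompile(`(/|^) `)

func main() {
	// Both the path start and the segment after "/" begin with a space.
	out := fixStartingWithSpace.ReplaceAllString(" a/ b", "$1␠")
	fmt.Println(out) // ␠a/␠b
}
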


@@ -0,0 +1,24 @@
package fichier
import "testing"
func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{"\"'<>/\\$`", ""'<>/\$`"},
{" leading space", "␠leading space"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}


@@ -69,7 +69,7 @@ type SharedFolderResponse []SharedFile
type SharedFile struct {
Filename string `json:"filename"`
Link string `json:"link"`
Size int64 `json:"size"`
Size int `json:"size"`
}
// EndFileUploadResponse is the response structure of the corresponding request
@@ -93,7 +93,7 @@ type File struct {
Date string `json:"date"`
Filename string `json:"filename"`
Pass int `json:"pass"`
Size int64 `json:"size"`
Size int `json:"size"`
URL string `json:"url"`
}


@@ -14,12 +14,10 @@ import (
"github.com/jlaffaye/ftp"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
)
@@ -30,70 +28,54 @@ func init() {
Name: "ftp",
Description: "FTP Connection",
NewFs: NewFs,
Options: []fs.Option{{
Name: "host",
Help: "FTP host to connect to",
Required: true,
Examples: []fs.OptionExample{{
Value: "ftp.example.com",
Help: "Connect to ftp.example.com",
}},
}, {
Name: "user",
Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
}, {
Name: "port",
Help: "FTP port, leave blank to use default (21)",
}, {
Name: "pass",
Help: "FTP password",
IsPassword: true,
Required: true,
}, {
Name: "tls",
Help: "Use FTP over TLS (Implicit)",
Default: false,
}, {
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
Default: 0,
Advanced: true,
}, {
Name: "no_check_certificate",
Help: "Do not verify the TLS certificate of the server",
Default: false,
Advanced: true,
}, {
Name: "disable_epsv",
Help: "Disable using EPSV even if server advertises support",
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// The FTP protocol can't handle trailing spaces (for instance
// pureftpd turns them into _)
//
// proftpd can't handle '*' in file names
// pureftpd can't handle '[', ']' or '*'
Default: (encoder.Display |
encoder.EncodeRightSpace),
}},
Options: []fs.Option{
{
Name: "host",
Help: "FTP host to connect to",
Required: true,
Examples: []fs.OptionExample{{
Value: "ftp.example.com",
Help: "Connect to ftp.example.com",
}},
}, {
Name: "user",
Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
}, {
Name: "port",
Help: "FTP port, leave blank to use default (21)",
}, {
Name: "pass",
Help: "FTP password",
IsPassword: true,
Required: true,
}, {
Name: "tls",
Help: "Use FTP over TLS (Implicit)",
Default: false,
}, {
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
Default: 0,
Advanced: true,
}, {
Name: "no_check_certificate",
Help: "Do not verify the TLS certificate of the server",
Default: false,
Advanced: true,
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
TLS bool `config:"tls"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
Enc encoder.MultiEncoder `config:"encoding"`
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
TLS bool `config:"tls"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
}
// Fs represents a remote FTP server
@@ -159,9 +141,6 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
}
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
}
c, err := ftp.Dial(f.dialAddr, ftpConfig...)
if err != nil {
fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
@@ -190,11 +169,7 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
if c != nil {
return c, nil
}
c, err = f.ftpConnection()
if err != nil && f.opt.Concurrency > 0 {
f.tokens.Put()
}
return c, err
return f.ftpConnection()
}
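
The point of this pair of hunks is token accounting: when a concurrency limit is set, every Get on the token pool must be matched by exactly one Put, so a dial that fails has to give its token back at once or the pool permanently loses a slot. A minimal sketch of that invariant, assuming a plain buffered-channel semaphore in place of rclone's token pool:

package main

import (
	"errors"
	"fmt"
)

// sem is an assumed stand-in for f.tokens: a counting semaphore
// bounding the number of concurrent FTP connections.
type sem chan struct{}

func (s sem) Get() { s <- struct{}{} }
func (s sem) Put() { <-s }

type conn struct{}

func dial() (*conn, error) { return nil, errors.New("dial failed") }

// getConn mirrors the shape of getFtpConnection: take a token, dial,
// and return the token on failure so the slot is not lost.
func getConn(tokens sem) (*conn, error) {
	tokens.Get()
	c, err := dial()
	if err != nil {
		tokens.Put() // the caller never gets a connection to put back
	}
	return c, err
}

func main() {
	tokens := make(sem, 1) // a pool of one makes a leaked token obvious
	_, err := getConn(tokens)
	fmt.Println(err) // dial failed
	tokens.Get()     // succeeds only because the failed dial returned its token
	tokens.Put()
}
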
// Return an FTP connection to the pool
@@ -207,13 +182,7 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
if f.opt.Concurrency > 0 {
defer f.tokens.Put()
}
if pc == nil {
return
}
c := *pc
if c == nil {
return
}
*pc = nil
if err != nil {
// If not a regular FTP error code then check the connection
@@ -326,37 +295,10 @@ func translateErrorDir(err error) error {
return err
}
// entryToStandard converts an incoming ftp.Entry to Standard encoding
func (f *Fs) entryToStandard(entry *ftp.Entry) {
// Skip . and .. as we don't want these encoded
if entry.Name == "." || entry.Name == ".." {
return
}
entry.Name = f.opt.Enc.ToStandardName(entry.Name)
entry.Target = f.opt.Enc.ToStandardPath(entry.Target)
}
// dirFromStandardPath returns dir in encoded form.
func (f *Fs) dirFromStandardPath(dir string) string {
// Skip . and .. as we don't want these encoded
if dir == "." || dir == ".." {
return dir
}
return f.opt.Enc.FromStandardPath(dir)
}
// findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
fullPath := path.Join(f.root, remote)
if fullPath == "" || fullPath == "." || fullPath == "/" {
// if root, assume exists and synthesize an entry
return &ftp.Entry{
Name: "",
Type: ftp.EntryTypeFolder,
Time: time.Now(),
}, nil
}
dir := path.Dir(fullPath)
base := path.Base(fullPath)
@@ -364,13 +306,12 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
if err != nil {
return nil, errors.Wrap(err, "findItem")
}
files, err := c.List(f.dirFromStandardPath(dir))
files, err := c.List(dir)
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorFile(err)
}
for _, file := range files {
f.entryToStandard(file)
if file.Name == base {
return file, nil
}
@@ -425,7 +366,7 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
// defer fs.Trace(dir, "curlevel=%d", curlevel)("")
c, err := f.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "list")
@@ -437,7 +378,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
resultchan := make(chan []*ftp.Entry, 1)
errchan := make(chan error, 1)
go func() {
result, err := c.List(f.dirFromStandardPath(path.Join(f.root, dir)))
result, err := c.List(path.Join(f.root, dir))
f.putFtpConnection(&c, err)
if err != nil {
errchan <- err
@@ -474,7 +415,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
for i := range files {
object := files[i]
f.entryToStandard(object)
newremote := path.Join(dir, object.Name)
switch object.Type {
case ftp.EntryTypeFolder:
@@ -544,21 +484,19 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
if err != nil {
return nil, errors.Wrap(err, "getInfo")
}
files, err := c.List(f.dirFromStandardPath(dir))
files, err := c.List(dir)
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorFile(err)
}
for i := range files {
file := files[i]
f.entryToStandard(file)
if file.Name == base {
if files[i].Name == base {
info := &FileInfo{
Name: remote,
Size: file.Size,
ModTime: file.Time,
IsDir: file.Type == ftp.EntryTypeFolder,
Size: files[i].Size,
ModTime: files[i].Time,
IsDir: files[i].Type == ftp.EntryTypeFolder,
}
return info, nil
}
@@ -568,7 +506,6 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
// mkdir makes the directory and parents using unrooted paths
func (f *Fs) mkdir(abspath string) error {
abspath = path.Clean(abspath)
if abspath == "." || abspath == "/" {
return nil
}
@@ -590,7 +527,7 @@ func (f *Fs) mkdir(abspath string) error {
if connErr != nil {
return errors.Wrap(connErr, "mkdir")
}
err = c.MakeDir(f.dirFromStandardPath(abspath))
err = c.MakeDir(abspath)
f.putFtpConnection(&c, err)
switch errX := err.(type) {
case *textproto.Error:
@@ -626,7 +563,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
if err != nil {
return errors.Wrap(translateErrorFile(err), "Rmdir")
}
err = c.RemoveDir(f.dirFromStandardPath(path.Join(f.root, dir)))
err = c.RemoveDir(path.Join(f.root, dir))
f.putFtpConnection(&c, err)
return translateErrorDir(err)
}
@@ -647,8 +584,8 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, errors.Wrap(err, "Move")
}
err = c.Rename(
f.opt.Enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
f.opt.Enc.FromStandardPath(path.Join(f.root, remote)),
path.Join(srcObj.fs.root, srcObj.remote),
path.Join(f.root, remote),
)
f.putFtpConnection(&c, err)
if err != nil {
@@ -701,8 +638,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return errors.Wrap(err, "DirMove")
}
err = c.Rename(
f.dirFromStandardPath(srcPath),
f.dirFromStandardPath(dstPath),
srcPath,
dstPath,
)
f.putFtpConnection(&c, err)
if err != nil {
@@ -788,23 +725,19 @@ func (f *ftpReadCloser) Close() error {
case <-timer.C:
// if timer fired assume no error but connection dead
fs.Errorf(f.f, "Timeout when waiting for connection Close")
f.f.putFtpConnection(nil, nil)
return nil
}
// if errors while reading or closing, dump the connection
if err != nil || f.err != nil {
_ = f.c.Quit()
f.f.putFtpConnection(nil, nil)
} else {
f.f.putFtpConnection(&f.c, nil)
}
// mask the error if it was caused by a premature close
// NB StatusAboutToSend is to work around a bug in pureftpd
// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable:
err = nil
}
}
@@ -832,7 +765,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
if err != nil {
return nil, errors.Wrap(err, "open")
}
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
fd, err := c.RetrFrom(path, uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
return nil, errors.Wrap(err, "open")
@@ -867,11 +800,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return errors.Wrap(err, "Update")
}
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
err = c.Stor(path, in)
if err != nil {
_ = c.Quit() // toss this connection to avoid sync errors
remove()
o.fs.putFtpConnection(nil, err)
return errors.Wrap(err, "update stor")
}
o.fs.putFtpConnection(&c, nil)
@@ -898,7 +830,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
if err != nil {
return errors.Wrap(err, "Remove")
}
err = c.Delete(o.fs.opt.Enc.FromStandardPath(path))
err = c.Delete(path)
o.fs.putFtpConnection(&c, err)
}
return err


@@ -5,44 +5,13 @@ import (
"testing"
"github.com/rclone/rclone/backend/ftp"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPProftpd:",
RemoteName: "TestFTP:",
NilObject: (*ftp.Object)(nil),
})
}
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPRclone:",
NilObject: (*ftp.Object)(nil),
})
}
func TestIntegration3(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPPureftpd:",
NilObject: (*ftp.Object)(nil),
})
}
// func TestIntegration4(t *testing.T) {
// if *fstest.RemoteName != "" {
// t.Skip("skipping as -remote is set")
// }
// fstests.Run(t, &fstests.Opt{
// RemoteName: "TestFTPVsftpd:",
// NilObject: (*ftp.Object)(nil),
// })
// }


@@ -23,7 +23,9 @@ import (
"net/http"
"os"
"path"
"regexp"
"strings"
"sync"
"time"
"github.com/pkg/errors"
@@ -36,8 +38,6 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"golang.org/x/oauth2"
@@ -246,42 +246,34 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class",
}},
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
ProjectNumber string `config:"project_number"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
ObjectACL string `config:"object_acl"`
BucketACL string `config:"bucket_acl"`
BucketPolicyOnly bool `config:"bucket_policy_only"`
Location string `config:"location"`
StorageClass string `config:"storage_class"`
Enc encoder.MultiEncoder `config:"encoding"`
ProjectNumber string `config:"project_number"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
ObjectACL string `config:"object_acl"`
BucketACL string `config:"bucket_acl"`
BucketPolicyOnly bool `config:"bucket_policy_only"`
Location string `config:"location"`
StorageClass string `config:"storage_class"`
}
// Fs represents a remote storage server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache of bucket status
pacer *fs.Pacer // To pace the API calls
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
pacer *fs.Pacer // To pace the API calls
}
// Object describes a storage object
@@ -306,18 +298,18 @@ func (f *Fs) Name() string {
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootBucket == "" {
return fmt.Sprintf("GCS root")
if f.root == "" {
return fmt.Sprintf("Storage bucket %s", f.bucket)
}
if f.rootDirectory == "" {
return fmt.Sprintf("GCS bucket %s", f.rootBucket)
}
return fmt.Sprintf("GCS bucket %s path %s", f.rootBucket, f.rootDirectory)
return fmt.Sprintf("Storage bucket %s path %s", f.bucket, f.root)
}
// Features returns the optional features of this Fs
@@ -349,24 +341,21 @@ func shouldRetry(err error) (again bool, errOut error) {
return again, err
}
// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
// Pattern to match a storage path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parsePath parses a storage 'url'
func parsePath(path string) (bucket, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("couldn't find bucket in storage path %q", path)
} else {
bucket, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
return
}
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}
// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}
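
As a rough illustration of the new split helpers (assuming lib/bucket.Split simply cuts at the first slash, which is how the surrounding code treats it):

```go
package main

import (
	"fmt"
	"path"

	"github.com/rclone/rclone/lib/bucket"
)

func main() {
	// With f.root = "mybucket/backups", an object remote of
	// "2019/file.txt" should resolve like this:
	b, p := bucket.Split(path.Join("mybucket/backups", "2019/file.txt"))
	fmt.Println(b, p) // mybucket backups/2019/file.txt
}
```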
func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
if err != nil {
@@ -376,15 +365,8 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
var oAuthClient *http.Client
// Parse config into Options struct
@@ -424,19 +406,22 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
}
f := &Fs{
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
bucket, directory, err := parsePath(root)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
bucket: bucket,
root: directory,
opt: *opt,
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
}).Fill(f)
// Create a new authorized Drive client.
@@ -446,19 +431,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
}
if f.rootBucket != "" && f.rootDirectory != "" {
if f.root != "" {
f.root += "/"
// Check to see if the object exists
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
_, err = f.svc.Objects.Get(bucket, directory).Do()
return shouldRetry(err)
})
if err == nil {
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
f.root = path.Dir(directory)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
f.setRoot(newRoot)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
@@ -469,7 +455,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage.Object) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -477,7 +463,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage
if info != nil {
o.setMetaData(info)
} else {
err := o.readMetaData(ctx) // reads info and meta, returning an error
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
return nil, err
}
@@ -488,7 +474,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
return f.newObjectWithInfo(remote, nil)
}
// listFn is called from list to handle an object.
@@ -499,24 +485,20 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
//
// The remote has prefix removed from it and if addBucket is set
// then it adds the bucket to the start.
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) (err error) {
if prefix != "" {
prefix += "/"
func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) (err error) {
root := f.root
rootLength := len(root)
if dir != "" {
root += dir + "/"
}
if directory != "" {
directory += "/"
}
list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
list := f.svc.Objects.List(f.bucket).Prefix(root).MaxResults(listChunks)
if !recurse {
list = list.Delimiter("/")
}
for {
var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) {
objects, err = list.Context(ctx).Do()
objects, err = list.Do()
return shouldRetry(err)
})
if err != nil {
@@ -529,38 +511,31 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
if !recurse {
var object storage.Object
for _, remote := range objects.Prefixes {
if !strings.HasSuffix(remote, "/") {
for _, prefix := range objects.Prefixes {
if !strings.HasSuffix(prefix, "/") {
continue
}
remote = f.opt.Enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
}
remote = remote[len(prefix) : len(remote)-1]
if addBucket {
remote = path.Join(bucket, remote)
}
err = fn(remote, &object, true)
err = fn(prefix[rootLength:len(prefix)-1], &object, true)
if err != nil {
return err
}
}
}
for _, object := range objects.Items {
remote := f.opt.Enc.ToStandardPath(object.Name)
if !strings.HasPrefix(remote, prefix) {
if !strings.HasPrefix(object.Name, root) {
fs.Logf(f, "Odd name received %q", object.Name)
continue
}
remote = remote[len(prefix):]
isDirectory := strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
remote := object.Name[rootLength:]
// is this a directory marker?
if isDirectory && object.Size == 0 {
if (strings.HasSuffix(remote, "/") || remote == "") && object.Size == 0 {
if recurse && remote != "" {
// add a directory in if using --fast-list since it will have no prefixes
err = fn(remote[:len(remote)-1], object, true)
if err != nil {
return err
}
}
continue // skip directory marker
}
err = fn(remote, object, false)
@@ -577,23 +552,32 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
func (f *Fs) itemToDirEntry(remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
if isDirectory {
d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size))
return d, nil
}
o, err := f.newObjectWithInfo(ctx, remote, object)
o, err := f.newObjectWithInfo(remote, object)
if err != nil {
return nil, err
}
return o, nil
}
// mark the bucket as being OK
func (f *Fs) markBucketOK() {
if f.bucket != "" {
f.bucketOKMu.Lock()
f.bucketOK = true
f.bucketOKMu.Unlock()
}
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// List the objects
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
err = f.list(ctx, dir, false, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
@@ -606,12 +590,15 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
return nil, err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
f.markBucketOK()
return entries, err
}
// listBuckets lists the buckets
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
if f.opt.ProjectNumber == "" {
return nil, errors.New("can't list buckets without project number")
}
@@ -619,14 +606,14 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
for {
var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) {
buckets, err = listBuckets.Context(ctx).Do()
buckets, err = listBuckets.Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
}
for _, bucket := range buckets.Items {
d := fs.NewDir(f.opt.Enc.ToStandardName(bucket.Name), time.Time{})
d := fs.NewDir(bucket.Name, time.Time{})
entries = append(entries, d)
}
if buckets.NextPageToken == "" {
@@ -647,14 +634,10 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
if f.bucket == "" {
return f.listBuckets(dir)
}
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
return f.listDir(ctx, dir)
}
// ListR lists the objects and directories of the Fs starting
@@ -674,43 +657,22 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
}
if bucket == "" {
entries, err := f.listBuckets(ctx)
err = f.list(ctx, dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
bucket := entry.Remote()
err = listR(bucket, "", f.rootDirectory, true)
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
}
} else {
err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return list.Add(entry)
})
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return list.Flush()
}
@@ -735,55 +697,58 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
bucket, _ := f.split(dir)
return f.makeBucket(ctx, bucket)
}
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.bucketOK {
return nil
}
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
// makeBucket creates the bucket if it doesn't exist
func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
return f.cache.Create(bucket, func() error {
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
return shouldRetry(err)
})
if err == nil {
// Bucket already exists
return nil
} else if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code != http.StatusNotFound {
return errors.Wrap(err, "failed to get bucket")
}
} else {
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(f.bucket).MaxResults(1).Do()
return shouldRetry(err)
})
if err == nil {
// Bucket already exists
f.bucketOK = true
return nil
} else if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code != http.StatusNotFound {
return errors.Wrap(err, "failed to get bucket")
}
} else {
return errors.Wrap(err, "failed to get bucket")
}
if f.opt.ProjectNumber == "" {
return errors.New("can't make bucket without project number")
}
if f.opt.ProjectNumber == "" {
return errors.New("can't make bucket without project number")
}
bucket := storage.Bucket{
Name: bucket,
Location: f.opt.Location,
StorageClass: f.opt.StorageClass,
bucket := storage.Bucket{
Name: f.bucket,
Location: f.opt.Location,
StorageClass: f.opt.StorageClass,
}
if f.opt.BucketPolicyOnly {
bucket.IamConfiguration = &storage.BucketIamConfiguration{
BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
Enabled: true,
},
}
if f.opt.BucketPolicyOnly {
bucket.IamConfiguration = &storage.BucketIamConfiguration{
BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
Enabled: true,
},
}
}
err = f.pacer.Call(func() (bool, error) {
insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
return f.pacer.Call(func() (bool, error) {
insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
_, err = insertBucket.Context(ctx).Do()
return shouldRetry(err)
})
}, nil)
_, err = insertBucket.Do()
return shouldRetry(err)
})
if err == nil {
f.bucketOK = true
}
return err
}
// Rmdir deletes the bucket if the fs is at the root
@@ -791,16 +756,19 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
bucket, directory := f.split(dir)
if bucket == "" || directory != "" {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
return nil
}
return f.cache.Remove(bucket, func() error {
return f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
return shouldRetry(err)
})
err = f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(f.bucket).Do()
return shouldRetry(err)
})
if err == nil {
f.bucketOK = false
}
return err
}
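
The cache that replaces the old bucketOK flag/mutex pair is lib/bucket.Cache. A minimal sketch of its assumed contract, with a made-up bucket name: Create runs its callback only while the bucket is not known to exist, and Remove marks it gone again.

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/bucket"
)

func main() {
	c := bucket.NewCache()

	// First Create invokes the callback...
	_ = c.Create("mybucket", func() error {
		fmt.Println("creating mybucket")
		return nil
	}, nil)
	// ...a second Create for the same bucket is a no-op.
	_ = c.Create("mybucket", func() error {
		fmt.Println("never printed")
		return nil
	}, nil)

	// Remove deletes and forgets the bucket.
	_ = c.Remove("mybucket", func() error {
		fmt.Println("deleting mybucket")
		return nil
	})
}
```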
// Precision returns the precision
@@ -818,8 +786,7 @@ func (f *Fs) Precision() time.Duration {
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
err := f.makeBucket(ctx, dstBucket)
err := f.Mkdir(ctx, "")
if err != nil {
return nil, err
}
@@ -828,7 +795,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcBucket, srcPath := srcObj.split()
// Temporary Object under construction
dstObj := &Object{
@@ -836,13 +802,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
remote: remote,
}
srcBucket := srcObj.fs.bucket
srcObject := srcObj.fs.root + srcObj.remote
dstBucket := f.bucket
dstObject := f.root + remote
var newObject *storage.Object
err = f.pacer.Call(func() (bool, error) {
copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil)
if !f.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
}
newObject, err = copyObject.Context(ctx).Do()
newObject, err = f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
return shouldRetry(err)
})
if err != nil {
@@ -925,33 +891,24 @@ func (o *Object) setMetaData(info *storage.Object) {
}
}
// readObjectInfo reads the definition for an object
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
bucket, bucketPath := o.split()
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
if !o.modTime.IsZero() {
return nil
}
var object *storage.Object
err = o.fs.pacer.Call(func() (bool, error) {
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
object, err = o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
return shouldRetry(err)
})
if err != nil {
if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code == http.StatusNotFound {
return nil, fs.ErrorObjectNotFound
return fs.ErrorObjectNotFound
}
}
return nil, err
}
return object, nil
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
if !o.modTime.IsZero() {
return nil
}
object, err := o.readObjectInfo(ctx)
if err != nil {
return err
}
o.setMetaData(object)
@@ -963,7 +920,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx)
err := o.readMetaData()
if err != nil {
// fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -980,27 +937,15 @@ func metadataFromModTime(modTime time.Time) map[string]string {
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
// read the complete existing object first
object, err := o.readObjectInfo(ctx)
if err != nil {
return err
// This only adds metadata so will preserve other metadata
object := storage.Object{
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,
Metadata: metadataFromModTime(modTime),
}
// Add the mtime to the existing metadata
mtime := modTime.Format(timeFormatOut)
if object.Metadata == nil {
object.Metadata = make(map[string]string, 1)
}
object.Metadata[metaMtime] = mtime
// Copy the object to itself to update the metadata
// Using PATCH requires too many permissions
bucket, bucketPath := o.split()
var newObject *storage.Object
err = o.fs.pacer.Call(func() (bool, error) {
copyObject := o.fs.svc.Objects.Copy(bucket, bucketPath, bucket, bucketPath, object)
if !o.fs.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = copyObject.Context(ctx).Do()
newObject, err = o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
return shouldRetry(err)
})
if err != nil {
@@ -1021,8 +966,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
fs.FixRangeOption(options, o.bytes)
fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1050,26 +993,25 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
bucket, bucketPath := o.split()
err := o.fs.makeBucket(ctx, bucket)
err := o.fs.Mkdir(ctx, "")
if err != nil {
return err
}
modTime := src.ModTime(ctx)
object := storage.Object{
Bucket: bucket,
Name: bucketPath,
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,
ContentType: fs.MimeType(ctx, src),
Metadata: metadataFromModTime(modTime),
}
var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
insertObject := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
if !o.fs.opt.BucketPolicyOnly {
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = insertObject.Context(ctx).Do()
newObject, err = insertObject.Do()
return shouldRetry(err)
})
if err != nil {
@@ -1082,9 +1024,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
err = o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
return shouldRetry(err)
})
return err


@@ -27,7 +27,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/oauthutil"
@@ -61,8 +60,6 @@ var (
// Description of how to auth for this app
oauthConfig = &oauth2.Config{
Scopes: []string{
"openid",
"profile",
scopeReadWrite,
},
Endpoint: google.Endpoint,
@@ -146,20 +143,18 @@ type Options struct {
// Fs represents a remote storage server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
unAuth *rest.Client // unauthenticated http client
srv *rest.Client // the connection to the one drive server
ts *oauthutil.TokenSource // token source for oauth2
pacer *fs.Pacer // To pace the API calls
startTime time.Time // time Fs was started - used for datestamps
albumsMu sync.Mutex // protect albums (but not contents)
albums map[bool]*albums // albums, shared or not
uploadedMu sync.Mutex // to protect the below
uploaded dirtree.DirTree // record of uploaded items
createMu sync.Mutex // held when creating albums to prevent dupes
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
pacer *fs.Pacer // To pace the API calls
startTime time.Time // time Fs was started - used for datestamps
albumsMu sync.Mutex // protect albums (but not contents)
albums map[bool]*albums // albums, shared or not
uploadedMu sync.Mutex // to protect the below
uploaded dirtree.DirTree // record of uploaded items
createMu sync.Mutex // held when creating albums to prevent dupes
}
// Object describes a storage object
@@ -246,8 +241,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
baseClient := fshttp.NewClient(fs.Config)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
}
@@ -256,14 +250,11 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if root == "." || root == "/" {
root = ""
}
f := &Fs{
name: name,
root: root,
opt: *opt,
unAuth: rest.NewClient(baseClient),
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
ts: ts,
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
startTime: time.Now(),
albums: map[bool]*albums{},
@@ -289,85 +280,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return f, nil
}
// fetchEndpoint gets the openid endpoint named from the Google config
func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, err error) {
// Get openID config without auth
opts := rest.Opts{
Method: "GET",
RootURL: "https://accounts.google.com/.well-known/openid-configuration",
}
var openIDconfig map[string]interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig)
return shouldRetry(resp, err)
})
if err != nil {
return "", errors.Wrap(err, "couldn't read openID config")
}
// Find userinfo endpoint
endpoint, ok := openIDconfig[name].(string)
if !ok {
return "", errors.Errorf("couldn't find %q from openID config", name)
}
return endpoint, nil
}
// UserInfo fetches info about the current user with oauth2
func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err error) {
endpoint, err := f.fetchEndpoint(ctx, "userinfo_endpoint")
if err != nil {
return nil, err
}
// Fetch the user info with auth
opts := rest.Opts{
Method: "GET",
RootURL: endpoint,
}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &userInfo)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't read user info")
}
return userInfo, nil
}
// Disconnect kills the token and refresh token
func (f *Fs) Disconnect(ctx context.Context) (err error) {
endpoint, err := f.fetchEndpoint(ctx, "revocation_endpoint")
if err != nil {
return err
}
token, err := f.ts.Token()
if err != nil {
return err
}
// Revoke the token and the refresh token
opts := rest.Opts{
Method: "POST",
RootURL: endpoint,
MultipartParams: url.Values{
"token": []string{token.AccessToken},
"token_type_hint": []string{"access_token"},
},
}
var res interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &res)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't revoke token")
}
fs.Infof(f, "res = %+v", res)
return nil
}
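
Both endpoints used above come from Google's OpenID discovery document, which can be fetched without any authentication. A quick standalone sketch (the two field names are as published in Google's discovery document):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	res, err := http.Get("https://accounts.google.com/.well-known/openid-configuration")
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()

	var cfg map[string]interface{}
	if err := json.NewDecoder(res.Body).Decode(&cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg["userinfo_endpoint"])
	fmt.Println(cfg["revocation_endpoint"])
}
```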
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
@@ -423,7 +335,7 @@ func findID(name string) string {
// list the albums into an internal cache
// FIXME cache invalidation
func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err error) {
func (f *Fs) listAlbums(shared bool) (all *albums, err error) {
f.albumsMu.Lock()
defer f.albumsMu.Unlock()
all, ok := f.albums[shared]
@@ -445,7 +357,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
var result api.ListAlbums
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -482,7 +394,7 @@ type listFn func(remote string, object *api.MediaItem, isDirectory bool) error
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err error) {
func (f *Fs) list(filter api.SearchFilter, fn listFn) (err error) {
opts := rest.Opts{
Method: "POST",
Path: "/mediaItems:search",
@@ -494,7 +406,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
var result api.MediaItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &filter, &result)
resp, err = f.srv.CallJSON(&opts, &filter, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -543,7 +455,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *api.MediaI
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {
// List the objects
err = f.list(ctx, filter, func(remote string, item *api.MediaItem, isDirectory bool) error {
err = f.list(filter, func(remote string, item *api.MediaItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, prefix+remote, item, isDirectory)
if err != nil {
return err
@@ -638,7 +550,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
var result api.Album
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, request, &result)
resp, err = f.srv.CallJSON(&opts, request, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -654,7 +566,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *api.Album, err error) {
f.createMu.Lock()
defer f.createMu.Unlock()
albums, err := f.listAlbums(ctx, false)
albums, err := f.listAlbums(false)
if err != nil {
return nil, err
}
@@ -708,7 +620,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
return err
}
albumTitle := match[1]
allAlbums, err := f.listAlbums(ctx, false)
allAlbums, err := f.listAlbums(false)
if err != nil {
return err
}
@@ -773,7 +685,7 @@ func (o *Object) Size() int64 {
RootURL: o.downloadURL(),
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -824,7 +736,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
var item api.MediaItem
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &item)
resp, err = o.fs.srv.CallJSON(&opts, nil, &item)
return shouldRetry(resp, err)
})
if err != nil {
@@ -901,7 +813,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -954,8 +866,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var token []byte
var resp *http.Response
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
if err != nil {
_ = resp.Body.Close()
return shouldRetry(resp, err)
}
token, err = rest.ReadBody(resp)
@@ -986,7 +899,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
var result api.BatchCreateResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, request, &result)
resp, err = o.fs.srv.CallJSON(&opts, request, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1003,9 +916,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Add upload to internal storage
if pattern.isUpload {
o.fs.uploadedMu.Lock()
o.fs.uploaded.AddEntry(o)
o.fs.uploadedMu.Unlock()
}
return nil
}
@@ -1031,7 +942,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1052,10 +963,8 @@ func (o *Object) ID() string {
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.UserInfoer = &Fs{}
_ fs.Disconnecter = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
_ fs.Fs = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
)


@@ -12,9 +12,7 @@ import (
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -27,6 +25,17 @@ const (
fileNameUpload = "rclone-test-image2.jpg"
)
// Wrapper to override the remote for an object
type overrideRemoteObject struct {
fs.Object
remote string
}
// Remote returns the overridden remote name
func (o *overrideRemoteObject) Remote() string {
return o.remote
}
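
The wrapper relies on Go struct embedding: the anonymous fs.Object field supplies every method, and the explicit Remote method shadows just that one. Note that embedding is not virtual dispatch; promoted methods still see the embedded value, as this generic sketch shows:

```go
package main

import "fmt"

type base struct{ name string }

func (b base) Remote() string   { return b.name }
func (b base) Describe() string { return "object at " + b.Remote() }

// override embeds base and shadows only Remote.
type override struct {
	base
	remote string
}

func (o override) Remote() string { return o.remote }

func main() {
	o := override{base{"old/path"}, "new/path"}
	fmt.Println(o.Remote())   // new/path
	fmt.Println(o.Describe()) // object at old/path -- promoted method uses the embedded value
}
```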
func TestIntegration(t *testing.T) {
ctx := context.Background()
fstest.Initialise()
@@ -46,7 +55,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)
t.Run("CreateAlbum", func(t *testing.T) {
albumName := "album/rclone-test-" + random.String(24)
albumName := "album/rclone-test-" + fstest.RandomString(24)
err = f.Mkdir(ctx, albumName)
require.NoError(t, err)
remote := albumName + "/" + fileNameAlbum
@@ -56,7 +65,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()
@@ -221,7 +230,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()


@@ -20,7 +20,7 @@ import (
// file pattern parsing
type lister interface {
listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error)
listAlbums(ctx context.Context, shared bool) (all *albums, err error)
listAlbums(shared bool) (all *albums, err error)
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
dirTime() time.Time
}
@@ -296,7 +296,7 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
// is a prefix of another album, or actual files, or a combination of
// the two.
func albumsToEntries(ctx context.Context, f lister, shared bool, prefix string, albumPath string) (entries fs.DirEntries, err error) {
albums, err := f.listAlbums(ctx, shared)
albums, err := f.listAlbums(shared)
if err != nil {
return nil, err
}


@@ -44,7 +44,7 @@ func (f *testLister) listDir(ctx context.Context, prefix string, filter api.Sear
}
// mock listAlbums for testing
func (f *testLister) listAlbums(ctx context.Context, shared bool) (all *albums, err error) {
func (f *testLister) listAlbums(shared bool) (all *albums, err error) {
return f.albums, nil
}


@@ -13,7 +13,6 @@ import (
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
@@ -47,21 +46,6 @@ func init() {
Value: "https://user:pass@example.com",
Help: "Connect to example.com using a username and password",
}},
}, {
Name: "headers",
Help: `Set HTTP headers for all transactions
Use this to set additional HTTP headers for all transactions.
The input format is a comma separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used.
For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
`,
Default: fs.CommaSepList{},
Advanced: true,
}, {
Name: "no_slash",
Help: `Set this if the site doesn't end directories with /
@@ -78,26 +62,6 @@ Note that this may cause rclone to confuse genuine HTML files with
directories.`,
Default: false,
Advanced: true,
}, {
Name: "no_head",
Help: `Don't use HEAD requests to find file sizes in dir listing
If your site is being very slow to load then you can try this option.
Normally rclone does a HEAD request for each potential file in a
directory listing to:
- find its size
- check it really exists
- check to see if it is a directory
If you set this option, rclone will not do the HEAD request. This will mean
- directory listings are much quicker
- rclone won't have the times or sizes of any files
- some files that don't exist may be in the listing
`,
Default: false,
Advanced: true,
}},
}
fs.Register(fsi)
@@ -105,10 +69,8 @@ If you set this option, rclone will not do the HEAD request. This will mean
// Options defines the configuration for this backend
type Options struct {
Endpoint string `config:"url"`
NoSlash bool `config:"no_slash"`
NoHead bool `config:"no_head"`
Headers fs.CommaSepList `config:"headers"`
Endpoint string `config:"url"`
NoSlash bool `config:"no_slash"`
}
// Fs stores the interface to the remote HTTP files
@@ -146,7 +108,6 @@ func statusError(res *http.Response, err error) error {
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -154,10 +115,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
if len(opt.Headers)%2 != 0 {
return nil, errors.New("odd number of headers supplied")
}
if !strings.HasSuffix(opt.Endpoint, "/") {
opt.Endpoint += "/"
}
@@ -183,15 +140,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return http.ErrUseLastResponse
}
// check to see if points to a file
req, err := http.NewRequest("HEAD", u.String(), nil)
res, err := noRedir.Head(u.String())
err = statusError(res, err)
if err == nil {
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
addHeaders(req, opt)
res, err := noRedir.Do(req)
err = statusError(res, err)
if err == nil {
isFile = true
}
isFile = true
}
}
@@ -261,7 +213,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
fs: f,
remote: remote,
}
err := o.stat(ctx)
err := o.stat()
if err != nil {
return nil, err
}
@@ -364,22 +316,8 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
return names, nil
}
// Adds the configured headers to the request if any
func addHeaders(req *http.Request, opt *Options) {
for i := 0; i < len(opt.Headers); i += 2 {
key := opt.Headers[i]
value := opt.Headers[i+1]
req.Header.Add(key, value)
}
}
// Adds the configured headers to the request if any
func (f *Fs) addHeaders(req *http.Request) {
addHeaders(req, &f.opt)
}
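
To make the flat key,value layout concrete, here is a standalone sketch of the same pairing loop (the header names are just placeholders):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// `headers = Cookie,name=value,Authorization,xxx` parses to this list.
	headers := []string{"Cookie", "name=value", "Authorization", "xxx"}

	req, err := http.NewRequest("GET", "http://example.com/", nil)
	if err != nil {
		panic(err)
	}
	for i := 0; i < len(headers); i += 2 {
		req.Header.Add(headers[i], headers[i+1])
	}
	fmt.Println(req.Header)
}
```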
// Read the directory passed in
func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error) {
func (f *Fs) readDir(dir string) (names []string, err error) {
URL := f.url(dir)
u, err := url.Parse(URL)
if err != nil {
@@ -388,14 +326,7 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
if !strings.HasSuffix(URL, "/") {
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
}
// Do the request
req, err := http.NewRequest("GET", URL, nil)
if err != nil {
return nil, errors.Wrap(err, "readDir failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
f.addHeaders(req)
res, err := f.httpClient.Do(req)
res, err := f.httpClient.Get(URL)
if err == nil {
defer fs.CheckClose(res.Body, &err)
if res.StatusCode == http.StatusNotFound {
@@ -433,53 +364,34 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if !strings.HasSuffix(dir, "/") && dir != "" {
dir += "/"
}
names, err := f.readDir(ctx, dir)
names, err := f.readDir(dir)
if err != nil {
return nil, errors.Wrapf(err, "error listing %q", dir)
}
var (
entriesMu sync.Mutex // to protect entries
wg sync.WaitGroup
in = make(chan string, fs.Config.Checkers)
)
add := func(entry fs.DirEntry) {
entriesMu.Lock()
entries = append(entries, entry)
entriesMu.Unlock()
}
for i := 0; i < fs.Config.Checkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for remote := range in {
file := &Object{
fs: f,
remote: remote,
}
switch err := file.stat(ctx); err {
case nil:
add(file)
case fs.ErrorNotAFile:
// ...found a directory not a file
add(fs.NewDir(remote, timeUnset))
default:
fs.Debugf(remote, "skipping because of error: %v", err)
}
}
}()
}
for _, name := range names {
isDir := name[len(name)-1] == '/'
name = strings.TrimRight(name, "/")
remote := path.Join(dir, name)
if isDir {
add(fs.NewDir(remote, timeUnset))
dir := fs.NewDir(remote, timeUnset)
entries = append(entries, dir)
} else {
in <- remote
file := &Object{
fs: f,
remote: remote,
}
switch err = file.stat(); err {
case nil:
entries = append(entries, file)
case fs.ErrorNotAFile:
// ...found a directory not a file
dir := fs.NewDir(remote, timeUnset)
entries = append(entries, dir)
default:
fs.Debugf(remote, "skipping because of error: %v", err)
}
}
}
close(in)
wg.Wait()
return entries, nil
}
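
The deleted concurrency above is a standard bounded worker pool: fs.Config.Checkers goroutines drain a channel of names and append entries under a mutex. A stripped-down sketch of the pattern, with nothing rclone-specific:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	names := []string{"a.txt", "b.txt", "c/", "d.txt"}

	var (
		mu      sync.Mutex
		entries []string
		wg      sync.WaitGroup
		in      = make(chan string, 4)
	)

	// Fixed number of workers; each would do the per-file HEAD/stat here.
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for name := range in {
				mu.Lock()
				entries = append(entries, name)
				mu.Unlock()
			}
		}()
	}
	for _, name := range names {
		in <- name
	}
	close(in)
	wg.Wait()
	fmt.Println(len(entries), "entries")
}
```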
@@ -536,21 +448,9 @@ func (o *Object) url() string {
}
// stat updates the info field in the Object
func (o *Object) stat(ctx context.Context) error {
if o.fs.opt.NoHead {
o.size = -1
o.modTime = timeUnset
o.contentType = fs.MimeType(ctx, o)
return nil
}
func (o *Object) stat() error {
url := o.url()
req, err := http.NewRequest("HEAD", url, nil)
if err != nil {
return errors.Wrap(err, "stat failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
o.fs.addHeaders(req)
res, err := o.fs.httpClient.Do(req)
res, err := o.fs.httpClient.Head(url)
if err == nil && res.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
@@ -597,13 +497,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return nil, errors.Wrap(err, "Open failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
// Add optional headers
for k, v := range fs.OpenOptionHeaders(options) {
req.Header.Add(k, v)
}
o.fs.addHeaders(req)
// Do the request
res, err := o.fs.httpClient.Do(req)

View File

@@ -10,7 +10,6 @@ import (
"os"
"path/filepath"
"sort"
"strings"
"testing"
"time"
@@ -27,7 +26,6 @@ var (
remoteName = "TestHTTP"
testPath = "test"
filesPath = filepath.Join(testPath, "files")
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
)
// prepareServer prepares the test server and returns a function to tidy it up afterwards
@@ -35,16 +33,8 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
// file server for test/files
fileServer := http.FileServer(http.Dir(filesPath))
// test the headers are there then pass on to fileServer
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
fileServer.ServeHTTP(w, r)
})
// Make the test server
ts := httptest.NewServer(handler)
ts := httptest.NewServer(fileServer)
// Configure the remote
config.LoadConfig()
@@ -55,9 +45,8 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
// config.FileSet(remoteName, "url", ts.URL)
m := configmap.Simple{
"type": "http",
"url": ts.URL,
"headers": strings.Join(headers, ","),
"type": "http",
"url": ts.URL,
}
// return a function to tidy up


@@ -1,7 +1,6 @@
package hubic
import (
"context"
"net/http"
"time"
@@ -27,7 +26,7 @@ func newAuth(f *Fs) *auth {
func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials(context.TODO())
err = a.f.getCredentials()
if err == nil {
break
}


@@ -7,7 +7,6 @@ package hubic
// to be revisited after some actual experience.
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
@@ -116,12 +115,11 @@ func (f *Fs) String() string {
// getCredentials reads the OpenStack Credentials using the Hubic API
//
// The credentials are read into the Fs
func (f *Fs) getCredentials(ctx context.Context) (err error) {
func (f *Fs) getCredentials() (err error) {
req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil)
if err != nil {
return err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
resp, err := f.client.Do(req)
if err != nil {
return err
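
For reference, the request assembled above amounts to the following standalone call; the response fields here are an assumption about the Hubic API, not taken from this diff:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	accessToken := "XXXX" // hypothetical OAuth access token

	req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+accessToken)

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()

	// Assumed shape: an OpenStack token plus a storage endpoint.
	var creds struct {
		Token    string `json:"token"`
		Endpoint string `json:"endpoint"`
		Expires  string `json:"expires"`
	}
	if err := json.NewDecoder(res.Body).Decode(&creds); err != nil {
		panic(err)
	}
	fmt.Println(creds.Endpoint)
}
```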


@@ -46,126 +46,6 @@ func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// APIString returns Time string in Jottacloud API format
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
// LoginToken is struct representing the login token generated in the WebUI
type LoginToken struct {
Username string `json:"username"`
Realm string `json:"realm"`
WellKnownLink string `json:"well_known_link"`
AuthToken string `json:"auth_token"`
}
// WellKnown contains some configuration parameters for setting up endpoints
type WellKnown struct {
Issuer string `json:"issuer"`
AuthorizationEndpoint string `json:"authorization_endpoint"`
TokenEndpoint string `json:"token_endpoint"`
TokenIntrospectionEndpoint string `json:"token_introspection_endpoint"`
UserinfoEndpoint string `json:"userinfo_endpoint"`
EndSessionEndpoint string `json:"end_session_endpoint"`
JwksURI string `json:"jwks_uri"`
CheckSessionIframe string `json:"check_session_iframe"`
GrantTypesSupported []string `json:"grant_types_supported"`
ResponseTypesSupported []string `json:"response_types_supported"`
SubjectTypesSupported []string `json:"subject_types_supported"`
IDTokenSigningAlgValuesSupported []string `json:"id_token_signing_alg_values_supported"`
UserinfoSigningAlgValuesSupported []string `json:"userinfo_signing_alg_values_supported"`
RequestObjectSigningAlgValuesSupported []string `json:"request_object_signing_alg_values_supported"`
ResponseNodesSupported []string `json:"response_modes_supported"`
RegistrationEndpoint string `json:"registration_endpoint"`
TokenEndpointAuthMethodsSupported []string `json:"token_endpoint_auth_methods_supported"`
TokenEndpointAuthSigningAlgValuesSupported []string `json:"token_endpoint_auth_signing_alg_values_supported"`
ClaimsSupported []string `json:"claims_supported"`
ClaimTypesSupported []string `json:"claim_types_supported"`
ClaimsParameterSupported bool `json:"claims_parameter_supported"`
ScopesSupported []string `json:"scopes_supported"`
RequestParameterSupported bool `json:"request_parameter_supported"`
RequestURIParameterSupported bool `json:"request_uri_parameter_supported"`
CodeChallengeMethodsSupported []string `json:"code_challenge_methods_supported"`
TLSClientCertificateBoundAccessTokens bool `json:"tls_client_certificate_bound_access_tokens"`
IntrospectionEndpoint string `json:"introspection_endpoint"`
}
// TokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type TokenJSON struct {
AccessToken string `json:"access_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
RefreshExpiresIn int32 `json:"refresh_expires_in"`
RefreshToken string `json:"refresh_token"`
TokenType string `json:"token_type"`
IDToken string `json:"id_token"`
NotBeforePolicy int32 `json:"not-before-policy"`
SessionState string `json:"session_state"`
Scope string `json:"scope"`
}
// JSON structures returned by new API
// AllocateFileRequest to prepare an upload to Jottacloud
type AllocateFileRequest struct {
Bytes int64 `json:"bytes"`
Created string `json:"created"`
Md5 string `json:"md5"`
Modified string `json:"modified"`
Path string `json:"path"`
}
// AllocateFileResponse for upload requests
type AllocateFileResponse struct {
Name string `json:"name"`
Path string `json:"path"`
State string `json:"state"`
UploadID string `json:"upload_id"`
UploadURL string `json:"upload_url"`
Bytes int64 `json:"bytes"`
ResumePos int64 `json:"resume_pos"`
}
// UploadResponse after an upload
type UploadResponse struct {
Name string `json:"name"`
Path string `json:"path"`
Kind string `json:"kind"`
ContentID string `json:"content_id"`
Bytes int64 `json:"bytes"`
Md5 string `json:"md5"`
Created int64 `json:"created"`
Modified int64 `json:"modified"`
Deleted interface{} `json:"deleted"`
Mime string `json:"mime"`
}
// DeviceRegistrationResponse is the response to registering a device
type DeviceRegistrationResponse struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
}
// CustomerInfo provides general information about the account. Required for finding the correct internal username.
type CustomerInfo struct {
Username string `json:"username"`
Email string `json:"email"`
Name string `json:"name"`
CountryCode string `json:"country_code"`
LanguageCode string `json:"language_code"`
CustomerGroupCode string `json:"customer_group_code"`
BrandCode string `json:"brand_code"`
AccountType string `json:"account_type"`
SubscriptionType string `json:"subscription_type"`
Usage int64 `json:"usage"`
Qouta int64 `json:"quota"`
BusinessUsage int64 `json:"business_usage"`
BusinessQouta int64 `json:"business_quota"`
WriteLocked bool `json:"write_locked"`
ReadLocked bool `json:"read_locked"`
LockedCause interface{} `json:"locked_cause"`
WebHash string `json:"web_hash"`
AndroidHash string `json:"android_hash"`
IOSHash string `json:"ios_hash"`
}
// XML structures returned by the old API
// Flag is a hacky type for checking if an attribute is present
type Flag bool
@@ -184,6 +64,15 @@ func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
return attr, errors.New("unimplemented")
}
// TokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type TokenJSON struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
}
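
A token response decodes straight into this struct; a minimal sketch with made-up values:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type TokenJSON struct {
	AccessToken  string `json:"access_token"`
	TokenType    string `json:"token_type"`
	RefreshToken string `json:"refresh_token"`
	ExpiresIn    int32  `json:"expires_in"`
}

func main() {
	body := []byte(`{"access_token":"abc","token_type":"bearer","refresh_token":"def","expires_in":3600}`)
	var tok TokenJSON
	if err := json.Unmarshal(body, &tok); err != nil {
		panic(err)
	}
	fmt.Println(tok.TokenType, tok.ExpiresIn) // bearer 3600
}
```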
/*
GET http://www.jottacloud.com/JFS/<account>
@@ -213,8 +102,8 @@ GET http://www.jottacloud.com/JFS/<account>
</user>
*/
// DriveInfo represents a Jottacloud account
type DriveInfo struct {
// AccountInfo represents a Jottacloud account
type AccountInfo struct {
Username string `xml:"username"`
AccountType string `xml:"account-type"`
Locked bool `xml:"locked"`
@@ -391,3 +280,43 @@ func (e *Error) Error() string {
}
return out
}
// AllocateFileRequest to prepare an upload to Jottacloud
type AllocateFileRequest struct {
Bytes int64 `json:"bytes"`
Created string `json:"created"`
Md5 string `json:"md5"`
Modified string `json:"modified"`
Path string `json:"path"`
}
// AllocateFileResponse for upload requests
type AllocateFileResponse struct {
Name string `json:"name"`
Path string `json:"path"`
State string `json:"state"`
UploadID string `json:"upload_id"`
UploadURL string `json:"upload_url"`
Bytes int64 `json:"bytes"`
ResumePos int64 `json:"resume_pos"`
}
// UploadResponse after an upload
type UploadResponse struct {
Name string `json:"name"`
Path string `json:"path"`
Kind string `json:"kind"`
ContentID string `json:"content_id"`
Bytes int64 `json:"bytes"`
Md5 string `json:"md5"`
Created int64 `json:"created"`
Modified int64 `json:"modified"`
Deleted interface{} `json:"deleted"`
Mime string `json:"mime"`
}
// DeviceRegistrationResponse is the response to registering a device
type DeviceRegistrationResponse struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
}


@@ -4,13 +4,12 @@ import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/url"
"os"
@@ -26,11 +25,11 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
@@ -39,29 +38,33 @@ import (
// Globals
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta"
defaultMountpoint = "Archive"
rootURL = "https://www.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/"
defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
cachePrefix = "rclone-jcmd5-"
configDevice = "device"
configMountpoint = "mountpoint"
configTokenURL = "tokenURL"
configVersion = 1
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta"
defaultMountpoint = "Archive"
rootURL = "https://www.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/files/v1/"
baseURL = "https://www.jottacloud.com/"
tokenURL = "https://api.jottacloud.com/auth/v1/token"
registerURL = "https://api.jottacloud.com/auth/v1/register"
cachePrefix = "rclone-jcmd5-"
rcloneClientID = "nibfk8biu12ju7hpqomr8b1e40"
rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
configUsername = "user"
configClientID = "client_id"
configClientSecret = "client_secret"
configDevice = "device"
configMountpoint = "mountpoint"
charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
)
var (
// Description of how to auth for this app for a personal account
oauthConfig = &oauth2.Config{
ClientID: "jottacli",
Endpoint: oauth2.Endpoint{
AuthURL: defaultTokenURL,
TokenURL: defaultTokenURL,
AuthURL: tokenURL,
TokenURL: tokenURL,
},
RedirectURL: oauthutil.RedirectLocalhostURL,
}
@@ -75,67 +78,156 @@ func init() {
Description: "JottaCloud",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
refresh := false
if version, ok := m.Get("configVersion"); ok {
ver, err := strconv.Atoi(version)
if err != nil {
log.Fatalf("Failed to parse config version - corrupted config")
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm() {
return
}
refresh = ver != configVersion
}
if refresh {
fmt.Printf("Config outdated - refreshing\n")
} else {
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm(false) {
return
srv := rest.NewClient(fshttp.NewClient(fs.Config))
// ask if we should create a device specific token: https://github.com/rclone/rclone/issues/2995
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has its own Jottacloud API key which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine, it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
if config.Confirm() {
// random generator to generate random device names
seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
randomDeviceNamePartLength := 21
randomDeviceNamePart := make([]byte, randomDeviceNamePartLength)
for i := range randomDeviceNamePart {
randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
}
randomDeviceName := "rclone-" + string(randomDeviceNamePart)
fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)
values := url.Values{}
values.Set("device_id", randomDeviceName)
// all information comes from https://github.com/ttyridal/aiojotta/wiki/Jotta-protocol-3.-Authentication#token-authentication
opts := rest.Opts{
Method: "POST",
RootURL: registerURL,
ContentType: "application/x-www-form-urlencoded",
ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
Parameters: values,
}
var deviceRegistration api.DeviceRegistrationResponse
_, err := srv.CallJSON(&opts, nil, &deviceRegistration)
if err != nil {
log.Fatalf("Failed to register device: %v", err)
}
m.Set(configClientID, deviceRegistration.ClientID)
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
}
clientID, ok := m.Get(configClientID)
if !ok {
clientID = rcloneClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = rcloneEncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
username, ok := m.Get(configUsername)
if !ok {
log.Fatalf("No username defined")
}
password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")
// prepare our token request with username and password
values := url.Values{}
values.Set("grant_type", "PASSWORD")
values.Set("password", password)
values.Set("username", username)
values.Set("client_id", oauthConfig.ClientID)
values.Set("client_secret", oauthConfig.ClientSecret)
opts := rest.Opts{
Method: "POST",
RootURL: oauthConfig.Endpoint.AuthURL,
ContentType: "application/x-www-form-urlencoded",
Parameters: values,
}
var jsonToken api.TokenJSON
resp, err := srv.CallJSON(&opts, nil, &jsonToken)
if err != nil {
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
if resp != nil {
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
fmt.Printf("Enter verification code> ")
authCode := config.ReadLine()
authCode = strings.Replace(authCode, "-", "", -1) // the SMS received contains a pair of 3 digit numbers separated by '-' but the API wants a single 6 digit number
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
resp, err = srv.CallJSON(&opts, nil, &jsonToken)
}
}
if err != nil {
log.Fatalf("Failed to get resource token: %v", err)
}
}
clientConfig := *fs.Config
clientConfig.UserAgent = "JottaCli 0.6.18626 windows-amd64"
srv := rest.NewClient(fshttp.NewClient(&clientConfig))
var token oauth2.Token
token.AccessToken = jsonToken.AccessToken
token.RefreshToken = jsonToken.RefreshToken
token.TokenType = jsonToken.TokenType
token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
fmt.Printf("Login Token> ")
loginToken := config.ReadLine()
token, err := doAuth(ctx, srv, loginToken, m)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
}
// finally save them in the config
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
log.Fatalf("Error while saving token: %s", err)
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
if config.Confirm() {
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
}
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
acc, err := getAccountInfo(srv, username)
if err != nil {
log.Fatalf("Failed to setup mountpoint: %s", err)
log.Fatalf("Error getting devices: %s", err)
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
fmt.Printf("Please select the device to use. Normally this will be Jotta\n")
var deviceNames []string
for i := range acc.Devices {
deviceNames = append(deviceNames, acc.Devices[i].Name)
}
result := config.Choose("Devices", deviceNames, nil, false)
m.Set(configDevice, result)
m.Set("configVersion", strconv.Itoa(configVersion))
dev, err := getDeviceInfo(srv, path.Join(username, result))
if err != nil {
log.Fatalf("Error getting Mountpoint: %s", err)
}
if len(dev.MountPoints) == 0 {
log.Fatalf("No Mountpoints found for this device.")
}
fmt.Printf("Please select the mountpoint to user. Normally this will be Archive\n")
var mountpointNames []string
for i := range dev.MountPoints {
mountpointNames = append(mountpointNames, dev.MountPoints[i].Name)
}
result = config.Choose("Mountpoints", mountpointNames, nil, false)
m.Set(configMountpoint, result)
}
},
Options: []fs.Option{{
Name: configUsername,
Help: "User Name:",
}, {
Name: "md5_memory_limit",
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
Default: fs.SizeSuffix(10 * 1024 * 1024),
@@ -155,29 +247,19 @@ func init() {
Help: "Files bigger than this can be resumed if the upload fail's.",
Default: fs.SizeSuffix(10 * 1024 * 1024),
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Encode invalid UTF-8 bytes as xml doesn't handle them properly.
//
// Also: '*', '/', ':', '<', '>', '?', '\"', '\x00', '|'
Default: (encoder.Display |
encoder.EncodeWin | // :?"*<>|
encoder.EncodeInvalidUtf8),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Device string `config:"device"`
Mountpoint string `config:"mountpoint"`
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
HardDelete bool `config:"hard_delete"`
Unlink bool `config:"unlink"`
UploadThreshold fs.SizeSuffix `config:"upload_resume_limit"`
Enc encoder.MultiEncoder `config:"encoding"`
User string `config:"user"`
Device string `config:"device"`
Mountpoint string `config:"mountpoint"`
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
HardDelete bool `config:"hard_delete"`
Unlink bool `config:"unlink"`
UploadThreshold fs.SizeSuffix `config:"upload_resume_limit"`
}
// Fs represents a remote jottacloud
@@ -251,159 +333,8 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// doAuth runs the actual token request
func doAuth(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
if err != nil {
return token, err
}
// decode login token
var loginToken api.LoginToken
decoder := json.NewDecoder(bytes.NewReader(loginTokenBytes))
err = decoder.Decode(&loginToken)
if err != nil {
return token, err
}
// retrieve endpoint urls
opts := rest.Opts{
Method: "GET",
RootURL: loginToken.WellKnownLink,
}
var wellKnown api.WellKnown
_, err = srv.CallJSON(ctx, &opts, nil, &wellKnown)
if err != nil {
return token, err
}
// save the tokenurl
oauthConfig.Endpoint.AuthURL = wellKnown.TokenEndpoint
oauthConfig.Endpoint.TokenURL = wellKnown.TokenEndpoint
m.Set(configTokenURL, wellKnown.TokenEndpoint)
// prepare our token request with username and password
values := url.Values{}
values.Set("client_id", "jottacli")
values.Set("grant_type", "password")
values.Set("password", loginToken.AuthToken)
values.Set("scope", "offline_access+openid")
values.Set("username", loginToken.Username)
values.Encode()
opts = rest.Opts{
Method: "POST",
RootURL: oauthConfig.Endpoint.AuthURL,
ContentType: "application/x-www-form-urlencoded",
Body: strings.NewReader(values.Encode()),
}
// do the first request
var jsonToken api.TokenJSON
_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
if err != nil {
return token, err
}
token.AccessToken = jsonToken.AccessToken
token.RefreshToken = jsonToken.RefreshToken
token.TokenType = jsonToken.TokenType
token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
return token, err
}
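For reference, the login token pasted in above is base64url-encoded JSON; here is a minimal standalone sketch of the first decode step doAuth performs (the JSON field names and token value are assumptions for illustration, not the confirmed shape of api.LoginToken):
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	// stand-in for a token copied from https://www.jottacloud.com/web/secure
	payload := `{"username":"jottauser","auth_token":"secret","well_known_link":"https://id.example/.well-known/openid-configuration"}`
	loginTokenBase64 := base64.RawURLEncoding.EncodeToString([]byte(payload))

	// decode the outer base64url layer, then the JSON inside it
	raw, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
	if err != nil {
		panic(err)
	}
	var lt struct {
		Username      string `json:"username"`
		AuthToken     string `json:"auth_token"`
		WellKnownLink string `json:"well_known_link"`
	}
	if err := json.Unmarshal(raw, &lt); err != nil {
		panic(err)
	}
	fmt.Println(lt.Username, lt.WellKnownLink)
}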
// setupMountpoint sets up a custom device and mountpoint if desired by the user
func setupMountpoint(ctx context.Context, srv *rest.Client, apiSrv *rest.Client) (device, mountpoint string, err error) {
cust, err := getCustomerInfo(ctx, apiSrv)
if err != nil {
return "", "", err
}
acc, err := getDriveInfo(ctx, srv, cust.Username)
if err != nil {
return "", "", err
}
var deviceNames []string
for i := range acc.Devices {
deviceNames = append(deviceNames, acc.Devices[i].Name)
}
fmt.Printf("Please select the device to use. Normally this will be Jotta\n")
device = config.Choose("Devices", deviceNames, nil, false)
dev, err := getDeviceInfo(ctx, srv, path.Join(cust.Username, device))
if err != nil {
return "", "", err
}
if len(dev.MountPoints) == 0 {
return "", "", errors.New("no mountpoints for selected device")
}
var mountpointNames []string
for i := range dev.MountPoints {
mountpointNames = append(mountpointNames, dev.MountPoints[i].Name)
}
fmt.Printf("Please select the mountpoint to user. Normally this will be Archive\n")
mountpoint = config.Choose("Mountpoints", mountpointNames, nil, false)
return device, mountpoint, err
}
// getCustomerInfo queries general information about the account
func getCustomerInfo(ctx context.Context, srv *rest.Client) (info *api.CustomerInfo, err error) {
opts := rest.Opts{
Method: "GET",
Path: "account/v1/customer",
}
_, err = srv.CallJSON(ctx, &opts, nil, &info)
if err != nil {
return nil, errors.Wrap(err, "couldn't get customer info")
}
return info, nil
}
// getDriveInfo queries general information about the account and the available devices and mountpoints.
func getDriveInfo(ctx context.Context, srv *rest.Client, username string) (info *api.DriveInfo, err error) {
opts := rest.Opts{
Method: "GET",
Path: username,
}
_, err = srv.CallXML(ctx, &opts, nil, &info)
if err != nil {
return nil, errors.Wrap(err, "couldn't get drive info")
}
return info, nil
}
// getDeviceInfo queries Information about a jottacloud device
func getDeviceInfo(ctx context.Context, srv *rest.Client, path string) (info *api.JottaDevice, err error) {
opts := rest.Opts{
Method: "GET",
Path: urlPathEscape(path),
}
_, err = srv.CallXML(ctx, &opts, nil, &info)
if err != nil {
return nil, errors.Wrap(err, "couldn't get device info")
}
return info, nil
}
// setEndpointURL generates the API endpoint URL
func (f *Fs) setEndpointURL() {
if f.opt.Device == "" {
f.opt.Device = defaultDevice
}
if f.opt.Mountpoint == "" {
f.opt.Mountpoint = defaultMountpoint
}
f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
}
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.JottaFile, err error) {
func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
opts := rest.Opts{
Method: "GET",
Path: f.filePath(path),
@@ -411,7 +342,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Jo
var result api.JottaFile
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
resp, err = f.srv.CallXML(&opts, nil, &result)
return shouldRetry(resp, err)
})
@@ -431,6 +362,54 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Jo
return &result, nil
}
// getAccountInfo queries general information about the account.
// Takes rest.Client and username as parameters to be easily usable
// during config
func getAccountInfo(srv *rest.Client, username string) (info *api.AccountInfo, err error) {
opts := rest.Opts{
Method: "GET",
Path: urlPathEscape(username),
}
_, err = srv.CallXML(&opts, nil, &info)
if err != nil {
return nil, err
}
return info, nil
}
// getDeviceInfo queries Information about a jottacloud device
func getDeviceInfo(srv *rest.Client, path string) (info *api.JottaDevice, err error) {
opts := rest.Opts{
Method: "GET",
Path: urlPathEscape(path),
}
_, err = srv.CallXML(&opts, nil, &info)
if err != nil {
return nil, err
}
return info, nil
}
// setEndpointURL reads the account ID and generates the API endpoint URL
func (f *Fs) setEndpointURL() (err error) {
info, err := getAccountInfo(f.srv, f.user)
if err != nil {
return errors.Wrap(err, "failed to get endpoint url")
}
if f.opt.Device == "" {
f.opt.Device = defaultDevice
}
if f.opt.Mountpoint == "" {
f.opt.Mountpoint = defaultMountpoint
}
f.endpointURL = urlPathEscape(path.Join(info.Username, f.opt.Device, f.opt.Mountpoint))
return nil
}
// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
// Decode error response
@@ -455,7 +434,7 @@ func urlPathEscape(in string) string {
// filePathRaw returns an unescaped file path (f.root, file)
func (f *Fs) filePathRaw(file string) string {
return path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(path.Join(f.root, file)))
return path.Join(f.endpointURL, replaceReservedChars(path.Join(f.root, file)))
}
// filePath returns an escaped file path (f.root, file)
@@ -463,9 +442,36 @@ func (f *Fs) filePath(file string) string {
return urlPathEscape(f.filePathRaw(file))
}
// filePath returns an escaped file path (f.root, remote)
func (o *Object) filePath() string {
return o.fs.filePath(o.remote)
}
// Jottacloud requires the grant_type 'refresh_token' string
// to be uppercase and throws a 400 Bad Request if we use the
// lower case used by the oauth2 module
//
// This filter catches all refresh requests, reads the body,
// changes the case and then sends it on
func grantTypeFilter(req *http.Request) {
if tokenURL == req.URL.String() {
// read the entire body
refreshBody, err := ioutil.ReadAll(req.Body)
if err != nil {
return
}
_ = req.Body.Close()
// make the refresh token upper case
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
// set the new ReadCloser (with a dummy Close())
req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
}
}
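A minimal sketch of the filter in action, written as an example-style test that could sit in this package (it assumes only the tokenURL constant and grantTypeFilter shown above; errors are ignored for brevity):
func ExampleGrantTypeFilter() {
	body := "grant_type=refresh_token&refresh_token=XYZ"
	req, _ := http.NewRequest("POST", tokenURL, strings.NewReader(body))
	grantTypeFilter(req) // rewrites the body in place
	fixed, _ := ioutil.ReadAll(req.Body)
	fmt.Println(string(fixed))
	// Output: grant_type=REFRESH_TOKEN&refresh_token=XYZ
}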
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -473,40 +479,39 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
// Check config version
var ok bool
var version string
if version, ok = m.Get("configVersion"); ok {
ver, err := strconv.Atoi(version)
if err != nil {
return nil, errors.New("Failed to parse config version")
}
ok = ver == configVersion
}
rootIsDir := strings.HasSuffix(root, "/")
root = parsePath(root)
clientID, ok := m.Get(configClientID)
if !ok {
return nil, errors.New("Outdated config - please reconfigure this backend")
clientID = rcloneClientID
}
// if custom endpoints are set use them, else stick with the defaults
if tokenURL, ok := m.Get(configTokenURL); ok {
oauthConfig.Endpoint.TokenURL = tokenURL
// jottacloud is weird. we need to use the tokenURL as authURL
oauthConfig.Endpoint.AuthURL = tokenURL
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = rcloneEncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
// Create OAuth Client
// the oauth client for the api servers needs
// a filter to fix the grant_type issues (see above)
baseClient := fshttp.NewClient(fs.Config)
if do, ok := baseClient.Transport.(interface {
SetRequestFilter(f func(req *http.Request))
}); ok {
do.SetRequestFilter(grantTypeFilter)
} else {
fs.Debugf(name+":", "Couldn't add request filter - uploads will fail")
}
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
}
rootIsDir := strings.HasSuffix(root, "/")
root = parsePath(root)
f := &Fs{
name: name,
root: root,
user: opt.User,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
@@ -522,16 +527,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
_, err := f.readMetaDataForPath("")
return err
})
cust, err := getCustomerInfo(ctx, f.apiSrv)
err = f.setEndpointURL()
if err != nil {
return nil, err
return nil, errors.Wrap(err, "couldn't get account info")
}
f.user = cust.Username
f.setEndpointURL()
if root != "" && !rootIsDir {
// Check to see if the root actually an existing file
@@ -558,7 +561,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.JottaFile) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -568,7 +571,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Jot
// Set info
err = o.setMetaData(info)
} else {
err = o.readMetaData(ctx, false) // reads info and meta, returning an error
err = o.readMetaData(false) // reads info and meta, returning an error
}
if err != nil {
return nil, err
@@ -579,11 +582,11 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Jot
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
return f.newObjectWithInfo(remote, nil)
}
// CreateDir makes a directory
func (f *Fs) CreateDir(ctx context.Context, path string) (jf *api.JottaFolder, err error) {
func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) {
// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
var resp *http.Response
opts := rest.Opts{
@@ -595,7 +598,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (jf *api.JottaFolder, e
opts.Parameters.Set("mkDir", "true")
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &jf)
resp, err = f.srv.CallXML(&opts, nil, &jf)
return shouldRetry(resp, err)
})
if err != nil {
@@ -616,6 +619,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (jf *api.JottaFolder, e
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
//fmt.Printf("List: %s\n", f.filePath(dir))
opts := rest.Opts{
Method: "GET",
Path: f.filePath(dir),
@@ -624,7 +628,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
var resp *http.Response
var result api.JottaFolder
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
resp, err = f.srv.CallXML(&opts, nil, &result)
return shouldRetry(resp, err)
})
@@ -647,7 +651,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if item.Deleted {
continue
}
remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
remote := path.Join(dir, restoreReservedChars(item.Name))
d := fs.NewDir(remote, time.Time(item.ModifiedAt))
entries = append(entries, d)
}
@@ -657,13 +661,14 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if item.Deleted || item.State != "COMPLETED" {
continue
}
remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
o, err := f.newObjectWithInfo(ctx, remote, item)
remote := path.Join(dir, restoreReservedChars(item.Name))
o, err := f.newObjectWithInfo(remote, item)
if err != nil {
continue
}
entries = append(entries, o)
}
//fmt.Printf("Entries: %+v\n", entries)
return entries, nil
}
@@ -672,7 +677,7 @@ type listFileDirFn func(fs.DirEntry) error
// List the objects and directories into entries, from a
// special kind of JottaFolder representing a FileDirList
func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolder *api.JottaFolder, fn listFileDirFn) error {
func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, fn listFileDirFn) error {
pathPrefix := "/" + f.filePathRaw("") // Non-escaped prefix of API paths to be cut off, to be left with the remote path including the remoteStartPath
pathPrefixLength := len(pathPrefix)
startPath := path.Join(pathPrefix, remoteStartPath) // Non-escaped API path up to and including remoteStartPath, to decide if it should be created as a new dir object
@@ -682,7 +687,7 @@ func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolde
if folder.Deleted {
return nil
}
folderPath := f.opt.Enc.ToStandardPath(path.Join(folder.Path, folder.Name))
folderPath := restoreReservedChars(path.Join(folder.Path, folder.Name))
folderPathLength := len(folderPath)
var remoteDir string
if folderPathLength > pathPrefixLength {
@@ -700,8 +705,8 @@ func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolde
if file.Deleted || file.State != "COMPLETED" {
continue
}
remoteFile := path.Join(remoteDir, f.opt.Enc.ToStandardName(file.Name))
o, err := f.newObjectWithInfo(ctx, remoteFile, file)
remoteFile := path.Join(remoteDir, restoreReservedChars(file.Name))
o, err := f.newObjectWithInfo(remoteFile, file)
if err != nil {
return err
}
@@ -719,6 +724,17 @@ func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolde
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
opts := rest.Opts{
Method: "GET",
@@ -730,7 +746,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
var resp *http.Response
var result api.JottaFolder // Could be JottaFileDirList, but JottaFolder is close enough
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
resp, err = f.srv.CallXML(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -743,7 +759,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return errors.Wrap(err, "couldn't list files")
}
list := walk.NewListRHelper(callback)
err = f.listFileDir(ctx, dir, &result, func(entry fs.DirEntry) error {
err = f.listFileDir(dir, &result, func(entry fs.DirEntry) error {
return list.Add(entry)
})
if err != nil {
@@ -797,7 +813,7 @@ func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
_, err := f.CreateDir(ctx, dir)
_, err := f.CreateDir(dir)
return err
}
@@ -836,13 +852,14 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't purge directory")
}
// TODO: Parse response?
return nil
}
@@ -859,23 +876,27 @@ func (f *Fs) Precision() time.Duration {
}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}
// copyOrMove copies or moves directories or files depending on the method parameter
func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *api.JottaFile, err error) {
func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err error) {
opts := rest.Opts{
Method: "POST",
Path: src,
Parameters: url.Values{},
}
opts.Parameters.Set(method, "/"+path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(path.Join(f.root, dest))))
opts.Parameters.Set(method, "/"+path.Join(f.endpointURL, replaceReservedChars(path.Join(f.root, dest))))
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
resp, err = f.srv.CallXML(&opts, nil, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -904,13 +925,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, err
}
info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)
info, err := f.copyOrMove("cp", srcObj.filePath(), remote)
if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
}
return f.newObjectWithInfo(ctx, remote, info)
return f.newObjectWithInfo(remote, info)
//return f.newObjectWithInfo(remote, &result)
}
@@ -934,13 +955,13 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, err
}
info, err := f.copyOrMove(ctx, "mv", srcObj.filePath(), remote)
info, err := f.copyOrMove("mv", srcObj.filePath(), remote)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
return f.newObjectWithInfo(ctx, remote, info)
return f.newObjectWithInfo(remote, info)
//return f.newObjectWithInfo(remote, result)
}
@@ -978,7 +999,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return fs.ErrorDirExists
}
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
_, err = f.copyOrMove("mvDir", path.Join(f.endpointURL, replaceReservedChars(srcPath))+"/", dstRemote)
if err != nil {
return errors.Wrap(err, "couldn't move directory")
@@ -1003,7 +1024,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
var resp *http.Response
var result api.JottaFile
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
resp, err = f.srv.CallXML(&opts, nil, &result)
return shouldRetry(resp, err)
})
@@ -1034,7 +1055,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
info, err := getDriveInfo(ctx, f.srv, f.user)
info, err := getAccountInfo(f.srv, f.user)
if err != nil {
return nil, err
}
@@ -1074,11 +1095,6 @@ func (o *Object) Remote() string {
return o.remote
}
// filePath returns an escaped file path (f.root, remote)
func (o *Object) filePath() string {
return o.fs.filePath(o.remote)
}
// Hash returns the MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
@@ -1089,8 +1105,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
ctx := context.TODO()
err := o.readMetaData(ctx, false)
err := o.readMetaData(false)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return 0
@@ -1113,12 +1128,11 @@ func (o *Object) setMetaData(info *api.JottaFile) (err error) {
return nil
}
// readMetaData reads and updates the metadata for an object
func (o *Object) readMetaData(ctx context.Context, force bool) (err error) {
func (o *Object) readMetaData(force bool) (err error) {
if o.hasMetaData && !force {
return nil
}
info, err := o.fs.readMetaDataForPath(ctx, o.remote)
info, err := o.fs.readMetaDataForPath(o.remote)
if err != nil {
return err
}
@@ -1133,7 +1147,7 @@ func (o *Object) readMetaData(ctx context.Context, force bool) (err error) {
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx, false)
err := o.readMetaData(false)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -1165,7 +1179,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
opts.Parameters.Set("mode", "bin")
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1258,7 +1272,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var resp *http.Response
opts := rest.Opts{
Method: "POST",
Path: "files/v1/allocate",
Path: "allocate",
ExtraHeaders: make(map[string]string),
}
fileDate := api.Time(src.ModTime(ctx)).APIString()
@@ -1269,13 +1283,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Created: fileDate,
Modified: fileDate,
Md5: md5String,
Path: path.Join(o.fs.opt.Mountpoint, o.fs.opt.Enc.FromStandardPath(path.Join(o.fs.root, o.remote))),
Path: path.Join(o.fs.opt.Mountpoint, replaceReservedChars(path.Join(o.fs.root, o.remote))),
}
// send it
var response api.AllocateFileResponse
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, &request, &response)
resp, err = o.fs.apiSrv.CallJSON(&opts, &request, &response)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1306,7 +1320,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// send the remaining bytes
resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, nil, &result)
resp, err = o.fs.apiSrv.CallJSON(&opts, nil, &result)
if err != nil {
return err
}
@@ -1317,8 +1331,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
o.md5 = result.Md5
o.modTime = time.Unix(result.Modified/1000, 0)
} else {
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata
return o.readMetaData(ctx, true)
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata
return o.readMetaData(true)
}
return nil
@@ -1340,7 +1354,7 @@ func (o *Object) Remove(ctx context.Context) error {
}
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
resp, err := o.fs.srv.CallXML(&opts, nil, nil)
return shouldRetry(resp, err)
})
}

View File

@@ -0,0 +1,77 @@
/*
Translate file names for JottaCloud adapted from OneDrive
The following characters are JottaCloud reserved characters, and can't
be used in JottaCloud folder and file names.
jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~"
*/
package jottacloud
import (
"regexp"
"strings"
)
// charMap holds replacements for characters
//
// JottaCloud has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
'*': '＊', // FULLWIDTH ASTERISK
'<': '＜', // FULLWIDTH LESS-THAN SIGN
'>': '＞', // FULLWIDTH GREATER-THAN SIGN
'?': '？', // FULLWIDTH QUESTION MARK
':': '：', // FULLWIDTH COLON
';': '；', // FULLWIDTH SEMICOLON
'|': '｜', // FULLWIDTH VERTICAL LINE
'"': '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
' ': '␠', // SYMBOL FOR SPACE
}
invCharMap map[rune]rune
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
fixEndingWithSpace = regexp.MustCompile(` (/|$)`)
)
func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}
// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// Filenames can't start with space
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Filenames can't end with space
in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != ' ' {
return replacement
}
return c
}, in)
}
// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}
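A quick round trip with the two helpers above (a sketch; the expected output is shown in the comments):
enc := replaceReservedChars(`movie: part 1?`)
fmt.Println(enc)                       // movie： part 1？
fmt.Println(restoreReservedChars(enc)) // movie: part 1?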

View File

@@ -0,0 +1,28 @@
package jottacloud
import "testing"
func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{`\*<>?:;|"`, `＼＊＜＞？：；｜＂`},
{`\*<>?:;|"\*<>?:;|"`, `＼＊＜＞？：；｜＂＼＊＜＞？：；｜＂`},
{" leading space", "␠leading space"},
{"trailing space ", "trailing space␠"},
{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
{"trailing space /trailing space /trailing space ", "trailing space␠/trailing space␠/trailing space␠"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}

View File

@@ -12,13 +12,10 @@ import (
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
httpclient "github.com/koofr/go-httpclient"
koofrclient "github.com/koofr/go-koofrclient"
@@ -30,53 +27,46 @@ func init() {
Name: "koofr",
Description: "Koofr",
NewFs: NewFs,
Options: []fs.Option{{
Name: "endpoint",
Help: "The Koofr API endpoint to use",
Default: "https://app.koofr.net",
Required: true,
Advanced: true,
}, {
Name: "mountid",
Help: "Mount ID of the mount to use. If omitted, the primary mount is used.",
Required: false,
Default: "",
Advanced: true,
}, {
Name: "setmtime",
Help: "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
Default: true,
Required: true,
Advanced: true,
}, {
Name: "user",
Help: "Your Koofr user name",
Required: true,
}, {
Name: "password",
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
IsPassword: true,
Required: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}},
Options: []fs.Option{
{
Name: "endpoint",
Help: "The Koofr API endpoint to use",
Default: "https://app.koofr.net",
Required: true,
Advanced: true,
}, {
Name: "mountid",
Help: "Mount ID of the mount to use. If omitted, the primary mount is used.",
Required: false,
Default: "",
Advanced: true,
}, {
Name: "setmtime",
Help: "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
Default: true,
Required: true,
Advanced: true,
}, {
Name: "user",
Help: "Your Koofr user name",
Required: true,
}, {
Name: "password",
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
IsPassword: true,
Required: true,
},
},
})
}
// Options represent the configuration of the Koofr backend
type Options struct {
Endpoint string `config:"endpoint"`
MountID string `config:"mountid"`
User string `config:"user"`
Password string `config:"password"`
SetMTime bool `config:"setmtime"`
Enc encoder.MultiEncoder `config:"encoding"`
Endpoint string `config:"endpoint"`
MountID string `config:"mountid"`
User string `config:"user"`
Password string `config:"password"`
SetMTime bool `config:"setmtime"`
}
// A Fs is a representation of a remote Koofr Fs
@@ -164,7 +154,6 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
var sOff, eOff int64 = 0, -1
fs.FixRangeOption(options, o.Size())
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
@@ -181,6 +170,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
if sOff == 0 && eOff < 0 {
return o.fs.client.FilesGet(o.fs.mountID, o.fullPath())
}
if sOff < 0 {
sOff = o.Size() - eOff
eOff = o.Size()
}
if eOff > o.Size() {
eOff = o.Size()
}
span := &koofrclient.FileSpan{
Start: sOff,
End: eOff,
@@ -252,7 +248,7 @@ func (f *Fs) Hashes() hash.Set {
// fullPath constructs a full, absolute path from a Fs root relative path,
func (f *Fs) fullPath(part string) string {
return f.opt.Enc.FromStandardPath(path.Join("/", f.root, part))
return path.Join("/", f.root, part)
}
// NewFs constructs a new filesystem given a root path and configuration options
@@ -266,9 +262,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
if err != nil {
return nil, err
}
httpClient := httpclient.New()
httpClient.Client = fshttp.NewClient(fs.Config)
client := koofrclient.NewKoofrClientWithHTTPClient(opt.Endpoint, httpClient)
client := koofrclient.NewKoofrClient(opt.Endpoint, false)
basicAuth := fmt.Sprintf("Basic %s",
base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
client.HTTPClient.Headers.Set("Authorization", basicAuth)
@@ -305,7 +299,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
}
return nil, errors.New("Failed to find mount " + opt.MountID)
}
rootFile, err := f.client.FilesInfo(f.mountID, f.opt.Enc.FromStandardPath("/"+f.root))
rootFile, err := f.client.FilesInfo(f.mountID, "/"+f.root)
if err == nil && rootFile.Type != "dir" {
f.root = dir(f.root)
err = fs.ErrorIsFile
@@ -323,14 +317,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
entries = make([]fs.DirEntry, len(files))
for i, file := range files {
remote := path.Join(dir, f.opt.Enc.ToStandardName(file.Name))
if file.Type == "dir" {
entries[i] = fs.NewDir(remote, time.Unix(0, 0))
entries[i] = fs.NewDir(path.Join(dir, file.Name), time.Unix(0, 0))
} else {
entries[i] = &Object{
fs: f,
info: file,
remote: remote,
remote: path.Join(dir, file.Name),
}
}
}

View File

@@ -1,11 +0,0 @@
//+build darwin
package local
import "github.com/rclone/rclone/lib/encoder"
// This is the encoding used by the local backend for macOS
//
// macOS can't store invalid UTF-8, it converts them into %XX encoding
const defaultEnc = (encoder.Base |
encoder.EncodeInvalidUtf8)

View File

@@ -1,8 +0,0 @@
//+build !windows,!darwin
package local
import "github.com/rclone/rclone/lib/encoder"
// This is the encoding used by the local backend for non windows platforms
const defaultEnc = encoder.Base

View File

@@ -1,33 +0,0 @@
//+build windows
package local
import "github.com/rclone/rclone/lib/encoder"
// This is the encoding used by the local backend for windows platforms
//
// List of replaced characters:
// < (less than) -> '＜' // FULLWIDTH LESS-THAN SIGN
// > (greater than) -> '＞' // FULLWIDTH GREATER-THAN SIGN
// : (colon) -> '：' // FULLWIDTH COLON
// " (double quote) -> '＂' // FULLWIDTH QUOTATION MARK
// \ (backslash) -> '＼' // FULLWIDTH REVERSE SOLIDUS
// | (vertical line) -> '｜' // FULLWIDTH VERTICAL LINE
// ? (question mark) -> '？' // FULLWIDTH QUESTION MARK
// * (asterisk) -> '＊' // FULLWIDTH ASTERISK
//
// Additionally names can't end with a period (.) or space ( ).
// List of replaced characters:
// . (period) -> '．' // FULLWIDTH FULL STOP
// (space) -> '␠' // SYMBOL FOR SPACE
//
// Also encode invalid UTF-8 bytes as Go can't convert them to UTF-16.
//
// https://docs.microsoft.com/de-de/windows/desktop/FileIO/naming-a-file#naming-conventions
const defaultEnc = (encoder.Base |
encoder.EncodeWin |
encoder.EncodeBackSlash |
encoder.EncodeCtl |
encoder.EncodeRightSpace |
encoder.EncodeRightPeriod |
encoder.EncodeInvalidUtf8)

View File

@@ -1,12 +0,0 @@
//+build !linux
package local
import (
"io"
"os"
)
func newFadviseReadCloser(o *Object, f *os.File, offset, limit int64) io.ReadCloser {
return f
}

View File

@@ -1,165 +0,0 @@
//+build linux
package local
import (
"io"
"os"
"github.com/rclone/rclone/fs"
"golang.org/x/sys/unix"
)
// fadvise provides means to automate freeing pages in kernel page cache for
// a given file descriptor as the file is sequentially processed (read or
// written).
//
// When copying a file to a remote backend all the file content is read by
// kernel and put to page cache to make future reads faster.
// This causes memory pressure visible in both memory usage and CPU consumption
// and can even cause OOM errors in applications consuming large amounts of memory.
//
// In case of an upload to a remote backend, there is no benefit from caching.
//
// fadvise would orchestrate calling POSIX_FADV_DONTNEED
//
// POSIX_FADV_DONTNEED attempts to free cached pages associated
// with the specified region. This is useful, for example, while
// streaming large files. A program may periodically request the
// kernel to free cached data that has already been used, so that
// more useful cached pages are not discarded instead.
//
// Requests to discard partial pages are ignored. It is
// preferable to preserve needed data than discard unneeded data.
// If the application requires that data be considered for
// discarding, then offset and len must be page-aligned.
//
// The implementation may attempt to write back dirty pages in
// the specified region, but this is not guaranteed. Any
// unwritten dirty pages will not be freed. If the application
// wishes to ensure that dirty pages will be released, it should
// call fsync(2) or fdatasync(2) first.
type fadvise struct {
o *Object
fd int
lastPos int64
curPos int64
windowSize int64
freePagesCh chan offsetLength
doneCh chan struct{}
}
type offsetLength struct {
offset int64
length int64
}
const (
defaultAllowPages = 32
defaultWorkerQueueSize = 64
)
func newFadvise(o *Object, fd int, offset int64) *fadvise {
f := &fadvise{
o: o,
fd: fd,
lastPos: offset,
curPos: offset,
windowSize: int64(os.Getpagesize()) * defaultAllowPages,
freePagesCh: make(chan offsetLength, defaultWorkerQueueSize),
doneCh: make(chan struct{}),
}
go f.worker()
return f
}
// sequential configures readahead strategy in Linux kernel.
//
// Under Linux, POSIX_FADV_NORMAL sets the readahead window to the
// default size for the backing device; POSIX_FADV_SEQUENTIAL doubles
// this size, and POSIX_FADV_RANDOM disables file readahead entirely.
func (f *fadvise) sequential(limit int64) bool {
l := int64(0)
if limit > 0 {
l = limit
}
if err := unix.Fadvise(f.fd, f.curPos, l, unix.FADV_SEQUENTIAL); err != nil {
fs.Debugf(f.o, "fadvise sequential failed on file descriptor %d: %s", f.fd, err)
return false
}
return true
}
func (f *fadvise) next(n int) {
f.curPos += int64(n)
f.freePagesIfNeeded()
}
func (f *fadvise) freePagesIfNeeded() {
if f.curPos >= f.lastPos+f.windowSize {
f.freePages()
}
}
func (f *fadvise) freePages() {
f.freePagesCh <- offsetLength{f.lastPos, f.curPos - f.lastPos}
f.lastPos = f.curPos
}
func (f *fadvise) worker() {
for p := range f.freePagesCh {
if err := unix.Fadvise(f.fd, p.offset, p.length, unix.FADV_DONTNEED); err != nil {
fs.Debugf(f.o, "fadvise dontneed failed on file descriptor %d: %s", f.fd, err)
}
}
close(f.doneCh)
}
func (f *fadvise) wait() {
close(f.freePagesCh)
<-f.doneCh
}
type fadviseReadCloser struct {
*fadvise
inner io.ReadCloser
}
// newFadviseReadCloser wraps os.File so that reading from that file would
// remove already consumed pages from kernel page cache.
// In addition to that it instructs kernel to double the readahead window to
// make sequential reads faster.
// See also fadvise.
func newFadviseReadCloser(o *Object, f *os.File, offset, limit int64) io.ReadCloser {
r := fadviseReadCloser{
fadvise: newFadvise(o, int(f.Fd()), offset),
inner: f,
}
// If syscall failed it's likely that the subsequent syscalls to that
// file descriptor would also fail. In that case return the provided os.File
// pointer.
if !r.sequential(limit) {
r.wait()
return f
}
return r
}
func (f fadviseReadCloser) Read(p []byte) (n int, err error) {
n, err = f.inner.Read(p)
f.next(n)
return
}
func (f fadviseReadCloser) Close() error {
f.freePages()
f.wait()
return f.inner.Close()
}
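For context, a short sketch of how the wrapper is meant to be consumed, mirroring the Hash call site in this backend (readAndDiscard is a hypothetical helper, and an io/ioutil import is assumed):
// readAndDiscard streams a local file through the wrapper so that
// consumed pages are dropped from the kernel page cache as it reads.
func readAndDiscard(o *Object, path string) error {
	fd, err := os.Open(path)
	if err != nil {
		return err
	}
	in := newFadviseReadCloser(o, fd, 0, 0) // offset 0, no limit: whole file
	defer in.Close()
	_, err = io.Copy(ioutil.Discard, in)
	return err
}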

View File

@@ -20,12 +20,10 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers"
)
@@ -117,11 +115,6 @@ Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: defaultEnc,
}},
}
fs.Register(fsi)
@@ -129,16 +122,15 @@ to override the default choice.`,
// Options defines the configuration for this backend
type Options struct {
FollowSymlinks bool `config:"copy_links"`
TranslateSymlinks bool `config:"links"`
SkipSymlinks bool `config:"skip_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"`
CaseSensitive bool `config:"case_sensitive"`
CaseInsensitive bool `config:"case_insensitive"`
Enc encoder.MultiEncoder `config:"encoding"`
FollowSymlinks bool `config:"copy_links"`
TranslateSymlinks bool `config:"links"`
SkipSymlinks bool `config:"skip_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"`
CaseSensitive bool `config:"case_sensitive"`
CaseInsensitive bool `config:"case_insensitive"`
}
// Fs represents a local filesystem rooted at root
@@ -150,19 +142,19 @@ type Fs struct {
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'.
wmu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
dirNames *mapper // directory name mapping
objectHashesMu sync.Mutex // global lock for Object.hashes
}
// Object represents a local filesystem object
type Object struct {
fs *Fs // The Fs this object is part of
remote string // The remote path (encoded path)
path string // The local path (OS path)
remote string // The remote path - properly UTF-8 encoded - for rclone
path string // The local path - may not be properly UTF-8 encoded - for OS
size int64 // file metadata - always present
mode os.FileMode
modTime time.Time
@@ -191,17 +183,17 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
f := &Fs{
name: name,
opt: *opt,
warned: make(map[string]struct{}),
dev: devUnset,
lstat: os.Lstat,
name: name,
opt: *opt,
warned: make(map[string]struct{}),
dev: devUnset,
lstat: os.Lstat,
dirNames: newMapper(),
}
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
f.root = f.cleanPath(root)
f.features = (&fs.Features{
CaseInsensitive: f.caseInsensitive(),
CanHaveEmptyDirectories: true,
IsLocal: true,
}).Fill(f)
if opt.FollowSymlinks {
f.lstat = os.Stat
@@ -242,12 +234,12 @@ func (f *Fs) Name() string {
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.opt.Enc.ToStandardPath(filepath.ToSlash(f.root))
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Local file system at %s", f.Root())
return fmt.Sprintf("Local file system at %s", f.root)
}
// Features returns the optional features of this Fs
@@ -275,27 +267,33 @@ func (f *Fs) caseInsensitive() bool {
// and returns a new path, removing the suffix as needed.
// It also returns whether this is a translated link at all
//
// for regular files, localPath is returned unchanged
func translateLink(remote, localPath string) (newLocalPath string, isTranslatedLink bool) {
// for regular files, dstPath is returned unchanged
func translateLink(remote, dstPath string) (newDstPath string, isTranslatedLink bool) {
isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
newLocalPath = strings.TrimSuffix(localPath, linkSuffix)
return newLocalPath, isTranslatedLink
newDstPath = strings.TrimSuffix(dstPath, linkSuffix)
return newDstPath, isTranslatedLink
}
// newObject makes a half completed Object
func (f *Fs) newObject(remote string) *Object {
//
// if dstPath is empty then it is made from remote
func (f *Fs) newObject(remote, dstPath string) *Object {
translatedLink := false
localPath := f.localPath(remote)
if dstPath == "" {
dstPath = f.cleanPath(filepath.Join(f.root, remote))
}
remote = f.cleanRemote(remote)
if f.opt.TranslateSymlinks {
// Possibly receive a new name for localPath
localPath, translatedLink = translateLink(remote, localPath)
// Possibly receive a new name for dstPath
dstPath, translatedLink = translateLink(remote, dstPath)
}
return &Object{
fs: f,
remote: remote,
path: localPath,
path: dstPath,
translatedLink: translatedLink,
}
}
@@ -303,8 +301,8 @@ func (f *Fs) newObject(remote string) *Object {
// Return an Object from a path
//
// May return nil if an error occurred
func (f *Fs) newObjectWithInfo(remote string, info os.FileInfo) (fs.Object, error) {
o := f.newObject(remote)
func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Object, error) {
o := f.newObject(remote, dstPath)
if info != nil {
o.setMetadata(info)
} else {
@@ -333,7 +331,7 @@ func (f *Fs) newObjectWithInfo(remote string, info os.FileInfo) (fs.Object, erro
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
return f.newObjectWithInfo(remote, "", nil)
}
// List the objects and directories in dir into entries. The
@@ -346,7 +344,10 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
fsDirPath := f.localPath(dir)
dir = f.dirNames.Load(dir)
fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
remote := f.cleanRemote(dir)
_, err = os.Stat(fsDirPath)
if err != nil {
return nil, fs.ErrorDirNotFound
@@ -358,7 +359,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
err = errors.Wrapf(err, "failed to open directory %q", dir)
fs.Errorf(dir, "%v", err)
if isPerm {
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
err = nil // ignore error but fail sync
}
return nil, err
@@ -394,7 +395,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if fierr != nil {
err = errors.Wrapf(err, "failed to read directory %q", namepath)
fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue
}
fis = append(fis, fi)
@@ -408,16 +409,16 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for _, fi := range fis {
name := fi.Name()
mode := fi.Mode()
newRemote := f.cleanRemote(dir, name)
newRemote := path.Join(remote, name)
newPath := filepath.Join(fsDirPath, name)
// Follow symlinks if required
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
localPath := filepath.Join(fsDirPath, name)
fi, err = os.Stat(localPath)
fi, err = os.Stat(newPath)
if os.IsNotExist(err) {
// Skip bad symlinks
err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
fs.Errorf(newRemote, "Listing error: %v", err)
err = accounting.Stats(ctx).Error(err)
accounting.Stats(ctx).Error(err)
continue
}
if err != nil {
@@ -429,7 +430,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
d := fs.NewDir(newRemote, fi.ModTime())
d := fs.NewDir(f.dirNames.Save(newRemote, f.cleanRemote(newRemote)), fi.ModTime())
entries = append(entries, d)
}
} else {
@@ -437,7 +438,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
newRemote += linkSuffix
}
fso, err := f.newObjectWithInfo(newRemote, fi)
fso, err := f.newObjectWithInfo(newRemote, newPath, fi)
if err != nil {
return nil, err
}
@@ -450,28 +451,67 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return entries, nil
}
func (f *Fs) cleanRemote(dir, filename string) (remote string) {
remote = path.Join(dir, f.opt.Enc.ToStandardName(filename))
if !utf8.ValidString(filename) {
f.warnedMu.Lock()
if _, ok := f.warned[remote]; !ok {
fs.Logf(f, "Replacing invalid UTF-8 characters in %q", remote)
f.warned[remote] = struct{}{}
// cleanRemote makes a string valid UTF-8 for use as a remote name.
//
// Any invalid UTF-8 characters will be replaced with utf8.RuneError
// It also normalises the UTF-8 and converts the slashes if necessary.
func (f *Fs) cleanRemote(name string) string {
if !utf8.ValidString(name) {
f.wmu.Lock()
if _, ok := f.warned[name]; !ok {
fs.Logf(f, "Replacing invalid UTF-8 characters in %q", name)
f.warned[name] = struct{}{}
}
f.warnedMu.Unlock()
f.wmu.Unlock()
name = string([]rune(name))
}
return
name = filepath.ToSlash(name)
return name
}
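The heavy lifting here is Go's string-to-rune-slice conversion, which replaces every invalid byte with utf8.RuneError; a standalone sketch:
// the rune-conversion trick used above
name := "bad\xffname"
fmt.Println(utf8.ValidString(name))    // false
cleaned := string([]rune(name))        // the invalid byte becomes utf8.RuneError (U+FFFD)
fmt.Println(utf8.ValidString(cleaned)) // true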
func (f *Fs) localPath(name string) string {
return filepath.Join(f.root, filepath.FromSlash(f.opt.Enc.FromStandardPath(name)))
// mapper maps raw to cleaned directory names
type mapper struct {
mu sync.RWMutex // mutex to protect the below
m map[string]string // map of un-normalised directory names
}
func newMapper() *mapper {
return &mapper{
m: make(map[string]string),
}
}
// Load looks up a cleaned directory name to recover the raw local name
// (reversing the mapping recorded by Save)
//
// FIXME this is temporary before we make a proper Directory object
func (m *mapper) Load(in string) string {
m.mu.RLock()
out, ok := m.m[in]
m.mu.RUnlock()
if ok {
return out
}
return in
}
// Save records a cleaned directory name when it differs from the raw one
//
// FIXME this is temporary before we make a proper Directory object
func (m *mapper) Save(in, out string) string {
if in != out {
m.mu.Lock()
m.m[out] = in
m.mu.Unlock()
}
return out
}
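A short sketch of the intended round trip through the mapper (the directory names are hypothetical):
m := newMapper()
raw := "photos\xff2019"           // raw on-disk name with an invalid byte
clean := string([]rune(raw))      // what cleanRemote would produce
_ = m.Save(raw, clean)            // record the mapping since the names differ
fmt.Println(m.Load(clean) == raw) // true: the on-disk name is recovered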
// Put the Object to the local filesystem
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote()
// Temporary Object under construction - info filled in by Update()
o := f.newObject(src.Remote())
o := f.newObject(remote, "")
err := o.Update(ctx, in, src, options...)
if err != nil {
return nil, err
@@ -487,13 +527,13 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
localPath := f.localPath(dir)
err := os.MkdirAll(localPath, 0777)
root := f.cleanPath(filepath.Join(f.root, dir))
err := os.MkdirAll(root, 0777)
if err != nil {
return err
}
if dir == "" {
fi, err := f.lstat(localPath)
fi, err := f.lstat(root)
if err != nil {
return err
}
@@ -506,7 +546,8 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
//
// If it isn't empty it will return an error
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return os.Remove(f.localPath(dir))
root := f.cleanPath(filepath.Join(f.root, dir))
return os.Remove(root)
}
// Precision of the file system
@@ -602,7 +643,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// Temporary Object under construction
dstObj := f.newObject(remote)
dstObj := f.newObject(remote, "")
// Check it is a file if it exists
err := dstObj.lstat()
@@ -659,8 +700,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := srcFs.localPath(srcRemote)
dstPath := f.localPath(dstRemote)
srcPath := f.cleanPath(filepath.Join(srcFs.root, srcRemote))
dstPath := f.cleanPath(filepath.Join(f.root, dstRemote))
// Check if destination exists
_, err := os.Lstat(dstPath)
@@ -694,7 +735,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Supported()
return hash.Supported
}
// ------------------------------------------------------------
@@ -736,11 +777,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
var in io.ReadCloser
if !o.translatedLink {
var fd *os.File
fd, err = file.Open(o.path)
if fd != nil {
in = newFadviseReadCloser(o, fd, 0, 0)
}
in, err = file.Open(o.path)
} else {
in, err = o.openTranslatedLink(0, -1)
}
@@ -794,6 +831,13 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
// Storable returns a boolean showing if this object is storable
func (o *Object) Storable() bool {
// Check for control characters in the remote name and mark the object as non-storable if any are found
for _, c := range o.Remote() {
if c >= 0x00 && c < 0x20 || c == 0x7F {
fs.Logf(o.fs, "Can't store file with control characters: %q", o.Remote())
return false
}
}
mode := o.mode
if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks {
if !o.fs.opt.SkipSymlinks {
@@ -828,10 +872,10 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
return 0, errors.Wrap(err, "can't read status of source file while transferring")
}
if file.o.size != fi.Size() {
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size()))
return 0, errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size())
}
if !file.o.modTime.Equal(fi.ModTime()) {
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime()))
return 0, errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime())
}
}
@@ -869,7 +913,7 @@ func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
var hasher *hash.MultiHasher
hashes := hash.Supported
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
@@ -877,12 +921,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
case *fs.RangeOption:
offset, limit = x.Decode(o.size)
case *fs.HashesOption:
if x.Hashes.Count() > 0 {
hasher, err = hash.NewMultiHasherTypes(x.Hashes)
if err != nil {
return nil, err
}
}
hashes = x.Hashes
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
@@ -899,22 +938,22 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return
}
wrappedFd := readers.NewLimitedReadCloser(newFadviseReadCloser(o, fd, offset, limit), limit)
wrappedFd := readers.NewLimitedReadCloser(fd, limit)
if offset != 0 {
// seek the object
_, err = fd.Seek(offset, io.SeekStart)
// don't attempt to make checksums
return wrappedFd, err
}
if hasher == nil {
// no need to wrap since we don't need checksums
return wrappedFd, nil
hash, err := hash.NewMultiHasherTypes(hashes)
if err != nil {
return nil, err
}
// Update the hashes as we go along
// Update the md5sum as we go along
in = &localOpenFile{
o: o,
in: wrappedFd,
hash: hasher,
hash: hash,
fd: fd,
}
return in, nil
@@ -936,23 +975,18 @@ func (nwc nopWriterCloser) Close() error {
}
// Update the object from in with modTime and size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
var out io.WriteCloser
var hasher *hash.MultiHasher
hashes := hash.Supported
for _, option := range options {
switch x := option.(type) {
case *fs.HashesOption:
if x.Hashes.Count() > 0 {
hasher, err = hash.NewMultiHasherTypes(x.Hashes)
if err != nil {
return err
}
}
hashes = x.Hashes
}
}
err = o.mkdirAll()
err := o.mkdirAll()
if err != nil {
return err
}
@@ -964,17 +998,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if !o.translatedLink {
f, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
if runtime.GOOS == "windows" && os.IsPermission(err) {
// If permission denied on Windows might be trying to update a
// hidden file, in which case try opening without CREATE
// See: https://stackoverflow.com/questions/13215716/ioerror-errno-13-permission-denied-when-trying-to-open-hidden-file-in-w-mod
f, err = file.OpenFile(o.path, os.O_WRONLY|os.O_TRUNC, 0666)
if err != nil {
return err
}
} else {
return err
}
return err
}
// Pre-allocate the file for performance reasons
err = preAllocate(src.Size(), f)
@@ -987,9 +1011,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Calculate the hash of the object we are reading as we go along
if hasher != nil {
in = io.TeeReader(in, hasher)
hash, err := hash.NewMultiHasherTypes(hashes)
if err != nil {
return err
}
in = io.TeeReader(in, hash)
_, err = io.Copy(out, in)
closeErr := out.Close()
@@ -1025,11 +1051,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// All successful so update the hashes
if hasher != nil {
o.fs.objectHashesMu.Lock()
o.hashes = hasher.Sums()
o.fs.objectHashesMu.Unlock()
}
o.fs.objectHashesMu.Lock()
o.hashes = hash.Sums()
o.fs.objectHashesMu.Unlock()
// Set the mtime
err = o.SetModTime(ctx, src.ModTime(ctx))
@@ -1048,7 +1072,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
// Temporary Object under construction
o := f.newObject(remote)
o := f.newObject(remote, "")
err := o.mkdirAll()
if err != nil {
@@ -1100,7 +1124,29 @@ func (o *Object) Remove(ctx context.Context) error {
return remove(o.path)
}
func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
// cleanPathFragment cleans an OS path fragment which is part of a
// bigger path and not necessarily absolute
func cleanPathFragment(s string) string {
if s == "" {
return s
}
s = filepath.Clean(s)
if runtime.GOOS == "windows" {
s = strings.Replace(s, `/`, `\`, -1)
}
return s
}
// cleanPath cleans and makes absolute the path passed in and returns
// an OS path.
//
// The input might be in OS form or rclone form or a mixture, but the
// output is in OS form.
//
// On Windows it also converts the path to UNC form and swaps any
// characters Windows can't deal with for their replacements.
func (f *Fs) cleanPath(s string) string {
s = cleanPathFragment(s)
if runtime.GOOS == "windows" {
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
s2, err := filepath.Abs(s)
@@ -1108,24 +1154,19 @@ func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
s = s2
}
}
s = filepath.ToSlash(s)
vol := filepath.VolumeName(s)
s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s)
if !noUNC {
if !f.opt.NoUNC {
// Convert to UNC
s = uncPath(s)
}
return s
}
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
s = cleanWindowsName(f, s)
} else {
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
}
s = enc.FromStandardPath(s)
return s
}
@@ -1134,21 +1175,63 @@ var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)
// uncPath converts an absolute Windows path
// to a UNC long path.
func uncPath(l string) string {
func uncPath(s string) string {
// UNC can NOT use "/", so convert all to "\"
s = strings.Replace(s, `/`, `\`, -1)
// If prefix is "\\", we already have a UNC path or server.
if strings.HasPrefix(l, `\\`) {
if strings.HasPrefix(s, `\\`) {
// If already long path, just keep it
if strings.HasPrefix(l, `\\?\`) {
return l
if strings.HasPrefix(s, `\\?\`) {
return s
}
// Trim "\\" from path and add UNC prefix.
return `\\?\UNC\` + strings.TrimPrefix(l, `\\`)
return `\\?\UNC\` + strings.TrimPrefix(s, `\\`)
}
if isAbsWinDrive.MatchString(l) {
return `\\?\` + l
if isAbsWinDrive.MatchString(s) {
return `\\?\` + s
}
return l
return s
}
// cleanWindowsName will clean invalid Windows characters replacing them with _
func cleanWindowsName(f *Fs, name string) string {
original := name
var name2 string
if strings.HasPrefix(name, `\\?\`) {
name2 = `\\?\`
name = strings.TrimPrefix(name, `\\?\`)
}
if strings.HasPrefix(name, `//?/`) {
name2 = `//?/`
name = strings.TrimPrefix(name, `//?/`)
}
// Colon is allowed as part of a drive name X:\
colonAt := strings.Index(name, ":")
if colonAt > 0 && colonAt < 3 && len(name) > colonAt+1 {
// Copy to name2, which is unfiltered
name2 += name[0 : colonAt+1]
name = name[colonAt+1:]
}
name2 += strings.Map(func(r rune) rune {
switch r {
case '<', '>', '"', '|', '?', '*', ':':
return '_'
}
return r
}, name)
if name2 != original && f != nil {
f.wmu.Lock()
if _, ok := f.warned[name]; !ok {
fs.Logf(f, "Replacing invalid characters in %q to %q", name, name2)
f.warned[name] = struct{}{}
}
f.wmu.Unlock()
}
return name2
}
// Check the interfaces are satisfied


@@ -25,6 +25,19 @@ func TestMain(m *testing.M) {
fstest.TestMain(m)
}
func TestMapper(t *testing.T) {
m := newMapper()
assert.Equal(t, m.m, map[string]string{})
assert.Equal(t, "potato", m.Save("potato", "potato"))
assert.Equal(t, m.m, map[string]string{})
assert.Equal(t, "-r'áö", m.Save("-r?'a´o¨", "-r'áö"))
assert.Equal(t, m.m, map[string]string{
"-r'áö": "-r?'a´o¨",
})
assert.Equal(t, "potato", m.Load("potato"))
assert.Equal(t, "-r?'a´o¨", m.Load("-r'áö"))
}
// Test copy with source file that's updating
func TestUpdatingCheck(t *testing.T) {
r := fstest.NewRun(t)
@@ -44,7 +57,7 @@ func TestUpdatingCheck(t *testing.T) {
require.NoError(t, err)
o := &Object{size: fi.Size(), modTime: fi.ModTime(), fs: &Fs{}}
wrappedFd := readers.NewLimitedReadCloser(fd, -1)
hash, err := hash.NewMultiHasherTypes(hash.Supported())
hash, err := hash.NewMultiHasherTypes(hash.Supported)
require.NoError(t, err)
in := localOpenFile{
o: o,


@@ -1,26 +1,29 @@
package local
import (
"runtime"
"testing"
)
var uncTestPaths = []string{
`C:\Ba*d\P|a?t<h>\Windows\Folder`,
`C:\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\UNC\server\share\Desktop`,
`\\?\unC\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`C:\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`C:\AbsoluteToRoot\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\server\share\Desktop`,
`\\?\UNC\\share\folder\Desktop`,
`\\server\share`,
"C:\\Ba*d\\P|a?t<h>\\Windows\\Folder",
"C:/Ba*d/P|a?t<h>/Windows\\Folder",
"C:\\Windows\\Folder",
"\\\\?\\C:\\Windows\\Folder",
"//?/C:/Windows/Folder",
"\\\\?\\UNC\\server\\share\\Desktop",
"\\\\?\\unC\\server\\share\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"\\\\server\\share\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"C:\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"C:\\AbsoluteToRoot\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"\\\\server\\share\\Desktop",
"\\\\?\\UNC\\\\share\\folder\\Desktop",
"\\\\server\\share",
}
var uncTestPathsResults = []string{
`\\?\C:\Ba*d\P|a?t<h>\Windows\Folder`,
`\\?\C:\Ba*d\P|a?t<h>\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\UNC\server\share\Desktop`,
@@ -48,23 +51,38 @@ func TestUncPaths(t *testing.T) {
}
}
// Test Windows character replacements
var testsWindows = [][2]string{
{`c:\temp`, `c:\temp`},
{`\\?\UNC\theserver\dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
{`//?/UNC/theserver/dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
{`c:/temp`, `c:\temp`},
{`C:/temp/file.txt`, `C:\temp\file.txt`},
{`c:\!\"#¤%&/()=;:*^?+-`, `c:\!\#¤%&\()=;^+-`},
{`c:\<>"|?*:&\<>"|?*:&\<>"|?*:&`, `c:\&\&\&`},
var utf8Tests = [][2]string{
{"ABC", "ABC"},
{string([]byte{0x80}), "�"},
{string([]byte{'a', 0x80, 'b'}), "a�b"},
}
func TestCleanWindows(t *testing.T) {
if runtime.GOOS != "windows" {
t.Skipf("windows only")
}
for _, test := range testsWindows {
got := cleanRootPath(test[0], true, defaultEnc)
func TestCleanRemote(t *testing.T) {
f := &Fs{}
f.warned = make(map[string]struct{})
for _, test := range utf8Tests {
got := f.cleanRemote(test[0])
expect := test[1]
if got != expect {
t.Fatalf("got %q, expected %q", got, expect)
}
}
}
// Test Windows character replacements
var testsWindows = [][2]string{
{`c:\temp`, `c:\temp`},
{`\\?\UNC\theserver\dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
{`//?/UNC/theserver/dir\file.txt`, `//?/UNC/theserver/dir\file.txt`},
{"c:/temp", "c:/temp"},
{"/temp/file.txt", "/temp/file.txt"},
{`!\"#¤%&/()=;:*^?+-`, "!\\_#¤%&/()=;__^_+-"},
{`<>"|?*:&\<>"|?*:&\<>"|?*:&`, "_______&\\_______&\\_______&"},
}
func TestCleanWindows(t *testing.T) {
for _, test := range testsWindows {
got := cleanWindowsName(nil, test[0])
expect := test[1]
if got != expect {
t.Fatalf("got %q, expected %q", got, expect)


@@ -1,107 +0,0 @@
package api
// BIN protocol constants
const (
BinContentType = "application/x-www-form-urlencoded"
TreeIDLength = 12
DunnoNodeIDLength = 16
)
// Operations in binary protocol
const (
OperationAddFile = 103 // 0x67
OperationRename = 105 // 0x69
OperationCreateFolder = 106 // 0x6A
OperationFolderList = 117 // 0x75
OperationSharedFoldersList = 121 // 0x79
// TODO investigate opcodes below
Operation154MaybeItemInfo = 154 // 0x9A
Operation102MaybeAbout = 102 // 0x66
Operation104MaybeDelete = 104 // 0x68
)
// CreateDir protocol constants
const (
MkdirResultOK = 0
MkdirResultSourceNotExists = 1
MkdirResultAlreadyExists = 4
MkdirResultExistsDifferentCase = 9
MkdirResultInvalidName = 10
MkdirResultFailed254 = 254
)
// Move result codes
const (
MoveResultOK = 0
MoveResultSourceNotExists = 1
MoveResultFailed002 = 2
MoveResultAlreadyExists = 4
MoveResultFailed005 = 5
MoveResultFailed254 = 254
)
// AddFile result codes
const (
AddResultOK = 0
AddResultError01 = 1
AddResultDunno04 = 4
AddResultWrongPath = 5
AddResultNoFreeSpace = 7
AddResultDunno09 = 9
AddResultInvalidName = 10
AddResultNotModified = 12
AddResultFailedA = 253
AddResultFailedB = 254
)
// List request options
const (
ListOptTotalSpace = 1
ListOptDelete = 2
ListOptFingerprint = 4
ListOptUnknown8 = 8
ListOptUnknown16 = 16
ListOptFolderSize = 32
ListOptUsedSpace = 64
ListOptUnknown128 = 128
ListOptUnknown256 = 256
)
// ListOptDefaults ...
const ListOptDefaults = ListOptUnknown128 | ListOptUnknown256 | ListOptFolderSize | ListOptTotalSpace | ListOptUsedSpace
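To see how these options combine into ListOptDefaults, here is a standalone sketch; the constant values are copied from the blocks above:

package main

import "fmt"

const (
	ListOptTotalSpace = 1
	ListOptFolderSize = 32
	ListOptUsedSpace  = 64
	ListOptUnknown128 = 128
	ListOptUnknown256 = 256

	ListOptDefaults = ListOptUnknown128 | ListOptUnknown256 |
		ListOptFolderSize | ListOptTotalSpace | ListOptUsedSpace
)

func main() {
	fmt.Println(ListOptDefaults)                        // 481
	fmt.Println(ListOptDefaults&ListOptFolderSize != 0) // true: folder sizes requested
}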
// List parse flags
const (
ListParseDone = 0
ListParseReadItem = 1
ListParsePin = 2
ListParsePinUpper = 3
ListParseUnknown15 = 15
)
// List operation results
const (
ListResultOK = 0
ListResultNotExists = 1
ListResultDunno02 = 2
ListResultDunno03 = 3
ListResultAlreadyExists04 = 4
ListResultDunno05 = 5
ListResultDunno06 = 6
ListResultDunno07 = 7
ListResultDunno08 = 8
ListResultAlreadyExists09 = 9
ListResultDunno10 = 10
ListResultDunno11 = 11
ListResultDunno12 = 12
ListResultFailedB = 253
ListResultFailedA = 254
)
// Directory item types
const (
ListItemMountPoint = 0
ListItemFile = 1
ListItemFolder = 2
ListItemSharedFolder = 3
)


@@ -1,225 +0,0 @@
package api
// BIN protocol helpers
import (
"bufio"
"bytes"
"encoding/binary"
"io"
"log"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/lib/readers"
)
// protocol errors
var (
ErrorPrematureEOF = errors.New("Premature EOF")
ErrorInvalidLength = errors.New("Invalid length")
ErrorZeroTerminate = errors.New("String must end with zero")
)
// BinWriter is a binary protocol writer
type BinWriter struct {
b *bytes.Buffer // growing byte buffer
a []byte // temporary buffer for next varint
}
// NewBinWriter creates a binary protocol helper
func NewBinWriter() *BinWriter {
return &BinWriter{
b: new(bytes.Buffer),
a: make([]byte, binary.MaxVarintLen64),
}
}
// Bytes returns binary data
func (w *BinWriter) Bytes() []byte {
return w.b.Bytes()
}
// Reader returns io.Reader with binary data
func (w *BinWriter) Reader() io.Reader {
return bytes.NewReader(w.b.Bytes())
}
// WritePu16 writes a short as unsigned varint
func (w *BinWriter) WritePu16(val int) {
if val < 0 || val > 65535 {
log.Fatalf("Invalid UInt16 %v", val)
}
w.WritePu64(int64(val))
}
// WritePu32 writes a signed long as unsigned varint
func (w *BinWriter) WritePu32(val int64) {
if val < 0 || val > 4294967295 {
log.Fatalf("Invalid UInt32 %v", val)
}
w.WritePu64(val)
}
// WritePu64 writes a non-negative signed long as an unsigned varint
func (w *BinWriter) WritePu64(val int64) {
if val < 0 {
log.Fatalf("Invalid UInt64 %v", val)
}
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}
// WriteString writes a zero-terminated string
func (w *BinWriter) WriteString(str string) {
buf := []byte(str)
w.WritePu64(int64(len(buf) + 1))
w.b.Write(buf)
w.b.WriteByte(0)
}
// Write writes a byte buffer
func (w *BinWriter) Write(buf []byte) {
w.b.Write(buf)
}
// WriteWithLength writes a byte buffer prepended with its length as varint
func (w *BinWriter) WriteWithLength(buf []byte) {
w.WritePu64(int64(len(buf)))
w.b.Write(buf)
}
// BinReader is a binary protocol reader helper
type BinReader struct {
b *bufio.Reader
count *readers.CountingReader
err error // keeps the first error encountered
}
// NewBinReader creates a binary protocol reader helper
func NewBinReader(reader io.Reader) *BinReader {
r := &BinReader{}
r.count = readers.NewCountingReader(reader)
r.b = bufio.NewReader(r.count)
return r
}
// Count returns number of bytes read
func (r *BinReader) Count() uint64 {
return r.count.BytesRead()
}
// Error returns first encountered error or nil
func (r *BinReader) Error() error {
return r.err
}
// check() keeps the first error encountered in a stream
func (r *BinReader) check(err error) bool {
if err == nil {
return true
}
if r.err == nil {
// keep the first error
r.err = err
}
if err != io.EOF {
log.Fatalf("Error parsing response: %v", err)
}
return false
}
// ReadByteAsInt reads a single byte as an int, returning -1 for EOF or errors
func (r *BinReader) ReadByteAsInt() int {
if octet, err := r.b.ReadByte(); r.check(err) {
return int(octet)
}
return -1
}
// ReadByteAsShort reads a single byte as an int16, returning -1 for EOF or errors
func (r *BinReader) ReadByteAsShort() int16 {
if octet, err := r.b.ReadByte(); r.check(err) {
return int16(octet)
}
return -1
}
// ReadIntSpl reads two bytes as little-endian uint16, returns -1 for EOF or errors
func (r *BinReader) ReadIntSpl() int {
var val uint16
if r.check(binary.Read(r.b, binary.LittleEndian, &val)) {
return int(val)
}
return -1
}
// ReadULong returns uint64 equivalent of -1 for EOF or errors
func (r *BinReader) ReadULong() uint64 {
if val, err := binary.ReadUvarint(r.b); r.check(err) {
return val
}
return 0xffffffffffffffff
}
// ReadPu32 returns -1 for EOF or errors
func (r *BinReader) ReadPu32() int64 {
if val, err := binary.ReadUvarint(r.b); r.check(err) {
return int64(val)
}
return -1
}
// ReadNBytes reads the given number of bytes, returning invalid data for EOF or errors
func (r *BinReader) ReadNBytes(len int) []byte {
buf := make([]byte, len)
n, err := r.b.Read(buf)
if r.check(err) {
return buf
}
if n != len {
r.check(ErrorPrematureEOF)
}
return buf
}
// ReadBytesByLength reads buffer length and its bytes
func (r *BinReader) ReadBytesByLength() []byte {
len := r.ReadPu32()
if len < 0 {
r.check(ErrorInvalidLength)
return []byte{}
}
return r.ReadNBytes(int(len))
}
// ReadString reads a zero-terminated string with length
func (r *BinReader) ReadString() string {
len := int(r.ReadPu32())
if len < 1 {
r.check(ErrorInvalidLength)
return ""
}
buf := make([]byte, len-1)
n, err := r.b.Read(buf)
if !r.check(err) {
return ""
}
if n != len-1 {
r.check(ErrorPrematureEOF)
return ""
}
zeroByte, err := r.b.ReadByte()
if !r.check(err) {
return ""
}
if zeroByte != 0 {
r.check(ErrorZeroTerminate)
return ""
}
return string(buf)
}
// ReadDate reads a Unix encoded time
func (r *BinReader) ReadDate() time.Time {
return time.Unix(r.ReadPu32(), 0)
}


@@ -1,248 +0,0 @@
package api
import (
"fmt"
)
// M1 protocol constants and structures
const (
APIServerURL = "https://cloud.mail.ru"
PublicLinkURL = "https://cloud.mail.ru/public/"
DispatchServerURL = "https://dispatcher.cloud.mail.ru"
OAuthURL = "https://o2.mail.ru/token"
OAuthClientID = "cloud-win"
)
// ServerErrorResponse represents erroneous API response.
type ServerErrorResponse struct {
Message string `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}
func (e *ServerErrorResponse) Error() string {
return fmt.Sprintf("server error %d (%s)", e.Status, e.Message)
}
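A standalone sketch of decoding such an error response; the struct and Error method are copied from above, and the sample JSON payload is invented for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

type ServerErrorResponse struct {
	Message string `json:"body"`
	Time    int64  `json:"time"`
	Status  int    `json:"status"`
}

func (e *ServerErrorResponse) Error() string {
	return fmt.Sprintf("server error %d (%s)", e.Status, e.Message)
}

func main() {
	data := []byte(`{"body":"user","time":1564900000,"status":403}`)
	var e ServerErrorResponse
	if err := json.Unmarshal(data, &e); err != nil {
		panic(err)
	}
	fmt.Println(e.Error()) // server error 403 (user)
}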
// FileErrorResponse represents erroneous API response for a file
type FileErrorResponse struct {
Body struct {
Home struct {
Value string `json:"value"`
Error string `json:"error"`
} `json:"home"`
} `json:"body"`
Status int `json:"status"`
Account string `json:"email,omitempty"`
Time int64 `json:"time,omitempty"`
Message string // non-json, calculated field
}
func (e *FileErrorResponse) Error() string {
return fmt.Sprintf("file error %d (%s)", e.Status, e.Body.Home.Error)
}
// UserInfoResponse contains account metadata
type UserInfoResponse struct {
Body struct {
AccountType string `json:"account_type"`
AccountVerified bool `json:"account_verified"`
Cloud struct {
Beta struct {
Allowed bool `json:"allowed"`
Asked bool `json:"asked"`
} `json:"beta"`
Billing struct {
ActiveCostID string `json:"active_cost_id"`
ActiveRateID string `json:"active_rate_id"`
AutoProlong bool `json:"auto_prolong"`
Basequota int64 `json:"basequota"`
Enabled bool `json:"enabled"`
Expires int `json:"expires"`
Prolong bool `json:"prolong"`
Promocodes struct {
} `json:"promocodes"`
Subscription []interface{} `json:"subscription"`
Version string `json:"version"`
} `json:"billing"`
Bonuses struct {
CameraUpload bool `json:"camera_upload"`
Complete bool `json:"complete"`
Desktop bool `json:"desktop"`
Feedback bool `json:"feedback"`
Links bool `json:"links"`
Mobile bool `json:"mobile"`
Registration bool `json:"registration"`
} `json:"bonuses"`
Enable struct {
Sharing bool `json:"sharing"`
} `json:"enable"`
FileSizeLimit int64 `json:"file_size_limit"`
Space struct {
BytesTotal int64 `json:"bytes_total"`
BytesUsed int `json:"bytes_used"`
Overquota bool `json:"overquota"`
} `json:"space"`
} `json:"cloud"`
Cloudflags struct {
Exists bool `json:"exists"`
} `json:"cloudflags"`
Domain string `json:"domain"`
Login string `json:"login"`
Newbie bool `json:"newbie"`
UI struct {
ExpandLoader bool `json:"expand_loader"`
Kind string `json:"kind"`
Sidebar bool `json:"sidebar"`
Sort struct {
Order string `json:"order"`
Type string `json:"type"`
} `json:"sort"`
Thumbs bool `json:"thumbs"`
} `json:"ui"`
} `json:"body"`
Email string `json:"email"`
Status int `json:"status"`
Time int64 `json:"time"`
}
// ListItem ...
type ListItem struct {
Count struct {
Folders int `json:"folders"`
Files int `json:"files"`
} `json:"count,omitempty"`
Kind string `json:"kind"`
Type string `json:"type"`
Name string `json:"name"`
Home string `json:"home"`
Size int64 `json:"size"`
Mtime int64 `json:"mtime,omitempty"`
Hash string `json:"hash,omitempty"`
VirusScan string `json:"virus_scan,omitempty"`
Tree string `json:"tree,omitempty"`
Grev int `json:"grev,omitempty"`
Rev int `json:"rev,omitempty"`
}
// ItemInfoResponse ...
type ItemInfoResponse struct {
Email string `json:"email"`
Body ListItem `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}
// FolderInfoResponse ...
type FolderInfoResponse struct {
Body struct {
Count struct {
Folders int `json:"folders"`
Files int `json:"files"`
} `json:"count"`
Tree string `json:"tree"`
Name string `json:"name"`
Grev int `json:"grev"`
Size int64 `json:"size"`
Sort struct {
Order string `json:"order"`
Type string `json:"type"`
} `json:"sort"`
Kind string `json:"kind"`
Rev int `json:"rev"`
Type string `json:"type"`
Home string `json:"home"`
List []ListItem `json:"list"`
} `json:"body,omitempty"`
Time int64 `json:"time"`
Status int `json:"status"`
Email string `json:"email"`
}
// ShardInfoResponse ...
type ShardInfoResponse struct {
Email string `json:"email"`
Body struct {
Video []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"video"`
ViewDirect []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view_direct"`
WeblinkView []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_view"`
WeblinkVideo []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_video"`
WeblinkGet []struct {
Count int `json:"count"`
URL string `json:"url"`
} `json:"weblink_get"`
Stock []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"stock"`
WeblinkThumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_thumbnails"`
PublicUpload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"public_upload"`
Auth []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"auth"`
Web []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"web"`
View []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view"`
Upload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"upload"`
Get []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"get"`
Thumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"thumbnails"`
} `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}
// CleanupResponse ...
type CleanupResponse struct {
Email string `json:"email"`
Time int64 `json:"time"`
StatusStr string `json:"status"`
}
// GenericResponse ...
type GenericResponse struct {
Email string `json:"email"`
Time int64 `json:"time"`
Status int `json:"status"`
// ignore other fields
}
// GenericBodyResponse ...
type GenericBodyResponse struct {
Email string `json:"email"`
Body string `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}

File diff suppressed because it is too large


@@ -1,18 +0,0 @@
// Test Mailru filesystem interface
package mailru_test
import (
"testing"
"github.com/rclone/rclone/backend/mailru"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestMailru:",
NilObject: (*mailru.Object)(nil),
SkipBadWindowsCharacters: true,
})
}


@@ -1,134 +0,0 @@
// Package mrhash implements the mailru hash, which is a modified SHA1.
// If the file size is less than or equal to the SHA1 digest size (20
// bytes), its hash is simply its data right-padded with zero bytes.
// The hash sum of a larger file is computed as a SHA1 sum of a fixed
// prefix ("mrCloud"), the file data bytes and a decimal representation
// of the data length, concatenated in that order.
package mrhash
import (
"crypto/sha1"
"encoding"
"encoding/hex"
"errors"
"hash"
"strconv"
)
const (
// BlockSize of the checksum in bytes.
BlockSize = sha1.BlockSize
// Size of the checksum in bytes.
Size = sha1.Size
startString = "mrCloud"
hashError = "hash function returned error"
)
// Global errors
var (
ErrorInvalidHash = errors.New("invalid hash")
)
type digest struct {
total int // bytes written into hash so far
sha hash.Hash // underlying SHA1
small []byte // small content
}
// New returns a new hash.Hash computing the Mailru checksum.
func New() hash.Hash {
d := &digest{}
d.Reset()
return d
}
// Write writes len(p) bytes from p to the underlying data stream. It returns
// the number of bytes written from p (0 <= n <= len(p)) and any error
// encountered that caused the write to stop early. Write must return a non-nil
// error if it returns n < len(p). Write must not modify the slice data, even
// temporarily.
//
// Implementations must not retain p.
func (d *digest) Write(p []byte) (n int, err error) {
n, err = d.sha.Write(p)
if err != nil {
panic(hashError)
}
d.total += n
if d.total <= Size {
d.small = append(d.small, p...)
}
return n, nil
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (d *digest) Sum(b []byte) []byte {
// If content is small, return it padded to Size
if d.total <= Size {
padded := make([]byte, Size)
copy(padded, d.small)
return append(b, padded...)
}
endString := strconv.Itoa(d.total)
copy, err := cloneSHA1(d.sha)
if err == nil {
_, err = copy.Write([]byte(endString))
}
if err != nil {
panic(hashError)
}
return copy.Sum(b)
}
// cloneSHA1 clones state of SHA1 hash
func cloneSHA1(orig hash.Hash) (clone hash.Hash, err error) {
state, err := orig.(encoding.BinaryMarshaler).MarshalBinary()
if err != nil {
return nil, err
}
clone = sha1.New()
err = clone.(encoding.BinaryUnmarshaler).UnmarshalBinary(state)
return
}
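The marshal/unmarshal trick above works because Go's standard hashes implement encoding.BinaryMarshaler since Go 1.10; a standalone sketch of snapshotting a SHA1 mid-stream:

package main

import (
	"crypto/sha1"
	"encoding"
	"encoding/hex"
	"fmt"
	"hash"
)

// snapshot clones a SHA1 via its marshalled state, as cloneSHA1 does.
func snapshot(orig hash.Hash) hash.Hash {
	state, err := orig.(encoding.BinaryMarshaler).MarshalBinary()
	if err != nil {
		panic(err)
	}
	clone := sha1.New()
	if err := clone.(encoding.BinaryUnmarshaler).UnmarshalBinary(state); err != nil {
		panic(err)
	}
	return clone
}

func main() {
	h := sha1.New()
	h.Write([]byte("hello "))
	c := snapshot(h)
	c.Write([]byte("world"))
	h.Write([]byte("world"))
	// Both sums are the SHA1 of "hello world".
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
	fmt.Println(hex.EncodeToString(c.Sum(nil)))
}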
// Reset resets the Hash to its initial state.
func (d *digest) Reset() {
d.sha = sha1.New()
_, _ = d.sha.Write([]byte(startString))
d.total = 0
}
// Size returns the number of bytes Sum will return.
func (d *digest) Size() int {
return Size
}
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (d *digest) BlockSize() int {
return BlockSize
}
// Sum returns the Mailru checksum of the data.
func Sum(data []byte) []byte {
var d digest
d.Reset()
_, _ = d.Write(data)
return d.Sum(nil)
}
// DecodeString converts a string to the Mailru hash
func DecodeString(s string) ([]byte, error) {
b, err := hex.DecodeString(s)
if err != nil || len(b) != Size {
return nil, ErrorInvalidHash
}
return b, nil
}
// must implement this interface
var (
_ hash.Hash = (*digest)(nil)
)
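A usage sketch tying the package together; the expected digests are taken from the test table below:

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/rclone/rclone/backend/mailru/mrhash"
)

func main() {
	// Content of 20 bytes or fewer hashes to itself, zero-padded.
	fmt.Println(hex.EncodeToString(mrhash.Sum([]byte("AA"))))
	// 4141000000000000000000000000000000000000

	// Longer content goes through the modified SHA1 path.
	big := make([]byte, 21)
	for i := range big {
		big[i] = 'A'
	}
	fmt.Println(hex.EncodeToString(mrhash.Sum(big)))
	// eb1d05e78a18691a5aa196a6c2b60cd40b5faafb
}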


@@ -1,81 +0,0 @@
package mrhash_test
import (
"encoding/hex"
"fmt"
"testing"
"github.com/rclone/rclone/backend/mailru/mrhash"
"github.com/stretchr/testify/assert"
)
func testChunk(t *testing.T, chunk int) {
data := make([]byte, chunk)
for i := 0; i < chunk; i++ {
data[i] = 'A'
}
for _, test := range []struct {
n int
want string
}{
{0, "0000000000000000000000000000000000000000"},
{1, "4100000000000000000000000000000000000000"},
{2, "4141000000000000000000000000000000000000"},
{19, "4141414141414141414141414141414141414100"},
{20, "4141414141414141414141414141414141414141"},
{21, "eb1d05e78a18691a5aa196a6c2b60cd40b5faafb"},
{22, "037e6d960601118a0639afbeff30fe716c66ed2d"},
{4096, "45a16aa192502b010280fb5b44274c601a91fd9f"},
{4194303, "fa019d5bd26498cf6abe35e0d61801bf19bf704b"},
{4194304, "5ed0e07aa6ea5c1beb9402b4d807258f27d40773"},
{4194305, "67bd0b9247db92e0e7d7e29a0947a50fedcb5452"},
{8388607, "41a8e2eb044c2e242971b5445d7be2a13fc0dd84"},
{8388608, "267a970917c624c11fe624276ec60233a66dc2c0"},
{8388609, "37b60b308d553d2732aefb62b3ea88f74acfa13f"},
} {
d := mrhash.New()
var toWrite int
for toWrite = test.n; toWrite >= chunk; toWrite -= chunk {
n, err := d.Write(data)
assert.Nil(t, err)
assert.Equal(t, chunk, n)
}
n, err := d.Write(data[:toWrite])
assert.Nil(t, err)
assert.Equal(t, toWrite, n)
got1 := hex.EncodeToString(d.Sum(nil))
assert.Equal(t, test.want, got1, fmt.Sprintf("when testing length %d", n))
got2 := hex.EncodeToString(d.Sum(nil))
assert.Equal(t, test.want, got2, fmt.Sprintf("when testing length %d (2nd sum)", n))
}
}
func TestHashChunk16M(t *testing.T) { testChunk(t, 16*1024*1024) }
func TestHashChunk8M(t *testing.T) { testChunk(t, 8*1024*1024) }
func TestHashChunk4M(t *testing.T) { testChunk(t, 4*1024*1024) }
func TestHashChunk2M(t *testing.T) { testChunk(t, 2*1024*1024) }
func TestHashChunk1M(t *testing.T) { testChunk(t, 1*1024*1024) }
func TestHashChunk64k(t *testing.T) { testChunk(t, 64*1024) }
func TestHashChunk32k(t *testing.T) { testChunk(t, 32*1024) }
func TestHashChunk2048(t *testing.T) { testChunk(t, 2048) }
func TestHashChunk2047(t *testing.T) { testChunk(t, 2047) }
func TestSumCalledTwice(t *testing.T) {
d := mrhash.New()
assert.NotPanics(t, func() { d.Sum(nil) })
d.Reset()
assert.NotPanics(t, func() { d.Sum(nil) })
assert.NotPanics(t, func() { d.Sum(nil) })
_, _ = d.Write([]byte{1})
assert.NotPanics(t, func() { d.Sum(nil) })
}
func TestSize(t *testing.T) {
d := mrhash.New()
assert.Equal(t, 20, d.Size())
}
func TestBlockSize(t *testing.T) {
d := mrhash.New()
assert.Equal(t, 64, d.BlockSize())
}


@@ -26,13 +26,11 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
mega "github.com/t3rm1n4l/go-mega"
@@ -82,24 +80,16 @@ than permanently deleting them. If you specify this then rclone will
permanently delete objects instead.`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: (encoder.Base |
encoder.EncodeInvalidUtf8),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
User string `config:"user"`
Pass string `config:"pass"`
Debug bool `config:"debug"`
HardDelete bool `config:"hard_delete"`
Enc encoder.MultiEncoder `config:"encoding"`
User string `config:"user"`
Pass string `config:"pass"`
Debug bool `config:"debug"`
HardDelete bool `config:"hard_delete"`
}
// Fs represents a remote mega
@@ -255,22 +245,21 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
// splitNodePath splits nodePath into / separated parts, returning nil if it
// should refer to the root.
// It also encodes the parts into backend specific encoding
func (f *Fs) splitNodePath(nodePath string) (parts []string) {
// should refer to the root
func splitNodePath(nodePath string) (parts []string) {
nodePath = path.Clean(nodePath)
if nodePath == "." || nodePath == "/" {
parts = strings.Split(nodePath, "/")
if len(parts) == 1 && (parts[0] == "." || parts[0] == "/") {
return nil
}
nodePath = f.opt.Enc.FromStandardPath(nodePath)
return strings.Split(nodePath, "/")
return parts
}
// findNode looks up the node for the path of the name given from the root given
//
// It returns mega.ENOENT if it wasn't found
func (f *Fs) findNode(rootNode *mega.Node, nodePath string) (*mega.Node, error) {
parts := f.splitNodePath(nodePath)
parts := splitNodePath(nodePath)
if parts == nil {
return rootNode, nil
}
@@ -327,7 +316,7 @@ func (f *Fs) mkdir(rootNode *mega.Node, dir string) (node *mega.Node, err error)
f.mkdirMu.Lock()
defer f.mkdirMu.Unlock()
parts := f.splitNodePath(dir)
parts := splitNodePath(dir)
if parts == nil {
return rootNode, nil
}
@@ -429,7 +418,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
errors := 0
// similar to f.deleteNode(trash) but with HardDelete as true
for _, item := range items {
fs.Debugf(f, "Deleting trash %q", f.opt.Enc.ToStandardName(item.GetName()))
fs.Debugf(f, "Deleting trash %q", item.GetName())
deleteErr := f.pacer.Call(func() (bool, error) {
err := f.srv.Delete(item, true)
return shouldRetry(err)
@@ -511,7 +500,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
var iErr error
_, err = f.list(ctx, dirNode, func(info *mega.Node) bool {
remote := path.Join(dir, f.opt.Enc.ToStandardName(info.GetName()))
remote := path.Join(dir, info.GetName())
switch info.GetType() {
case mega.FOLDER, mega.ROOT, mega.INBOX, mega.TRASH:
d := fs.NewDir(remote, info.GetTimeStamp()).SetID(info.GetHash())
@@ -733,7 +722,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
if srcLeaf != dstLeaf {
//log.Printf("rename %q to %q", srcLeaf, dstLeaf)
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Rename(info, f.opt.Enc.FromStandardName(dstLeaf))
err = f.srv.Rename(info, dstLeaf)
return shouldRetry(err)
})
if err != nil {
@@ -882,13 +871,13 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
}
// move them into place
for _, info := range infos {
fs.Infof(srcDir, "merging %q", f.opt.Enc.ToStandardName(info.GetName()))
fs.Infof(srcDir, "merging %q", info.GetName())
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Move(info, dstDirNode)
return shouldRetry(err)
})
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", f.opt.Enc.ToStandardName(info.GetName()), srcDir)
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.GetName(), srcDir)
}
}
// rmdir (into trash) the now empty source directory
@@ -1131,7 +1120,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var u *mega.Upload
err = o.fs.pacer.Call(func() (bool, error) {
u, err = o.fs.srv.NewUpload(dirNode, o.fs.opt.Enc.FromStandardName(leaf), size)
u, err = o.fs.srv.NewUpload(dirNode, leaf, size)
return shouldRetry(err)
})
if err != nil {


@@ -1,624 +0,0 @@
// Package memory provides an interface to an in memory object storage system
package memory
import (
"bytes"
"context"
"crypto/md5"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"path"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
)
var (
hashType = hash.MD5
// the object storage is persistent
buckets = newBucketsInfo()
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "memory",
Description: "In memory object storage system.",
NewFs: NewFs,
Options: []fs.Option{},
})
}
// Options defines the configuration for this backend
type Options struct {
}
// Fs represents a remote memory server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
features *fs.Features // optional features
}
// bucketsInfo holds info about all the buckets
type bucketsInfo struct {
mu sync.RWMutex
buckets map[string]*bucketInfo
}
func newBucketsInfo() *bucketsInfo {
return &bucketsInfo{
buckets: make(map[string]*bucketInfo, 16),
}
}
// getBucket gets a named bucket or nil
func (bi *bucketsInfo) getBucket(name string) (b *bucketInfo) {
bi.mu.RLock()
b = bi.buckets[name]
bi.mu.RUnlock()
return b
}
// makeBucket returns the bucket or makes it
func (bi *bucketsInfo) makeBucket(name string) (b *bucketInfo) {
bi.mu.Lock()
defer bi.mu.Unlock()
b = bi.buckets[name]
if b != nil {
return b
}
b = newBucketInfo()
bi.buckets[name] = b
return b
}
// deleteBucket deletes the bucket or returns an error
func (bi *bucketsInfo) deleteBucket(name string) error {
bi.mu.Lock()
defer bi.mu.Unlock()
b := bi.buckets[name]
if b == nil {
return fs.ErrorDirNotFound
}
if !b.isEmpty() {
return fs.ErrorDirectoryNotEmpty
}
delete(bi.buckets, name)
return nil
}
// getObjectData gets an object from (bucketName, bucketPath) or nil
func (bi *bucketsInfo) getObjectData(bucketName, bucketPath string) (od *objectData) {
b := bi.getBucket(bucketName)
if b == nil {
return nil
}
return b.getObjectData(bucketPath)
}
// updateObjectData updates an object from (bucketName, bucketPath)
func (bi *bucketsInfo) updateObjectData(bucketName, bucketPath string, od *objectData) {
b := bi.makeBucket(bucketName)
b.mu.Lock()
b.objects[bucketPath] = od
b.mu.Unlock()
}
// removeObjectData removes an object from (bucketName, bucketPath) returning true if removed
func (bi *bucketsInfo) removeObjectData(bucketName, bucketPath string) (removed bool) {
b := bi.getBucket(bucketName)
if b != nil {
b.mu.Lock()
od := b.objects[bucketPath]
if od != nil {
delete(b.objects, bucketPath)
removed = true
}
b.mu.Unlock()
}
return removed
}
// bucketInfo holds info about a single bucket
type bucketInfo struct {
mu sync.RWMutex
objects map[string]*objectData
}
func newBucketInfo() *bucketInfo {
return &bucketInfo{
objects: make(map[string]*objectData, 16),
}
}
// getObjectData gets a named object's data or nil
func (bi *bucketInfo) getObjectData(name string) (od *objectData) {
bi.mu.RLock()
od = bi.objects[name]
bi.mu.RUnlock()
return od
}
// isEmpty reports whether the bucket contains no objects
func (bi *bucketInfo) isEmpty() (empty bool) {
bi.mu.RLock()
empty = len(bi.objects) == 0
bi.mu.RUnlock()
return empty
}
// the object data and metadata
type objectData struct {
modTime time.Time
hash string
mimeType string
data []byte
}
// Object describes a memory object
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
od *objectData // the object data
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Memory root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
}
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
return bucket.Split(path.Join(f.root, rootRelativePath))
}
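A quick sketch of what this split produces; bucket.Split is the helper imported at the top of this file, and the sample root is invented:

package main

import (
	"fmt"
	"path"

	"github.com/rclone/rclone/lib/bucket"
)

func main() {
	root := "mybucket/base" // plays the role of f.root
	bucketName, bucketPath := bucket.Split(path.Join(root, "dir/file.txt"))
	fmt.Println(bucketName) // mybucket
	fmt.Println(bucketPath) // base/dir/file.txt
}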
// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
root = strings.Trim(root, "/")
f := &Fs{
name: name,
root: root,
opt: *opt,
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(f)
if f.rootBucket != "" && f.rootDirectory != "" {
od := buckets.getObjectData(f.rootBucket, f.rootDirectory)
if od != nil {
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
}
f.setRoot(newRoot)
// return an error with an fs which points to the parent
err = fs.ErrorIsFile
}
}
return f, err
}
// newObject makes an object from a remote and an objectData
func (f *Fs) newObject(remote string, od *objectData) *Object {
return &Object{fs: f, remote: remote, od: od}
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
bucket, bucketPath := f.split(remote)
od := buckets.getObjectData(bucket, bucketPath)
if od == nil {
return nil, fs.ErrorObjectNotFound
}
return f.newObject(remote, od), nil
}
// listFn is called from list to handle an object.
type listFn func(remote string, entry fs.DirEntry, isDirectory bool) error
// list the buckets to fn
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) (err error) {
if prefix != "" {
prefix += "/"
}
if directory != "" {
directory += "/"
}
b := buckets.getBucket(bucket)
if b == nil {
return fs.ErrorDirNotFound
}
b.mu.RLock()
defer b.mu.RUnlock()
dirs := make(map[string]struct{})
for absPath, od := range b.objects {
if strings.HasPrefix(absPath, directory) {
remote := absPath[len(prefix):]
if !recurse {
localPath := absPath[len(directory):]
slash := strings.IndexRune(localPath, '/')
if slash >= 0 {
// send a directory if have a slash
dir := directory + localPath[:slash]
if addBucket {
dir = path.Join(bucket, dir)
}
_, found := dirs[dir]
if !found {
err = fn(dir, fs.NewDir(dir, time.Time{}), true)
if err != nil {
return err
}
dirs[dir] = struct{}{}
}
continue // don't send this file if not recursing
}
}
// send an object
if addBucket {
remote = path.Join(bucket, remote)
}
err = fn(remote, f.newObject(remote, od), false)
if err != nil {
return err
}
}
}
return nil
}
// listDir lists the bucket to the entries
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
// List the objects and directories
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, entry fs.DirEntry, isDirectory bool) error {
entries = append(entries, entry)
return nil
})
return entries, err
}
// listBuckets lists the buckets to entries
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
buckets.mu.RLock()
defer buckets.mu.RUnlock()
for name := range buckets.buckets {
entries = append(entries, fs.NewDir(name, time.Time{}))
}
return entries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer fslog.Trace(dir, "")("entries = %q, err = %v", &entries, &err)
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, entry fs.DirEntry, isDirectory bool) error {
return list.Add(entry)
})
}
if bucket == "" {
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
bucket := entry.Remote()
err = listR(bucket, "", f.rootDirectory, true)
if err != nil {
return err
}
}
} else {
err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
if err != nil {
return err
}
}
return list.Flush()
}
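// A consumer-side sketch of the ListR contract documented above
// (illustrative; relies only on fs.ListRer, which this Fs asserts it
// implements at the bottom of the file): the callback may fire several
// times and entries arrive in no particular order, so collect them all
// and sort afterwards if order matters.
func collectAll(ctx context.Context, f fs.ListRer) (entries fs.DirEntries, err error) {
	err = f.ListR(ctx, "", func(batch fs.DirEntries) error {
		entries = append(entries, batch...)
		return nil
	})
	return entries, err
}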
// Put the object into the bucket
//
// Copy the reader into the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
fs := &Object{
fs: f,
remote: src.Remote(),
od: &objectData{
modTime: src.ModTime(ctx),
},
}
return fs, fs.Update(ctx, in, src, options...)
}
// PutStream uploads to the remote path with the modTime given but of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
bucket, _ := f.split(dir)
buckets.makeBucket(bucket)
return nil
}
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
bucket, directory := f.split(dir)
if bucket == "" || directory != "" {
return nil
}
return buckets.deleteBucket(bucket)
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
return time.Nanosecond
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
_ = buckets.makeBucket(dstBucket)
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcBucket, srcPath := srcObj.split()
od := buckets.getObjectData(srcBucket, srcPath)
if od == nil {
return nil, fs.ErrorObjectNotFound
}
buckets.updateObjectData(dstBucket, dstPath, od)
return f.NewObject(ctx, remote)
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hashType)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the hash of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hashType {
return "", hash.ErrUnsupported
}
if o.od.hash == "" {
sum := md5.Sum(o.od.data)
o.od.hash = hex.EncodeToString(sum[:])
}
return o.od.hash, nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return int64(len(o.od.data))
}
// ModTime returns the modification time of the object
//
// For this in-memory backend it is simply the time stored with the
// object data when it was uploaded or last set.
func (o *Object) ModTime(ctx context.Context) (result time.Time) {
return o.od.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
o.od.modTime = modTime
return nil
}
// Storable returns if this object is storable
func (o *Object) Storable() bool {
return true
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
offset, limit = x.Decode(int64(len(o.od.data)))
case *fs.SeekOption:
offset = x.Offset
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if offset > int64(len(o.od.data)) {
offset = int64(len(o.od.data))
}
data := o.od.data[offset:]
if limit >= 0 {
if limit > int64(len(data)) {
limit = int64(len(data))
}
data = data[:limit]
}
return ioutil.NopCloser(bytes.NewBuffer(data)), nil
}
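// A standalone sketch of the offset/limit arithmetic in Open above
// (illustrative): the offset is clamped to the object size, then a
// non-negative limit decoded from a RangeOption caps the returned slice.
func sliceRange(data []byte, offset, limit int64) []byte {
	if offset > int64(len(data)) {
		offset = int64(len(data))
	}
	out := data[offset:]
	if limit >= 0 && limit < int64(len(out)) {
		out = out[:limit]
	}
	return out
}
// sliceRange([]byte("0123456789"), 4, 3) returns []byte("456").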
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucket, bucketPath := o.split()
data, err := ioutil.ReadAll(in)
if err != nil {
return errors.Wrap(err, "failed to update memory object")
}
o.od = &objectData{
data: data,
hash: "",
modTime: src.ModTime(ctx),
mimeType: fs.MimeType(ctx, o),
}
buckets.updateObjectData(bucket, bucketPath, o.od)
return nil
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
bucket, bucketPath := o.split()
removed := buckets.removeObjectData(bucket, bucketPath)
if !removed {
return fs.ErrorObjectNotFound
}
return nil
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
return o.od.mimeType
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)

View File

@@ -1,16 +0,0 @@
// Test memory filesystem interface
package memory
import (
"testing"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: ":memory:",
NilObject: (*Object)(nil),
})
}

View File

@@ -12,13 +12,13 @@ import (
"log"
"net/http"
"path"
"strconv"
"strings"
"time"
"github.com/rclone/rclone/lib/atexit"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/onedrive/api"
"github.com/rclone/rclone/backend/onedrive/quickxorhash"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -26,9 +26,7 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
@@ -60,25 +58,20 @@ var (
AuthURL: "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token",
},
Scopes: []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access", "Sites.Read.All"},
Scopes: []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
// QuickXorHashType is the hash.Type for OneDrive
QuickXorHashType hash.Type
)
// Register with Fs
func init() {
QuickXorHashType = hash.RegisterHash("QuickXorHash", 40, quickxorhash.New)
fs.Register(&fs.RegInfo{
Name: "onedrive",
Description: "Microsoft OneDrive",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
err := oauthutil.Config("onedrive", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
@@ -150,7 +143,7 @@ func init() {
}
sites := siteResponse{}
_, err := srv.CallJSON(ctx, &opts, nil, &sites)
_, err := srv.CallJSON(&opts, nil, &sites)
if err != nil {
log.Fatalf("Failed to query available sites: %v", err)
}
@@ -179,7 +172,7 @@ func init() {
// query Microsoft Graph
if finalDriveID == "" {
drives := drivesResponse{}
_, err := srv.CallJSON(ctx, &opts, nil, &drives)
_, err := srv.CallJSON(&opts, nil, &drives)
if err != nil {
log.Fatalf("Failed to query available drives: %v", err)
}
@@ -201,7 +194,7 @@ func init() {
RootURL: graphURL,
Path: "/drives/" + finalDriveID + "/root"}
var rootItem api.Item
_, err = srv.CallJSON(ctx, &opts, nil, &rootItem)
_, err = srv.CallJSON(&opts, nil, &rootItem)
if err != nil {
log.Fatalf("Failed to query root for drive %s: %v", finalDriveID, err)
}
@@ -224,9 +217,9 @@ func init() {
Help: "Microsoft App Client Secret\nLeave blank normally.",
}, {
Name: "chunk_size",
Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).
Help: `Chunk size to upload files with - must be multiple of 320k.
Above this size files will be chunked - must be multiple of 320k (327,680 bytes). Note
Above this size files will be chunked - must be multiple of 320k. Note
that the chunks will be buffered into memory.`,
Default: defaultChunkSize,
Advanced: true,
@@ -251,63 +244,16 @@ delete OneNote files or otherwise want them to show up in directory
listing, set this option.`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// List of replaced characters:
// < (less than) -> '<' // FULLWIDTH LESS-THAN SIGN
// > (greater than) -> '>' // FULLWIDTH GREATER-THAN SIGN
// : (colon) -> ':' // FULLWIDTH COLON
// " (double quote) -> '"' // FULLWIDTH QUOTATION MARK
// \ (backslash) -> '＼' // FULLWIDTH REVERSE SOLIDUS
// | (vertical line) -> '|' // FULLWIDTH VERTICAL LINE
// ? (question mark) -> '?' // FULLWIDTH QUESTION MARK
// * (asterisk) -> '＊' // FULLWIDTH ASTERISK
// # (number sign) -> '#' // FULLWIDTH NUMBER SIGN
// % (percent sign) -> '%' // FULLWIDTH PERCENT SIGN
//
// Folder names cannot begin with a tilde ('~')
// List of replaced characters:
// ~ (tilde) -> '~' // FULLWIDTH TILDE
//
// Additionally names can't begin with a space ( ) or end with a period (.) or space ( ).
// List of replaced characters:
// . (period) -> '.' // FULLWIDTH FULL STOP
// (space) -> '␠' // SYMBOL FOR SPACE
//
// Also encode invalid UTF-8 bytes as json doesn't handle them.
//
// The OneDrive API documentation lists the set of reserved characters, but
// testing showed this list is incomplete. These are the differences:
// - " (double quote) is rejected, but missing in the documentation
// - space at the end of file and folder names is rejected, but missing in the documentation
// - period at the end of file names is rejected, but missing in the documentation
//
// Adding these restrictions to the OneDrive API documentation yields exactly
// the same rules as the Windows naming conventions.
//
// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/addressing-driveitems?view=odsp-graph-online#path-encoding
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeHashPercent |
encoder.EncodeLeftSpace |
encoder.EncodeLeftTilde |
encoder.EncodeRightPeriod |
encoder.EncodeRightSpace |
encoder.EncodeWin |
encoder.EncodeInvalidUtf8),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
Enc encoder.MultiEncoder `config:"encoding"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
}
// Fs represents a remote OneDrive
@@ -381,30 +327,13 @@ var retryErrorCodes = []int{
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
retry := false
if resp != nil {
switch resp.StatusCode {
case 401:
if len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
retry = true
fs.Debugf(nil, "Should retry: %v", err)
}
case 429: // Too Many Requests.
// see https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
if values := resp.Header["Retry-After"]; len(values) == 1 && values[0] != "" {
retryAfter, parseErr := strconv.Atoi(values[0])
if parseErr != nil {
fs.Debugf(nil, "Failed to parse Retry-After: %q: %v", values[0], parseErr)
} else {
duration := time.Second * time.Duration(retryAfter)
retry = true
err = pacer.RetryAfterError(err, duration)
fs.Debugf(nil, "Too many requests. Trying again in %d seconds.", retryAfter)
}
}
}
authRetry := false
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
authRetry = true
fs.Debugf(nil, "Should retry: %v", err)
}
return retry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
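// A minimal standalone sketch of the Retry-After handling added above
// (stdlib only, illustrative name): parse the integer-seconds form of
// the header into a backoff duration for the pacer. The HTTP-date form
// that RFC 7231 also permits is not handled, matching the code above.
func parseRetryAfter(value string) (time.Duration, bool) {
	seconds, err := strconv.Atoi(value)
	if err != nil {
		return 0, false
	}
	return time.Duration(seconds) * time.Second, true
}
// parseRetryAfter("30") returns (30*time.Second, true).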
// readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID.
@@ -414,15 +343,10 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// instead of simply using `drives/driveID/root:/itemPath` because it works for
// "shared with me" folders in OneDrive Personal (See #2536, #2778)
// This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
//
// If `relPath` == '', do not append the slash (See #3664)
func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
if relPath != "" {
relPath = "/" + withTrailingColon(rest.URLPathEscape(f.opt.Enc.FromStandardPath(relPath)))
}
opts := newOptsCall(normalizedID, "GET", ":"+relPath)
func (f *Fs) readMetaDataForPathRelativeToID(normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
opts := newOptsCall(normalizedID, "GET", ":/"+withTrailingColon(rest.URLPathEscape(replaceReservedChars(relPath))))
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
resp, err = f.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
})
@@ -443,11 +367,11 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
} else {
opts = rest.Opts{
Method: "GET",
Path: "/root:/" + rest.URLPathEscape(f.opt.Enc.FromStandardPath(path)),
Path: "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
}
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
resp, err = f.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
})
return info, resp, err
@@ -502,7 +426,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
}
}
return f.readMetaDataForPathRelativeToID(ctx, baseNormalizedID, relPath)
return f.readMetaDataForPathRelativeToID(baseNormalizedID, relPath)
}
// errorHandler parses a non 2xx error response into an error
@@ -668,7 +592,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
if !ok {
return "", false, errors.New("couldn't find parent ID")
}
info, resp, err := f.readMetaDataForPathRelativeToID(ctx, pathID, leaf)
info, resp, err := f.readMetaDataForPathRelativeToID(pathID, leaf)
if err != nil {
if resp != nil && resp.StatusCode == http.StatusNotFound {
return "", false, nil
@@ -691,11 +615,11 @@ func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, e
var info *api.Item
opts := newOptsCall(dirID, "POST", "/children")
mkdir := api.CreateItemRequest{
Name: f.opt.Enc.FromStandardName(leaf),
Name: replaceReservedChars(leaf),
ConflictBehavior: "fail",
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -718,7 +642,7 @@ type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
// Top parameter asks for bigger pages of data
// https://dev.onedrive.com/odata/optional-query-parameters.htm
opts := newOptsCall(dirID, "GET", "/children?$top=1000")
@@ -727,7 +651,7 @@ OUTER:
var result api.ListChildrenResponse
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -751,7 +675,7 @@ OUTER:
if item.Deleted != nil {
continue
}
item.Name = f.opt.Enc.ToStandardName(item.GetName())
item.Name = restoreReservedChars(item.GetName())
if fn(item) {
found = true
break OUTER
@@ -785,7 +709,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
if !f.opt.ExposeOneNoteFiles && info.GetPackageType() == api.PackageTypeOneNote {
fs.Debugf(info.Name, "OneNote file not shown in directory listing")
return false
@@ -869,12 +793,12 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
}
// deleteObject removes an object by ID
func (f *Fs) deleteObject(ctx context.Context, id string) error {
func (f *Fs) deleteObject(id string) error {
opts := newOptsCall(id, "DELETE", "")
opts.NoResponse = true
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
resp, err := f.srv.Call(&opts)
return shouldRetry(resp, err)
})
}
@@ -897,7 +821,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
}
if check {
// check to see if there are any items
found, err := f.listAll(ctx, rootID, false, false, func(item *api.Item) bool {
found, err := f.listAll(rootID, false, false, func(item *api.Item) bool {
return true
})
if err != nil {
@@ -907,7 +831,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return fs.ErrorDirectoryNotEmpty
}
}
err = f.deleteObject(ctx, rootID)
err = f.deleteObject(rootID)
if err != nil {
return err
}
@@ -988,8 +912,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}
srcPath := srcObj.rootPath()
dstPath := f.rootPath(remote)
srcPath := srcObj.fs.rootSlash() + srcObj.remote
dstPath := f.rootSlash() + remote
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
@@ -1007,7 +931,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
id, dstDriveID, _ := parseNormalizedID(directoryID)
replacedLeaf := f.opt.Enc.FromStandardName(leaf)
replacedLeaf := replaceReservedChars(leaf)
copyReq := api.CopyItemRequest{
Name: &replacedLeaf,
ParentReference: api.ItemReference{
@@ -1017,7 +941,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &copyReq, nil)
resp, err = f.srv.CallJSON(&opts, &copyReq, nil)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1091,7 +1015,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
opts := newOptsCall(srcObj.id, "PATCH", "")
move := api.MoveItemRequest{
Name: f.opt.Enc.FromStandardName(leaf),
Name: replaceReservedChars(leaf),
ParentReference: &api.ItemReference{
DriveID: dstDriveID,
ID: id,
@@ -1105,7 +1029,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
var resp *http.Response
var info api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
resp, err = f.srv.CallJSON(&opts, &move, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1198,7 +1122,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// Get timestamps of src so they can be preserved
srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(ctx, srcID, "")
srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(srcID, "")
if err != nil {
return err
}
@@ -1206,7 +1130,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Do the move
opts := newOptsCall(srcID, "PATCH", "")
move := api.MoveItemRequest{
Name: f.opt.Enc.FromStandardName(leaf),
Name: replaceReservedChars(leaf),
ParentReference: &api.ItemReference{
DriveID: dstDriveID,
ID: parsedDstDirID,
@@ -1220,7 +1144,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
var resp *http.Response
var info api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
resp, err = f.srv.CallJSON(&opts, &move, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1246,7 +1170,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &drive)
resp, err = f.srv.CallJSON(&opts, nil, &drive)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1267,12 +1191,12 @@ func (f *Fs) Hashes() hash.Set {
if f.driveType == driveTypePersonal {
return hash.Set(hash.SHA1)
}
return hash.Set(QuickXorHashType)
return hash.Set(hash.QuickXorHash)
}
// PublicLink returns a link for downloading without account.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
info, _, err := f.readMetaDataForPath(ctx, f.rootPath(remote))
info, _, err := f.readMetaDataForPath(ctx, f.srvPath(remote))
if err != nil {
return "", err
}
@@ -1286,7 +1210,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
var resp *http.Response
var result api.CreateShareLinkResponse
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &share, &result)
resp, err = f.srv.CallJSON(&opts, &share, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1316,19 +1240,9 @@ func (o *Object) Remote() string {
return o.remote
}
// rootPath returns a path for use in server given a remote
func (f *Fs) rootPath(remote string) string {
return f.rootSlash() + remote
}
// rootPath returns a path for use in local functions
func (o *Object) rootPath() string {
return o.fs.rootPath(o.remote)
}
// srvPath returns a path for use in server given a remote
func (f *Fs) srvPath(remote string) string {
return f.opt.Enc.FromStandardPath(f.rootSlash() + remote)
return replaceReservedChars(f.rootSlash() + remote)
}
// srvPath returns a path for use in server
@@ -1343,7 +1257,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
return o.sha1, nil
}
} else {
if t == QuickXorHashType {
if t == hash.QuickXorHash {
return o.quickxorhash, nil
}
}
@@ -1405,7 +1319,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
if o.hasMetaData {
return nil
}
info, _, err := o.fs.readMetaDataForPath(ctx, o.rootPath())
info, _, err := o.fs.readMetaDataForPath(ctx, o.srvPath())
if err != nil {
if apiErr, ok := err.(*api.Error); ok {
if apiErr.ErrorInfo.Code == "itemNotFound" {
@@ -1440,7 +1354,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
opts = rest.Opts{
Method: "PATCH",
RootURL: rootURL,
Path: "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf))),
Path: "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(leaf)),
}
} else {
opts = rest.Opts{
@@ -1456,7 +1370,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
}
var info *api.Item
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
return shouldRetry(resp, err)
})
return info, err
@@ -1491,7 +1405,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
opts.Options = options
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1514,8 +1428,7 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
opts = rest.Opts{
Method: "POST",
RootURL: rootURL,
Path: fmt.Sprintf("/%s/items/%s:/%s:/createUploadSession",
drive, id, rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf))),
Path: "/" + drive + "/items/" + id + ":/" + rest.URLPathEscape(replaceReservedChars(leaf)) + ":/createUploadSession",
}
} else {
opts = rest.Opts{
@@ -1528,7 +1441,7 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
createRequest.Item.FileSystemInfo.LastModifiedDateTime = api.Timestamp(modTime)
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &createRequest, &response)
resp, err = o.fs.srv.CallJSON(&opts, &createRequest, &response)
if apiErr, ok := err.(*api.Error); ok {
if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
// Make the error more user-friendly
@@ -1540,94 +1453,39 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
return response, err
}
// getPosition gets the current position in a multipart upload
func (o *Object) getPosition(ctx context.Context, url string) (pos int64, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: url,
}
var info api.UploadFragmentResponse
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(resp, err)
})
if err != nil {
return 0, err
}
if len(info.NextExpectedRanges) != 1 {
return 0, errors.Errorf("bad number of ranges in upload position: %v", info.NextExpectedRanges)
}
position := info.NextExpectedRanges[0]
i := strings.IndexByte(position, '-')
if i < 0 {
return 0, errors.Errorf("no '-' in next expected range: %q", position)
}
position = position[:i]
pos, err = strconv.ParseInt(position, 10, 64)
if err != nil {
return 0, errors.Wrapf(err, "bad expected range: %q", position)
}
return pos, nil
}
// uploadFragment uploads a part
func (o *Object) uploadFragment(ctx context.Context, url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64) (info *api.Item, err error) {
func (o *Object) uploadFragment(url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64) (info *api.Item, err error) {
opts := rest.Opts{
Method: "PUT",
RootURL: url,
ContentLength: &chunkSize,
ContentRange: fmt.Sprintf("bytes %d-%d/%d", start, start+chunkSize-1, totalSize),
Body: chunk,
}
// var response api.UploadFragmentResponse
var resp *http.Response
var body []byte
var skip = int64(0)
err = o.fs.pacer.Call(func() (bool, error) {
toSend := chunkSize - skip
opts := rest.Opts{
Method: "PUT",
RootURL: url,
ContentLength: &toSend,
ContentRange: fmt.Sprintf("bytes %d-%d/%d", start+skip, start+chunkSize-1, totalSize),
Body: chunk,
_, _ = chunk.Seek(0, io.SeekStart)
resp, err = o.fs.srv.Call(&opts)
if resp != nil {
defer fs.CheckClose(resp.Body, &err)
}
_, _ = chunk.Seek(skip, io.SeekStart)
resp, err = o.fs.srv.Call(ctx, &opts)
if err != nil && resp != nil && resp.StatusCode == http.StatusRequestedRangeNotSatisfiable {
fs.Debugf(o, "Received 416 error - reading current position from server: %v", err)
pos, posErr := o.getPosition(ctx, url)
if posErr != nil {
fs.Debugf(o, "Failed to read position: %v", posErr)
return false, posErr
retry, err := shouldRetry(resp, err)
if !retry && resp != nil {
if resp.StatusCode == 200 || resp.StatusCode == 201 {
// we are done :)
// read the item
info = &api.Item{}
return false, json.NewDecoder(resp.Body).Decode(info)
}
skip = pos - start
fs.Debugf(o, "Read position %d, chunk is %d..%d, bytes to skip = %d", pos, start, start+chunkSize, skip)
switch {
case skip < 0:
return false, errors.Wrapf(err, "sent block already (skip %d < 0), can't rewind", skip)
case skip > chunkSize:
return false, errors.Wrapf(err, "position is in the future (skip %d > chunkSize %d), can't skip forward", skip, chunkSize)
case skip == chunkSize:
fs.Debugf(o, "Skipping chunk as already sent (skip %d == chunkSize %d)", skip, chunkSize)
return false, nil
}
return true, errors.Wrapf(err, "retry this chunk skipping %d bytes", skip)
}
if err != nil {
return shouldRetry(resp, err)
}
body, err = rest.ReadBody(resp)
if err != nil {
return shouldRetry(resp, err)
}
if resp.StatusCode == 200 || resp.StatusCode == 201 {
// we are done :)
// read the item
info = &api.Item{}
return false, json.Unmarshal(body, info)
}
return false, nil
return retry, err
})
return info, err
}
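// A standalone sketch of the 416 resume arithmetic above (illustrative
// name, stdlib only): given the chunk start, the chunk size and the
// position reported by getPosition, work out how much of the chunk the
// server already holds.
func resumeSkip(start, chunkSize, pos int64) (skip int64, wholeChunkDone bool, err error) {
	skip = pos - start
	switch {
	case skip < 0:
		return 0, false, fmt.Errorf("sent block already (skip %d < 0), can't rewind", skip)
	case skip > chunkSize:
		return 0, false, fmt.Errorf("position is in the future (skip %d > chunkSize %d)", skip, chunkSize)
	case skip == chunkSize:
		return skip, true, nil // server already has the whole chunk
	}
	return skip, false, nil // reseek past skip bytes and resend the tail
}
// e.g. start=1000, chunkSize=1000, pos=1500 -> skip=500, so only bytes
// 1500..1999 of the upload are resent.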
// cancelUploadSession cancels an upload session
func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error) {
func (o *Object) cancelUploadSession(url string) (err error) {
opts := rest.Opts{
Method: "DELETE",
RootURL: url,
@@ -1635,7 +1493,7 @@ func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
return
@@ -1657,7 +1515,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
}
fs.Debugf(o, "Cancelling multipart upload")
cancelErr := o.cancelUploadSession(ctx, uploadURL)
cancelErr := o.cancelUploadSession(uploadURL)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
}
@@ -1693,7 +1551,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
}
seg := readers.NewRepeatableReader(io.LimitReader(in, n))
fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n)
info, err = o.uploadFragment(uploadURL, position, size, seg, n)
if err != nil {
return nil, err
}
@@ -1720,7 +1578,7 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
opts = rest.Opts{
Method: "PUT",
RootURL: rootURL,
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf)) + ":/content",
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(leaf) + ":/content",
ContentLength: &size,
Body: in,
}
@@ -1734,7 +1592,7 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
resp, err = o.fs.srv.CallJSON(&opts, nil, &info)
if apiErr, ok := err.(*api.Error); ok {
if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
// Make the error more user-friendly
@@ -1786,7 +1644,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.deleteObject(ctx, o.id)
return o.fs.deleteObject(o.id)
}
// MimeType of an Object if known, "" otherwise

View File

@@ -0,0 +1,91 @@
/*
Translate file names for OneDrive
OneDrive reserved characters
The following characters are OneDrive reserved characters, and can't
be used in OneDrive folder and file names.
onedrive-reserved = "/" / "\" / "*" / "<" / ">" / "?" / ":" / "|"
onedrive-business-reserved
= "/" / "\" / "*" / "<" / ">" / "?" / ":" / "|" / "#" / "%"
Note: Folder names can't end with a period (.).
Note: OneDrive for Business file or folder names cannot begin with a
tilde ('~').
*/
package onedrive
import (
"regexp"
"strings"
)
// charMap holds replacements for characters
//
// Onedrive has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH Unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
'*': '＊', // FULLWIDTH ASTERISK
'<': '<', // FULLWIDTH LESS-THAN SIGN
'>': '>', // FULLWIDTH GREATER-THAN SIGN
'?': '?', // FULLWIDTH QUESTION MARK
':': ':', // FULLWIDTH COLON
'|': '|', // FULLWIDTH VERTICAL LINE
'#': '#', // FULLWIDTH NUMBER SIGN
'%': '%', // FULLWIDTH PERCENT SIGN
'"': '"', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
'.': '.', // FULLWIDTH FULL STOP
'~': '~', // FULLWIDTH TILDE
' ': '␠', // SYMBOL FOR SPACE
}
invCharMap map[rune]rune
fixEndingInPeriod = regexp.MustCompile(`\.(/|$)`)
fixStartingWithTilde = regexp.MustCompile(`(/|^)~`)
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
)
func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}
// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// Folder names can't end with a period '.'
in = fixEndingInPeriod.ReplaceAllString(in, string(charMap['.'])+"$1")
// OneDrive for Business file or folder names cannot begin with a tilde '~'
in = fixStartingWithTilde.ReplaceAllString(in, "$1"+string(charMap['~']))
// Apparently file names can't start with space either
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Replace reserved characters
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != '.' && c != '~' && c != ' ' {
return replacement
}
return c
}, in)
}
// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}

View File

@@ -0,0 +1,30 @@
package onedrive
import "testing"
func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{`\*<>?:|#%".~`, `＼＊<>?:|#%".~`},
{`\*<>?:|#%".~/\*<>?:|#%".~`, `＼＊<>?:|#%".~/＼＊<>?:|#%".~`},
{" leading space", "␠leading space"},
{"~leading tilde", "~leading tilde"},
{"trailing dot.", "trailing dot."},
{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
{"~leading tilde/~leading tilde/~leading tilde", "~leading tilde/~leading tilde/~leading tilde"},
{"trailing dot./trailing dot./trailing dot.", "trailing dot./trailing dot./trailing dot."},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}

View File

@@ -13,7 +13,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
@@ -21,7 +20,6 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
@@ -49,57 +47,14 @@ func init() {
Help: "Password.",
IsPassword: true,
Required: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// List of replaced characters:
// < (less than) -> '<' // FULLWIDTH LESS-THAN SIGN
// > (greater than) -> '>' // FULLWIDTH GREATER-THAN SIGN
// : (colon) -> ':' // FULLWIDTH COLON
// " (double quote) -> '"' // FULLWIDTH QUOTATION MARK
// \ (backslash) -> '＼' // FULLWIDTH REVERSE SOLIDUS
// | (vertical line) -> '|' // FULLWIDTH VERTICAL LINE
// ? (question mark) -> '?' // FULLWIDTH QUESTION MARK
// * (asterisk) -> '＊' // FULLWIDTH ASTERISK
//
// Additionally names can't begin or end with ASCII whitespace.
// List of replaced characters:
// (space) -> '␠' // SYMBOL FOR SPACE
// (horizontal tab) -> '␉' // SYMBOL FOR HORIZONTAL TABULATION
// (line feed) -> '␊' // SYMBOL FOR LINE FEED
// (vertical tab) -> '␋' // SYMBOL FOR VERTICAL TABULATION
// (carriage return) -> '␍' // SYMBOL FOR CARRIAGE RETURN
//
// Also encode invalid UTF-8 bytes as json doesn't handle them properly.
//
// https://www.opendrive.com/wp-content/uploads/guides/OpenDrive_API_guide.pdf
Default: (encoder.Base |
encoder.EncodeWin |
encoder.EncodeLeftCrLfHtVt |
encoder.EncodeRightCrLfHtVt |
encoder.EncodeBackSlash |
encoder.EncodeLeftSpace |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8),
}, {
Name: "chunk_size",
Help: `Files will be uploaded in chunks this size.
Note that these chunks are buffered in memory so increasing them will
increase memory use.`,
Default: 10 * fs.MebiByte,
Advanced: true,
}},
})
}
// Options defines the configuration for this backend
type Options struct {
UserName string `config:"username"`
Password string `config:"password"`
Enc encoder.MultiEncoder `config:"encoding"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UserName string `config:"username"`
Password string `config:"password"`
}
// Fs represents a remote server
@@ -206,7 +161,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
Method: "POST",
Path: "/session/login.json",
}
resp, err = f.srv.CallJSON(ctx, &opts, &account, &f.session)
resp, err = f.srv.CallJSON(&opts, &account, &f.session)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -291,7 +246,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
}
// deleteObject removes an object by ID
func (f *Fs) deleteObject(ctx context.Context, id string) error {
func (f *Fs) deleteObject(id string) error {
return f.pacer.Call(func() (bool, error) {
removeDirData := removeFolder{SessionID: f.session.SessionID, FolderID: id}
opts := rest.Opts{
@@ -299,7 +254,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {
NoResponse: true,
Path: "/folder/remove.json",
}
resp, err := f.srv.CallJSON(ctx, &opts, &removeDirData, nil)
resp, err := f.srv.CallJSON(&opts, &removeDirData, nil)
return f.shouldRetry(resp, err)
})
}
@@ -320,14 +275,14 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
if err != nil {
return err
}
item, err := f.readMetaDataForFolderID(ctx, rootID)
item, err := f.readMetaDataForFolderID(rootID)
if err != nil {
return err
}
if check && len(item.Files) != 0 {
return errors.New("folder not empty")
}
err = f.deleteObject(ctx, rootID)
err = f.deleteObject(rootID)
if err != nil {
return err
}
@@ -398,7 +353,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
Method: "POST",
Path: "/file/move_copy.json",
}
resp, err = f.srv.CallJSON(ctx, &opts, &copyFileData, &response)
resp, err = f.srv.CallJSON(&opts, &copyFileData, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -455,7 +410,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
Method: "POST",
Path: "/file/move_copy.json",
}
resp, err = f.srv.CallJSON(ctx, &opts, &copyFileData, &response)
resp, err = f.srv.CallJSON(&opts, &copyFileData, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -554,7 +509,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
Method: "POST",
Path: "/folder/move_copy.json",
}
resp, err = f.srv.CallJSON(ctx, &opts, &moveFolderData, &response)
resp, err = f.srv.CallJSON(&opts, &moveFolderData, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -630,18 +585,18 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
fs: f,
remote: remote,
}
return o, f.opt.Enc.FromStandardName(leaf), directoryID, nil
return o, leaf, directoryID, nil
}
// readMetaDataForFolderID reads the metadata for the folder with the given ID
func (f *Fs) readMetaDataForFolderID(ctx context.Context, id string) (info *FolderList, err error) {
func (f *Fs) readMetaDataForFolderID(id string) (info *FolderList, err error) {
var resp *http.Response
opts := rest.Opts{
Method: "GET",
Path: "/folder/list.json/" + f.session.SessionID + "/" + id,
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
resp, err = f.srv.CallJSON(&opts, nil, &info)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -681,16 +636,12 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
var resp *http.Response
response := createFileResponse{}
err := o.fs.pacer.Call(func() (bool, error) {
createFileData := createFile{
SessionID: o.fs.session.SessionID,
FolderID: directoryID,
Name: leaf,
}
createFileData := createFile{SessionID: o.fs.session.SessionID, FolderID: directoryID, Name: replaceReservedChars(leaf)}
opts := rest.Opts{
Method: "POST",
Path: "/upload/create_file.json",
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, &createFileData, &response)
resp, err = o.fs.srv.CallJSON(&opts, &createFileData, &response)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -732,7 +683,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
err = f.pacer.Call(func() (bool, error) {
createDirData := createFolder{
SessionID: f.session.SessionID,
FolderName: f.opt.Enc.FromStandardName(leaf),
FolderName: replaceReservedChars(leaf),
FolderSubParent: pathID,
FolderIsPublic: 0,
FolderPublicUpl: 0,
@@ -743,7 +694,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
Method: "POST",
Path: "/folder.json",
}
resp, err = f.srv.CallJSON(ctx, &opts, &createDirData, &response)
resp, err = f.srv.CallJSON(&opts, &createDirData, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -771,15 +722,15 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
Method: "GET",
Path: "/folder/list.json/" + f.session.SessionID + "/" + pathID,
}
resp, err = f.srv.CallJSON(ctx, &opts, nil, &folderList)
resp, err = f.srv.CallJSON(&opts, nil, &folderList)
return f.shouldRetry(resp, err)
})
if err != nil {
return "", false, errors.Wrap(err, "failed to get folder list")
}
leaf = f.opt.Enc.FromStandardName(leaf)
for _, folder := range folderList.Folders {
folder.Name = restoreReservedChars(folder.Name)
// fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID)
if leaf == folder.Name {
@@ -818,7 +769,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
folderList := FolderList{}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &folderList)
resp, err = f.srv.CallJSON(&opts, nil, &folderList)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -826,7 +777,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
for _, folder := range folderList.Folders {
folder.Name = f.opt.Enc.ToStandardName(folder.Name)
folder.Name = restoreReservedChars(folder.Name)
// fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID)
remote := path.Join(dir, folder.Name)
// cache the directory ID for later lookups
@@ -837,7 +788,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
for _, file := range folderList.Files {
file.Name = f.opt.Enc.ToStandardName(file.Name)
file.Name = restoreReservedChars(file.Name)
// fs.Debugf(nil, "File: %s (%s)", file.Name, file.FileID)
remote := path.Join(dir, file.Name)
o, err := f.newObjectWithInfo(ctx, remote, &file)
@@ -900,13 +851,9 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
NoResponse: true,
Path: "/file/filesettings.json",
}
update := modTimeFile{
SessionID: o.fs.session.SessionID,
FileID: o.id,
FileModificationTime: strconv.FormatInt(modTime.Unix(), 10),
}
update := modTimeFile{SessionID: o.fs.session.SessionID, FileID: o.id, FileModificationTime: strconv.FormatInt(modTime.Unix(), 10)}
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, nil)
resp, err := o.fs.srv.CallJSON(&opts, &update, nil)
return o.fs.shouldRetry(resp, err)
})
@@ -926,7 +873,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -945,7 +892,7 @@ func (o *Object) Remove(ctx context.Context) error {
NoResponse: true,
Path: "/file.json/" + o.fs.session.SessionID + "/" + o.id,
}
resp, err := o.fs.srv.Call(ctx, &opts)
resp, err := o.fs.srv.Call(&opts)
return o.fs.shouldRetry(resp, err)
})
}
@@ -973,7 +920,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Method: "POST",
Path: "/upload/open_file_upload.json",
}
resp, err := o.fs.srv.CallJSON(ctx, &opts, &openUploadData, &openResponse)
resp, err := o.fs.srv.CallJSON(&opts, &openUploadData, &openResponse)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -982,13 +929,15 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// resp.Body.Close()
// fs.Debugf(nil, "PostOpen: %#v", openResponse)
buf := make([]byte, o.fs.opt.ChunkSize)
// 10 MB chunk size
chunkSize := int64(1024 * 1024 * 10)
buf := make([]byte, int(chunkSize))
chunkOffset := int64(0)
remainingBytes := size
chunkCounter := 0
for remainingBytes > 0 {
currentChunkSize := int64(o.fs.opt.ChunkSize)
currentChunkSize := chunkSize
if currentChunkSize > remainingBytes {
currentChunkSize = remainingBytes
}
@@ -1017,7 +966,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
MultipartFileName: o.remote, // ..name of the file for the attached file
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &reply)
resp, err = o.fs.srv.CallJSON(&opts, nil, &reply)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -1040,7 +989,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Method: "POST",
Path: "/upload/close_file_upload.json",
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, &closeUploadData, &closeResponse)
resp, err = o.fs.srv.CallJSON(&opts, &closeUploadData, &closeResponse)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -1066,7 +1015,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
NoResponse: true,
Path: "/file/access.json",
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, &update, nil)
resp, err = o.fs.srv.CallJSON(&opts, &update, nil)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -1089,10 +1038,9 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
opts := rest.Opts{
Method: "GET",
Path: fmt.Sprintf("/folder/itembyname.json/%s/%s?name=%s",
o.fs.session.SessionID, directoryID, url.QueryEscape(o.fs.opt.Enc.FromStandardName(leaf))),
Path: "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + url.QueryEscape(replaceReservedChars(leaf)),
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &folderList)
resp, err = o.fs.srv.CallJSON(&opts, nil, &folderList)
return o.fs.shouldRetry(resp, err)
})
if err != nil {

View File

@@ -0,0 +1,78 @@
/*
Translate file names for OpenDrive
OpenDrive reserved characters
The following characters are OpenDrive reserved characters, and can't
be used in OpenDrive folder and file names.
\ / : * ? " < > |
OpenDrive files and folders also can't have leading or trailing spaces.
*/
package opendrive
import (
"regexp"
"strings"
)
// charMap holds replacements for characters
//
// OpenDrive has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH Unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
':': ':', // FULLWIDTH COLON
'*': '＊', // FULLWIDTH ASTERISK
'?': '?', // FULLWIDTH QUESTION MARK
'"': '"', // FULLWIDTH QUOTATION MARK
'<': '<', // FULLWIDTH LESS-THAN SIGN
'>': '>', // FULLWIDTH GREATER-THAN SIGN
'|': '|', // FULLWIDTH VERTICAL LINE
' ': '␠', // SYMBOL FOR SPACE
}
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
fixEndingWithSpace = regexp.MustCompile(` (/|$)`)
invCharMap map[rune]rune
)
func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}
// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// Filenames can't start with space
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Filenames can't end with space
in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != ' ' {
return replacement
}
return c
}, in)
}
// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}

View File

@@ -0,0 +1,28 @@
package opendrive
import "testing"
func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{`\*<>?:|#%".~`, `＼＊<>?:|#%".~`},
{`\*<>?:|#%".~/\*<>?:|#%".~`, `＼＊<>?:|#%".~/＼＊<>?:|#%".~`},
{" leading space", "␠leading space"},
{" path/  leading spaces", "␠path/␠ leading spaces"},
{"trailing space ", "trailing space␠"},
{"trailing spaces  /path ", "trailing spaces ␠/path␠"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}

View File

@@ -29,7 +29,6 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
@@ -79,23 +78,12 @@ func init() {
}, {
Name: config.ConfigClientSecret,
Help: "Pcloud App Client Secret\nLeave blank normally.",
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
//
// TODO: Investigate Unicode simplification (＼ gets converted to \ server-side)
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote pcloud
@@ -187,6 +175,21 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return doRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// substitute reserved characters for pcloud
//
// Generally all characters are allowed in filenames, except the NULL
// byte and the forward and back slashes (/, \ and \0)
func replaceReservedChars(x string) string {
// Backslash for FULLWIDTH REVERSE SOLIDUS
return strings.Replace(x, "\\", "", -1)
}
// restore reserved characters for pcloud
func restoreReservedChars(x string) string {
// FULLWIDTH REVERSE SOLIDUS for Backslash
return strings.Replace(x, "", "\\", -1)
}
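// A roundtrip sketch for the two helpers above, in the same style as the
// replace tests in the onedrive and opendrive backends (illustrative -
// this would live in a _test.go file with the "testing" import):
func TestReplaceRoundtrip(t *testing.T) {
	in := `a\b/c`
	got := replaceReservedChars(in)
	if got != "a＼b/c" {
		t.Errorf("replaceReservedChars(%q) = %q", in, got)
	}
	if back := restoreReservedChars(got); back != in {
		t.Errorf("restoreReservedChars(%q) = %q, want %q", got, back, in)
	}
}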
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
@@ -198,7 +201,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
return nil, err
}
found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
found, err := f.listAll(directoryID, false, true, func(item *api.Item) bool {
if item.Name == leaf {
info = item
return true
@@ -331,7 +334,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
if item.Name == leaf {
pathIDOut = item.ID
return true
@@ -351,10 +354,10 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
Path: "/createfolder",
Parameters: url.Values{},
}
opts.Parameters.Set("name", f.opt.Enc.FromStandardName(leaf))
opts.Parameters.Set("name", replaceReservedChars(leaf))
opts.Parameters.Set("folderid", dirIDtoNumber(pathID))
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -397,7 +400,7 @@ type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/listfolder",
@@ -409,7 +412,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
var result api.ItemResult
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -427,7 +430,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
continue
}
}
item.Name = f.opt.Enc.ToStandardName(item.Name)
item.Name = restoreReservedChars(item.Name)
if fn(item) {
found = true
break
@@ -455,7 +458,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
remote := path.Join(dir, info.Name)
if info.IsFolder {
// cache the directory ID for later lookups
@@ -560,7 +563,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
var resp *http.Response
var result api.ItemResult
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -619,13 +622,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
Parameters: url.Values{},
}
opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(leaf))
opts.Parameters.Set("toname", replaceReservedChars(leaf))
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
opts.Parameters.Set("mtime", fmt.Sprintf("%d", srcObj.modTime.Unix()))
var resp *http.Response
var result api.ItemResult
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -663,7 +666,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
var resp *http.Response
var result api.Error
return f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
err = result.Update(err)
return shouldRetry(resp, err)
})
@@ -698,12 +701,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
Parameters: url.Values{},
}
opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(leaf))
opts.Parameters.Set("toname", replaceReservedChars(leaf))
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
var resp *http.Response
var result api.ItemResult
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -795,12 +798,12 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
Parameters: url.Values{},
}
opts.Parameters.Set("folderid", dirIDtoNumber(srcID))
opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(leaf))
opts.Parameters.Set("toname", replaceReservedChars(leaf))
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
var resp *http.Response
var result api.ItemResult
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -827,7 +830,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
var resp *http.Response
var q api.UserInfo
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &q)
resp, err = f.srv.CallJSON(&opts, nil, &q)
err = q.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -878,7 +881,7 @@ func (o *Object) getHashes(ctx context.Context) (err error) {
}
opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = o.fs.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -981,7 +984,7 @@ func (o *Object) Storable() bool {
}
// downloadURL fetches the download link
func (o *Object) downloadURL(ctx context.Context) (URL string, err error) {
func (o *Object) downloadURL() (URL string, err error) {
if o.id == "" {
return "", errors.New("can't download - no id")
}
@@ -997,7 +1000,7 @@ func (o *Object) downloadURL(ctx context.Context) (URL string, err error) {
}
opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = o.fs.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -1013,7 +1016,7 @@ func (o *Object) downloadURL(ctx context.Context) (URL string, err error) {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
url, err := o.downloadURL(ctx)
url, err := o.downloadURL()
if err != nil {
return nil, err
}
@@ -1024,7 +1027,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1075,7 +1078,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Parameters: url.Values{},
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
}
leaf = o.fs.opt.Enc.FromStandardName(leaf)
leaf = replaceReservedChars(leaf)
opts.Parameters.Set("filename", leaf)
opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
opts.Parameters.Set("nopartial", "1")
@@ -1101,7 +1104,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = o.fs.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -1131,7 +1134,7 @@ func (o *Object) Remove(ctx context.Context) error {
var result api.ItemResult
opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &result)
resp, err := o.fs.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
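
The pairs of near-identical lines above are the two sides of this diff with their +/- markers lost in rendering: one side threads a ctx through every REST call and encodes names via f.opt.Enc.FromStandardName/ToStandardName, the other is the older variant with no context and the pcloud-specific replaceReservedChars/restoreReservedChars helpers. Every hunk also shares one retry idiom; as a sketch (names as in the diff, not a complete program), f.pacer.Call re-runs its callback for as long as it returns (true, err):

	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		err = result.Error.Update(err) // surface API-level errors carried in the JSON body
		return shouldRetry(resp, err)  // true asks the pacer to back off and retry
	})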

View File

@@ -1,83 +0,0 @@
// Package api contains definitions for using the premiumize.me API
package api
import "fmt"
// Response is returned by all messages and embedded in the
// structures below
type Response struct {
Message string `json:"message,omitempty"`
Status string `json:"status"`
}
// Error satisfies the error interface
func (e *Response) Error() string {
return fmt.Sprintf("%s: %s", e.Status, e.Message)
}
// AsErr checks the status and returns an error if bad or nil if good
func (e *Response) AsErr() error {
if e.Status != "success" {
return e
}
return nil
}
// Item Types
const (
ItemTypeFolder = "folder"
ItemTypeFile = "file"
)
// Item refers to a file or folder
type Item struct {
Breadcrumbs []Breadcrumb `json:"breadcrumbs"`
CreatedAt int64 `json:"created_at,omitempty"`
ID string `json:"id"`
Link string `json:"link,omitempty"`
Name string `json:"name"`
Size int64 `json:"size,omitempty"`
StreamLink string `json:"stream_link,omitempty"`
Type string `json:"type"`
TranscodeStatus string `json:"transcode_status"`
IP string `json:"ip"`
MimeType string `json:"mime_type"`
}
// Breadcrumb is part of the breadcrumb trail for a file or folder. It
// is returned as part of folder/list if required
type Breadcrumb struct {
ID string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
ParentID string `json:"parent_id,omitempty"`
}
// FolderListResponse is the response to folder/list
type FolderListResponse struct {
Response
Content []Item `json:"content"`
Name string `json:"name,omitempty"`
ParentID string `json:"parent_id,omitempty"`
}
// FolderCreateResponse is the response to folder/create
type FolderCreateResponse struct {
Response
ID string `json:"id,omitempty"`
}
// FolderUploadinfoResponse is the response to folder/uploadinfo
type FolderUploadinfoResponse struct {
Response
Token string `json:"token,omitempty"`
URL string `json:"url,omitempty"`
}
// AccountInfoResponse is the response to account/info
type AccountInfoResponse struct {
Response
CustomerID string `json:"customer_id,omitempty"`
LimitUsed float64 `json:"limit_used,omitempty"` // fraction 0..1 of download traffic limit
PremiumUntil int64 `json:"premium_until,omitempty"`
SpaceUsed float64 `json:"space_used,omitempty"`
}
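
Since every response type embeds Response, a caller can decode and error-check in one motion. A minimal usage sketch (hypothetical, not part of the diff; body is assumed to hold the raw JSON):

	var out FolderListResponse
	if err := json.Unmarshal(body, &out); err != nil {
		return err
	}
	if err := out.AsErr(); err != nil {
		return err // Status != "success"
	}
	for _, item := range out.Content {
		fmt.Println(item.Type, item.Name, item.Size)
	}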

File diff suppressed because it is too large

View File

@@ -1,17 +0,0 @@
// Test filesystem interface
package premiumizeme_test
import (
"testing"
"github.com/rclone/rclone/backend/premiumizeme"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestPremiumizeMe:",
NilObject: (*premiumizeme.Object)(nil),
})
}
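
For context (not part of the diff): fstests.Run drives rclone's full standard backend test suite against the named remote, which is why one small file per backend suffices. With a TestPremiumizeMe remote configured, it is typically invoked as:

	go test -v -remote TestPremiumizeMe: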

View File

@@ -1 +0,0 @@
test

View File

@@ -1,75 +0,0 @@
package press
import (
"bufio"
"io"
"github.com/klauspost/compress/gzip"
)
// AlgGzip represents the gzip compression algorithm
type AlgGzip struct {
level int
blockSize uint32
}
// InitializeGzip initializes the gzip compression Algorithm
func InitializeGzip(bs uint32, level int) Algorithm {
a := new(AlgGzip)
a.blockSize = bs
a.level = level
return a
}
// GetFileExtension returns the file extension
func (a *AlgGzip) GetFileExtension() string {
return ".gz"
}
// GetHeader returns the gzip compression header (empty)
func (a *AlgGzip) GetHeader() []byte {
return []byte{}
}
// GetFooter returns the gzip compression footer (empty)
func (a *AlgGzip) GetFooter() []byte {
return []byte{}
}
// CompressBlock compresses a block using gzip
func (a *AlgGzip) CompressBlock(in []byte, out io.Writer) (compressedSize uint32, uncompressedSize uint64, err error) {
// Initialize buffer
bufw := bufio.NewWriterSize(out, int(a.blockSize+(a.blockSize)>>4))
// Initialize block writer
outw, err := gzip.NewWriterLevel(bufw, a.level)
if err != nil {
return 0, 0, err
}
// Compress block
_, err = outw.Write(in)
if err != nil {
return 0, 0, err
}
// Finalize gzip file, flush buffer and return
err = outw.Close()
if err != nil {
return 0, 0, err
}
blockSize := uint32(bufw.Buffered())
err = bufw.Flush()
return blockSize, uint64(len(in)), err
}
// DecompressBlock decompresses a gzip compressed block
func (a *AlgGzip) DecompressBlock(in io.Reader, out io.Writer, BlockSize uint32) (n int, err error) {
gzipReader, err := gzip.NewReader(in)
if err != nil {
return 0, err
}
written, err := io.Copy(out, gzipReader)
return int(written), err
}
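
A round-trip sketch (assuming the Algorithm interface exposes exactly the methods defined above; note this implementation ignores the BlockSize argument to DecompressBlock, since the gzip stream is self-describing):

	alg := InitializeGzip(128*1024, gzip.DefaultCompression)
	var blk, out bytes.Buffer
	if _, _, err := alg.CompressBlock([]byte("hello gzip"), &blk); err != nil {
		log.Fatal(err)
	}
	if _, err := alg.DecompressBlock(&blk, &out, 128*1024); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.String()) // "hello gzip"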

View File

@@ -1,223 +0,0 @@
package press
// This file implements the LZ4 algorithm.
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"math/bits"
"github.com/buengese/xxh32"
lz4 "github.com/pierrec/lz4"
)
/*
Structure of LZ4 header:
Flags:
Version = 01
Independent = 1
Block Checksum = 1
Content Size = 0
Content Checksum = 0
Reserved = 0
Dictionary ID = 0
BD byte:
Reserved = 0
Block Max Size = 101 (binary) = 5, i.e. 256KB
Reserved = 0000
Header checksum byte: (xxhash(flags and BD byte) >> 8) & 0xff
*/
// LZ4Header - Header of our LZ4 file
//var LZ4Header = []byte{0x04, 0x22, 0x4d, 0x18, 0x70, 0x50, 0x84}
// LZ4Footer - Footer of our LZ4 file
var LZ4Footer = []byte{0x00, 0x00, 0x00, 0x00} // This is just an empty block
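// Worked example (derived from the layout above; matches the
// commented-out LZ4Header): with block checksums on and a 256KB block
// max size, GetHeader emits seven bytes:
//
//	magic 0x184D2204, little-endian         -> 04 22 4d 18
//	flags: version(01)<<6 | 1<<5 | 1<<4     -> 70
//	BD:    size index 5 (256KB) << 4        -> 50
//	HC:    (xxh32({0x70, 0x50}) >> 8) & 0xff -> 84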
const (
frameMagic uint32 = 0x184D2204
compressedBlockFlag = 1 << 31
compressedBlockMask = compressedBlockFlag - 1
)
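// The high bit of each 4-byte block length word (compressedBlockFlag)
// marks a block stored uncompressed; the low 31 bits are its length.
// compressBlock sets it when lz4 cannot shrink the input, and
// DecompressBlock branches on it before calling lz4.UncompressBlock.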
// AlgLz4 is the Lz4 compression algorithm
type AlgLz4 struct {
Header lz4.Header
buf [19]byte // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes
}
// InitializeLz4 creates an Lz4 compression algorithm
func InitializeLz4(bs uint32, blockChecksum bool) Algorithm {
a := new(AlgLz4)
a.Header.Reset()
a.Header = lz4.Header{
BlockChecksum: blockChecksum,
BlockMaxSize: int(bs),
}
return a
}
// GetFileExtension returns the file extension
func (a *AlgLz4) GetFileExtension() string {
return ".lz4"
}
// GetHeader returns the Lz4 compression header
func (a *AlgLz4) GetHeader() []byte {
// Size is optional.
buf := a.buf[:]
// Set the fixed size data: magic number, block max size and flags.
binary.LittleEndian.PutUint32(buf[0:], frameMagic)
flg := byte(lz4.Version << 6)
flg |= 1 << 5 // No block dependency.
if a.Header.BlockChecksum {
flg |= 1 << 4
}
if a.Header.Size > 0 {
flg |= 1 << 3
}
buf[4] = flg
buf[5] = blockSizeValueToIndex(a.Header.BlockMaxSize) << 4
// Current buffer size: magic(4) + flags(1) + block max size (1).
n := 6
if a.Header.Size > 0 {
binary.LittleEndian.PutUint64(buf[n:], a.Header.Size)
n += 8
}
// The header checksum includes the flags, block max size and optional Size.
buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF)
// Header ready, write it out.
return buf[0 : n+1]
}
// GetFooter returns the LZ4 stream footer (an empty end-of-frame block)
func (a *AlgLz4) GetFooter() []byte {
return LZ4Footer
}
// CompressBlock compresses a block using lz4
func (a *AlgLz4) CompressBlock(in []byte, out io.Writer) (compressedSize uint32, uncompressedSize uint64, err error) {
if len(in) > 0 {
n, err := a.compressBlock(in, out)
if err != nil {
return 0, 0, err
}
return n, uint64(len(in)), nil
}
return 0, 0, nil
}
// compressBlock compresses a block.
func (a *AlgLz4) compressBlock(data []byte, dst io.Writer) (uint32, error) {
zdata := make([]byte, a.Header.BlockMaxSize) // The compressed block size cannot exceed the input's.
var zn int
if level := a.Header.CompressionLevel; level != 0 {
zn, _ = lz4.CompressBlockHC(data, zdata, level)
} else {
var hashTable [1 << 16]int
zn, _ = lz4.CompressBlock(data, zdata, hashTable[:])
}
var bLen uint32
if zn > 0 && zn < len(data) {
// Compressible and compressed size smaller than uncompressed: ok!
bLen = uint32(zn)
zdata = zdata[:zn]
} else {
// Uncompressed block.
bLen = uint32(len(data)) | compressedBlockFlag
zdata = data
}
// Write the block.
if err := a.writeUint32(bLen, dst); err != nil {
return 0, err
}
_, err := dst.Write(zdata)
if err != nil {
return 0, err
}
if !a.Header.BlockChecksum {
return bLen, nil
}
checksum := xxh32.ChecksumZero(zdata)
if err := a.writeUint32(checksum, dst); err != nil {
return 0, err
}
return bLen, nil
}
// writeUint32 writes a uint32 to the underlying writer.
func (a *AlgLz4) writeUint32(x uint32, dst io.Writer) error {
buf := make([]byte, 4)
binary.LittleEndian.PutUint32(buf, x)
_, err := dst.Write(buf)
return err
}
func blockSizeValueToIndex(size int) byte {
return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2)
}
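// For reference, the expression above maps the legal lz4 frame block
// sizes to their BD indexes: 64KB -> 4, 256KB -> 5, 1MB -> 6, 4MB -> 7.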
// DecompressBlock decompresses an Lz4 compressed block
func (a *AlgLz4) DecompressBlock(in io.Reader, out io.Writer, BlockSize uint32) (n int, err error) {
// Get our compressed data
var b bytes.Buffer
_, err = io.Copy(&b, in)
if err != nil {
return 0, err
}
zdata := b.Bytes()
bLen := binary.LittleEndian.Uint32(zdata[:4])
if bLen&compressedBlockFlag > 0 {
// Uncompressed block.
bLen &= compressedBlockMask
if bLen > BlockSize {
return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
}
data := zdata[4 : bLen+4]
if a.Header.BlockChecksum {
checksum := binary.LittleEndian.Uint32(zdata[4+bLen:])
if h := xxh32.ChecksumZero(data); h != checksum {
return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
}
}
_, err := out.Write(data)
return len(data), err
}
// compressed block
if bLen > BlockSize {
return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
}
if a.Header.BlockChecksum {
checksum := binary.LittleEndian.Uint32(zdata[4+bLen:])
if h := xxh32.ChecksumZero(zdata[4 : bLen+4]); h != checksum {
return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
}
}
data := make([]byte, BlockSize)
n, err = lz4.UncompressBlock(zdata[4:bLen+4], data)
if err != nil {
return 0, err
}
_, err = out.Write(data[:n])
return n, err
}
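
Putting the pieces together (a sketch under the same assumptions as the gzip example; payload is any []byte): header, compressed blocks and footer concatenate into a standard .lz4 frame that stock lz4 tools can decode:

	alg := InitializeLz4(256*1024, true)
	var frame bytes.Buffer
	frame.Write(alg.GetHeader())
	if _, _, err := alg.CompressBlock(payload, &frame); err != nil {
		log.Fatal(err)
	}
	frame.Write(alg.GetFooter()) // the empty end-of-frame block
	// frame.Bytes() is now a complete lz4 frame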

Some files were not shown because too many files have changed in this diff