mirror of https://github.com/rclone/rclone.git synced 2025-12-24 04:04:37 +00:00

Compare commits


1 Commit

Author: Nick Craig-Wood
Commit: 85882fa2de
Date:   2019-08-06 16:43:50 +01:00

build: unify uploading in CI and add artifacts to Azure Pipelines

- remove compile_all step as we compile everything in the release step
- rename travis_beta to beta
- split into upload_beta and make depend on rclone
- use latest OSes in pipelines build
- Add workaround for cmd/mount tests lockup from #3154
944 changed files with 13787 additions and 117103 deletions


@@ -40,11 +40,10 @@ build_script:
test_script:
- make GOTAGS=cmount quicktest
- make GOTAGS=cmount racequicktest
artifacts:
- path: rclone.exe
- path: build/*-v*.zip
deploy_script:
- IF "%APPVEYOR_REPO_NAME%" == "rclone/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
- IF "%APPVEYOR_REPO_NAME%" == "rclone/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make upload_beta


@@ -14,30 +14,37 @@ jobs:
- run:
name: Cross-compile rclone
command: |
docker pull billziss/xgo-cgofuse
docker pull rclone/xgo-cgofuse
go get -v github.com/karalabe/xgo
xgo \
-image=billziss/xgo-cgofuse \
-targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
--image=rclone/xgo-cgofuse \
--targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-tags cmount \
-dest build \
.
xgo \
-image=billziss/xgo-cgofuse \
-targets=android/*,ios/* \
-dest build \
--targets=android/*,ios/* \
.
- run:
name: Prepare artifacts
command: |
mkdir -p /tmp/rclone.dist
cp -R rclone-* /tmp/rclone.dist
mkdir build
cp -R rclone-* build/
- run:
name: Build rclone
command: |
docker pull golang
docker run --rm -v "$PWD":/usr/src/rclone -w /usr/src/rclone golang go build -mod=vendor -v
go version
go build
- run:
name: Upload artifacts
command: |
make circleci_upload
if [[ $CIRCLE_PULL_REQUEST != "" ]]; then
make circleci_upload
fi
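# Aside (not part of this diff): CIRCLE_PULL_REQUEST is only set on pull
# request builds, so unlike the other CI services configured here this job
# uploads its test builds for PRs only.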
- store_artifacts:
path: build
path: /tmp/rclone.dist


@@ -1,206 +0,0 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable build.yml" -*-
name: build
# Trigger the workflow on push or pull request
on:
push:
branches:
- '*'
tags:
- '*'
pull_request:
jobs:
build:
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'modules_race', 'go1.10', 'go1.11', 'go1.12']
include:
- job_name: linux
os: ubuntu-latest
go: '1.13.x'
modules: off
gotags: cmount
build_flags: '-include "^linux/"'
check: true
quicktest: true
deploy: true
- job_name: mac
os: macOS-latest
go: '1.13.x'
modules: off
gotags: '' # cmount doesn't work on osx travis for some reason
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
racequicktest: true
deploy: true
- job_name: windows_amd64
os: windows-latest
go: '1.13.x'
modules: off
gotags: cmount
build_flags: '-include "^windows/amd64" -cgo'
quicktest: true
racequicktest: true
deploy: true
- job_name: windows_386
os: windows-latest
go: '1.13.x'
modules: off
gotags: cmount
goarch: '386'
cgo: '1'
build_flags: '-include "^windows/386" -cgo'
quicktest: true
deploy: true
- job_name: other_os
os: ubuntu-latest
go: '1.13.x'
modules: off
build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
compile_all: true
deploy: true
- job_name: modules_race
os: ubuntu-latest
go: '1.13.x'
modules: on
quicktest: true
racequicktest: true
- job_name: go1.10
os: ubuntu-latest
go: '1.10.x'
modules: off
quicktest: true
- job_name: go1.11
os: ubuntu-latest
go: '1.11.x'
modules: off
quicktest: true
- job_name: go1.12
os: ubuntu-latest
go: '1.12.x'
modules: off
quicktest: true
name: ${{ matrix.job_name }}
runs-on: ${{ matrix.os }}
steps:
- name: Checkout
uses: actions/checkout@master
with:
path: ./src/github.com/${{ github.repository }}
- name: Install Go
uses: actions/setup-go@v1
with:
go-version: ${{ matrix.go }}
- name: Set environment variables
shell: bash
run: |
echo '::set-env name=GOPATH::${{ runner.workspace }}'
echo '::add-path::${{ runner.workspace }}/bin'
echo '::set-env name=GO111MODULE::${{ matrix.modules }}'
echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
- name: Install Libraries on Linux
shell: bash
run: |
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get install fuse libfuse-dev rpm pkg-config
if: matrix.os == 'ubuntu-latest'
- name: Install Libraries on macOS
shell: bash
run: |
brew update
brew tap caskroom/cask
brew cask install osxfuse
if: matrix.os == 'macOS-latest'
- name: Install Libraries on Windows
shell: powershell
run: |
$ProgressPreference = 'SilentlyContinue'
choco install -y winfsp zip
Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
if ($env:GOARCH -eq "386") {
choco install -y mingw --forcex86 --force
Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
}
# Copy mingw32-make.exe to make.exe so the same command line
# can be used on Windows as on macOS and Linux
$path = (get-command mingw32-make.exe).Path
Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
if: matrix.os == 'windows-latest'
- name: Print Go version and environment
shell: bash
run: |
printf "Using go at: $(which go)\n"
printf "Go version: $(go version)\n"
printf "\n\nGo environment:\n\n"
go env
printf "\n\nRclone environment:\n\n"
make vars
printf "\n\nSystem environment:\n\n"
env
- name: Run tests
shell: bash
run: |
make
make quicktest
if: matrix.quicktest
- name: Race test
shell: bash
run: |
make racequicktest
if: matrix.racequicktest
- name: Code quality test
shell: bash
run: |
make build_dep
make check
if: matrix.check
- name: Compile all architectures test
shell: bash
run: |
make
make compile_all
if: matrix.compile_all
- name: Deploy built binaries
shell: bash
run: |
make release_dep
make travis_beta
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
BETA_SUBDIR: 'github_actions' # FIXME remove when removing travis/appveyor
# working-directory: '$(modulePath)'
if: matrix.deploy && github.head_ref == ''


@@ -50,6 +50,9 @@ matrix:
allow_failures:
- go: tip
include:
- go: 1.9.x
script:
- make quicktest
- go: 1.10.x
script:
- make quicktest
@@ -57,9 +60,6 @@ matrix:
script:
- make quicktest
- go: 1.12.x
script:
- make quicktest
- go: 1.13.x
name: Linux
env:
- GOTAGS=cmount
@@ -69,7 +69,7 @@ matrix:
- make build_dep
- make check
- make quicktest
- go: 1.13.x
- go: 1.12.x
name: Go Modules / Race
env:
- GO111MODULE=on
@@ -77,15 +77,14 @@ matrix:
script:
- make quicktest
- make racequicktest
- go: 1.13.x
- go: 1.12.x
name: Other OS
env:
- DEPLOY=true
- BUILD_FLAGS='-exclude "^(windows|darwin|linux)/"'
script:
- make
- make compile_all
- go: 1.13.x
- go: 1.12.x
name: macOS
os: osx
env:
@@ -101,7 +100,7 @@ matrix:
- make racequicktest
# - os: windows
# name: Windows
# go: 1.13.x
# go: 1.12.x
# env:
# - GOTAGS=cmount
# - CPATH='C:\Program Files (x86)\WinFsp\inc\fuse'
@@ -120,9 +119,11 @@ matrix:
deploy:
provider: script
script: make travis_beta
script:
- make beta
- [[ "$TRAVIS_PULL_REQUEST" == "false" ]] && make upload_beta
skip_cleanup: true
on:
repo: rclone/rclone
all_branches: true
condition: $TRAVIS_PULL_REQUEST == false && $DEPLOY == true
condition: $DEPLOY == true


@@ -118,7 +118,7 @@ but they can be run against any of the remotes.
cd fs/sync
go test -v -remote TestDrive:
go test -v -remote TestDrive: -fast-list
go test -v -remote TestDrive: -subdir
cd fs/operations
go test -v -remote TestDrive:
@@ -362,7 +362,9 @@ Or if you want to run the integration tests manually:
* `go test -v -remote TestRemote:`
* `cd fs/sync`
* `go test -v -remote TestRemote:`
* If your remote defines `ListR` check with this also
* If you are making a bucket based remote, then check with this also
* `go test -v -remote TestRemote: -subdir`
* And if your remote defines `ListR` this also
* `go test -v -remote TestRemote: -fast-list`
See the [testing](#testing) section for more information on integration tests.


@@ -1,22 +0,0 @@
FROM golang AS builder
COPY . /go/src/github.com/rclone/rclone/
WORKDIR /go/src/github.com/rclone/rclone/
RUN make quicktest
RUN \
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
make
RUN ./rclone version
# Begin final image
FROM alpine:latest
RUN apk --no-cache add ca-certificates fuse
COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
ENTRYPOINT [ "rclone" ]
WORKDIR /data
ENV XDG_CONFIG_HOME=/config

MANUAL.html (generated, 3120 changed lines)

File diff suppressed because it is too large.

MANUAL.md (generated, 3343 changed lines)

File diff suppressed because it is too large.

MANUAL.txt (generated, 3422 changed lines)

File diff suppressed because it is too large.


@@ -1,5 +1,5 @@
SHELL = bash
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD))
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(shell git rev-parse --abbrev-ref HEAD))
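# Aside (not part of this diff): $(or ...) expands to its first non-empty
# argument, so BRANCH falls back through the CI-provided branch variables to
# plain git; $(lastword $(subst /, ,$(GITHUB_REF))) turned a ref such as
# refs/heads/master into just master.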
LAST_TAG := $(shell git describe --tags --abbrev=0)
ifeq ($(BRANCH),$(LAST_TAG))
BRANCH := master
@@ -30,15 +30,12 @@ BUILDTAGS=-tags "$(GOTAGS)"
LINTTAGS=--build-tags "$(GOTAGS)"
endif
.PHONY: rclone test_all vars version
.PHONY: rclone vars version
rclone:
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
mkdir -p `go env GOPATH`/bin/
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/
test_all:
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
touch fs/version.go
go install -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
cp -av `go env GOPATH`/bin/rclone .
vars:
@echo SHELL="'$(SHELL)'"
@@ -53,7 +50,8 @@ version:
@echo '$(TAG)'
# Full suite of integration tests
test: rclone test_all
test: rclone
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
-test_all 2>&1 | tee test_all.log
@echo "Written logs in test_all.log"
@@ -147,7 +145,7 @@ upload_github:
cross: doc
go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
beta:
test_beta:
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
@@ -158,13 +156,6 @@ log_since_last_release:
compile_all:
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
appveyor_upload:
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
@echo Beta release ready at $(BETA_URL)
circleci_upload:
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifndef BRANCH_PATH
@@ -172,12 +163,14 @@ ifndef BRANCH_PATH
endif
@echo Beta release ready at $(BETA_URL)/testbuilds
travis_beta:
beta:
ifeq (linux,$(filter linux,$(subst Linux,linux,$(TRAVIS_OS_NAME) $(AGENT_OS))))
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
endif
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
upload_beta: rclone
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)


@@ -1,4 +1,4 @@
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
[![Logo](https://rclone.org/img/rclone-120x120.png)](https://rclone.org/)
[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
@@ -6,7 +6,7 @@
[Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |
[Forum](https://forum.rclone.org/)
[Forum](https://forum.rclone.org/) |
[![Build Status](https://travis-ci.org/rclone/rclone.svg?branch=master)](https://travis-ci.org/rclone/rclone)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/rclone/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/rclone/rclone)
@@ -14,7 +14,6 @@
[![CircleCI](https://circleci.com/gh/rclone/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/rclone/rclone/tree/master)
[![Go Report Card](https://goreportcard.com/badge/github.com/rclone/rclone)](https://goreportcard.com/report/github.com/rclone/rclone)
[![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone)
[![Docker Pulls](https://img.shields.io/docker/pulls/rclone/rclone)](https://hub.docker.com/r/rclone/rclone)
# Rclone
@@ -41,7 +40,6 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
@@ -54,8 +52,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* put.io [:page_facing_up:](https://rclone.org/webdav/#put-io)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)


@@ -1,14 +1,8 @@
# Release
This file describes how to make the various kinds of releases
## Extra required software for making a release
Extra required software for making a release
* [github-release](https://github.com/aktau/github-release) for uploading packages
* pandoc for making the html and man pages
## Making a release
Making a release
* git status - make sure everything is checked in
* Check travis & appveyor builds are green
* make check
@@ -32,8 +26,8 @@ This file describes how to make the various kinds of releases
* # announce with forum post, twitter post, G+ post
Early in the next release cycle update the vendored dependencies
* Review any pinned packages in go.mod and remove if possible
* GO111MODULE=on go get -u github.com/spf13/cobra@master
* make update
* git status
* git add new files
@@ -54,57 +48,24 @@ Can be fixed with
* GO111MODULE=on go mod vendor
## Making a point release
If rclone needs a point release due to some horrendous bug then a
point release is necessary.
First make the release branch. If this is a second point release then
this will be done already.
* BASE_TAG=v1.XX # eg v1.49
* NEW_TAG=${BASE_TAG}.Y # eg v1.49.1
* echo $BASE_TAG $NEW_TAG # v1.49 v1.49.1
* git branch ${BASE_TAG} ${BASE_TAG}-fixes
Now
* git co ${BASE_TAG}-fixes
Making a point release. If rclone needs a point release due to some
horrendous bug, then
* git branch v1.XX v1.XX-fixes
* git cherry-pick any fixes
* Test (see above)
* make NEW_TAG=${NEW_TAG} tag
* make NEW_TAG=v1.XX.1 tag
* edit docs/content/changelog.md
* make TAG=${NEW_TAG} doc
* git commit -a -v -m "Version ${NEW_TAG}"
* git tag -d ${NEW_TAG}
* git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
* git push --tags -u origin ${BASE_TAG}-fixes
* Wait for builds to complete
* make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
* make TAG=${NEW_TAG} tarball
* make TAG=${NEW_TAG} sign_upload
* make TAG=${NEW_TAG} check_sign
* make TAG=${NEW_TAG} upload
* make TAG=${NEW_TAG} upload_website
* make TAG=${NEW_TAG} upload_github
* NB this overwrites the current beta so we need to do this
* git co master
* make LAST_TAG=${NEW_TAG} startdev
* # cherry pick the changes to the changelog
* git checkout ${BASE_TAG}-fixes docs/content/changelog.md
* git commit --amend
* git push
* make TAG=v1.43.1 doc
* git commit -a -v -m "Version v1.XX.1"
* git tag -d -v1.XX.1
* git tag -s -m "Version v1.XX.1" v1.XX.1
* git push --tags -u origin v1.XX-fixes
* make BRANCH_PATH= TAG=v1.43.1 fetch_binaries
* make TAG=v1.43.1 tarball
* make TAG=v1.43.1 sign_upload
* make TAG=v1.43.1 check_sign
* make TAG=v1.43.1 upload
* make TAG=v1.43.1 upload_website
* make TAG=v1.43.1 upload_github
* NB this overwrites the current beta so after the release, rebuild the last travis build
* Announce!
## Making a manual build of docker
The rclone docker image should autobuild on docker hub. If it doesn't
or needs to be updated then rebuild like this.
```
docker build -t rclone/rclone:1.49.1 -t rclone/rclone:1.49 -t rclone/rclone:1 -t rclone/rclone:latest .
docker push rclone/rclone:1.49.1
docker push rclone/rclone:1.49
docker push rclone/rclone:1
docker push rclone/rclone:latest
```

azure-pipelines.yml (new file, 234 lines)

@@ -0,0 +1,234 @@
---
# Azure pipelines build for rclone
# Parts stolen shamelessly from all round the Internet, especially Caddy
# -*- compile-command: "yamllint -f parsable azure-pipelines.yml" -*-
trigger:
branches:
include:
- '*'
tags:
include:
- '*'
variables:
GOROOT: $(gorootDir)/go
GOPATH: $(system.defaultWorkingDirectory)/gopath
GOCACHE: $(system.defaultWorkingDirectory)/gocache
GOBIN: $(GOPATH)/bin
GOMAXPROCS: 8 # workaround for cmd/mount tests locking up - see #3154
modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)'
GO111MODULE: 'off'
GOTAGS: cmount
GO_LATEST: false
CPATH: ''
GO_INSTALL_ARCH: amd64
strategy:
matrix:
linux:
imageName: ubuntu-latest
gorootDir: /usr/local
GO_VERSION: latest
GOTAGS: cmount
BUILD_FLAGS: '-include "^linux/"'
MAKE_CHECK: true
MAKE_QUICKTEST: true
DEPLOY: true
mac:
imageName: macos-latest
gorootDir: /usr/local
GO_VERSION: latest
GOTAGS: "" # cmount doesn't work on osx travis for some reason
BUILD_FLAGS: '-include "^darwin/" -cgo'
MAKE_QUICKTEST: true
MAKE_RACEQUICKTEST: true
DEPLOY: true
windows_amd64:
imageName: windows-latest
gorootDir: C:\
GO_VERSION: latest
BUILD_FLAGS: '-include "^windows/amd64" -cgo'
MAKE_QUICKTEST: true
DEPLOY: true
windows_386:
imageName: windows-latest
gorootDir: C:\
GO_VERSION: latest
GO_INSTALL_ARCH: 386
BUILD_FLAGS: '-include "^windows/386" -cgo'
MAKE_QUICKTEST: true
DEPLOY: true
other_os:
imageName: ubuntu-latest
gorootDir: /usr/local
GO_VERSION: latest
BUILD_FLAGS: '-exclude "^(windows|darwin|linux)/"'
DEPLOY: true
modules_race:
imageName: ubuntu-latest
gorootDir: /usr/local
GO_VERSION: latest
GO111MODULE: on
GOPROXY: https://proxy.golang.org
MAKE_QUICKTEST: true
MAKE_RACEQUICKTEST: true
go1.9:
imageName: ubuntu-latest
gorootDir: /usr/local
GOCACHE: '' # build caching only came in go1.10
GO_VERSION: go1.9.7
MAKE_QUICKTEST: true
go1.10:
imageName: ubuntu-latest
gorootDir: /usr/local
GO_VERSION: go1.10.8
MAKE_QUICKTEST: true
go1.11:
imageName: ubuntu-latest
gorootDir: /usr/local
GO_VERSION: go1.11.12
MAKE_QUICKTEST: true
pool:
vmImage: $(imageName)
steps:
- bash: |
latestGo=$(curl "https://golang.org/VERSION?m=text")
echo "##vso[task.setvariable variable=GO_VERSION]$latestGo"
echo "##vso[task.setvariable variable=GO_LATEST]true"
echo "Latest Go version: $latestGo"
condition: eq( variables['GO_VERSION'], 'latest' )
displayName: "Get latest Go version"
- bash: |
sudo rm -f $(which go)
echo '##vso[task.prependpath]$(GOBIN)'
echo '##vso[task.prependpath]$(GOROOT)/bin'
mkdir -p '$(modulePath)'
shopt -s extglob
shopt -s dotglob
mv !(gopath) '$(modulePath)'
displayName: Remove old Go, set GOBIN/GOROOT, and move project into GOPATH
- task: CacheBeta@0
continueOnError: true
inputs:
key: go-build-cache | "$(Agent.JobName)"
path: $(GOCACHE)
displayName: Cache go build
condition: ne( variables['GOCACHE'], '' )
# Install Libraries (varies by platform)
- bash: |
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get install fuse libfuse-dev rpm pkg-config
condition: eq( variables['Agent.OS'], 'Linux' )
displayName: Install Libraries on Linux
- bash: |
brew update
brew tap caskroom/cask
brew cask install osxfuse
condition: eq( variables['Agent.OS'], 'Darwin' )
displayName: Install Libraries on macOS
- powershell: |
$ProgressPreference = 'SilentlyContinue'
choco install -y winfsp zip
Write-Host "##vso[task.setvariable variable=CPATH]C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
if ($env:GO_INSTALL_ARCH -eq "386") {
choco install -y mingw --forcex86 --force
Write-Host "##vso[task.prependpath]C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
}
# Copy mingw32-make.exe to make.exe so the same command line
# can be used on Windows as on macOS and Linux
$path = (get-command mingw32-make.exe).Path
Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
condition: eq( variables['Agent.OS'], 'Windows_NT' )
displayName: Install Libraries on Windows
# Install Go (this varies by platform)
- bash: |
wget "https://dl.google.com/go/$(GO_VERSION).linux-$(GO_INSTALL_ARCH).tar.gz"
sudo mkdir $(gorootDir)
sudo chown ${USER}:${USER} $(gorootDir)
tar -C $(gorootDir) -xzf "$(GO_VERSION).linux-$(GO_INSTALL_ARCH).tar.gz"
condition: eq( variables['Agent.OS'], 'Linux' )
displayName: Install Go on Linux
- bash: |
wget "https://dl.google.com/go/$(GO_VERSION).darwin-$(GO_INSTALL_ARCH).tar.gz"
sudo tar -C $(gorootDir) -xzf "$(GO_VERSION).darwin-$(GO_INSTALL_ARCH).tar.gz"
condition: eq( variables['Agent.OS'], 'Darwin' )
displayName: Install Go on macOS
- powershell: |
$ProgressPreference = 'SilentlyContinue'
Write-Host "Downloading Go $(GO_VERSION) for $(GO_INSTALL_ARCH)"
(New-Object System.Net.WebClient).DownloadFile("https://dl.google.com/go/$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip", "$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip")
Write-Host "Extracting Go"
Expand-Archive "$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip" -DestinationPath "$(gorootDir)"
condition: eq( variables['Agent.OS'], 'Windows_NT' )
displayName: Install Go on Windows
# Display environment for debugging
- bash: |
printf "Using go at: $(which go)\n"
printf "Go version: $(go version)\n"
printf "\n\nGo environment:\n\n"
go env
printf "\n\nRclone environment:\n\n"
make vars
printf "\n\nSystem environment:\n\n"
env
workingDirectory: '$(modulePath)'
displayName: Print Go version and environment
# Run Tests
- bash: |
make quicktest
workingDirectory: '$(modulePath)'
displayName: Run tests
condition: eq( variables['MAKE_QUICKTEST'], 'true' )
- bash: |
make racequicktest
workingDirectory: '$(modulePath)'
displayName: Race test
condition: eq( variables['MAKE_RACEQUICKTEST'], 'true' )
- bash: |
make build_dep
make check
workingDirectory: '$(modulePath)'
displayName: Code quality test
condition: eq( variables['MAKE_CHECK'], 'true' )
- bash: |
make beta
workingDirectory: '$(modulePath)'
displayName: Do release build
condition: eq( variables['DEPLOY'], 'true' )
- bash: |
make upload_beta
env:
RCLONE_CONFIG_PASS: $(RCLONE_CONFIG_PASS)
BETA_SUBDIR: 'azure_pipelines' # FIXME remove when removing travis/appveyor
workingDirectory: '$(modulePath)'
displayName: Upload built binaries
condition: and( eq( variables['DEPLOY'], 'true' ), ne( variables['Build.Reason'], 'PullRequest' ) )
- publish: $(modulePath)/build
artifact: "rclone-build-$(Agent.JobName)"
displayName: Publish built binaries
condition: eq( variables['DEPLOY'], 'true' )


@@ -20,13 +20,10 @@ import (
_ "github.com/rclone/rclone/backend/jottacloud"
_ "github.com/rclone/rclone/backend/koofr"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/mailru"
_ "github.com/rclone/rclone/backend/mega"
_ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/sftp"


@@ -16,6 +16,7 @@ import (
"net/http"
"net/url"
"path"
"regexp"
"strconv"
"strings"
"sync"
@@ -32,7 +33,6 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/pacer"
)
@@ -143,20 +143,19 @@ type Options struct {
// Fs represents a remote azure server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
features *fs.Features // optional features
client *http.Client // http client we are using
svcURL *azblob.ServiceURL // reference to serviceURL
cntURLcacheMu sync.Mutex // mutex to protect cntURLcache
cntURLcache map[string]*azblob.ContainerURL // reference to containerURL per container
rootContainer string // container part of root (if any)
rootDirectory string // directory part of root (if any)
isLimited bool // if limited to one container
cache *bucket.Cache // cache for container creation status
pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
features *fs.Features // optional features
client *http.Client // http client we are using
svcURL *azblob.ServiceURL // reference to serviceURL
cntURL *azblob.ContainerURL // reference to containerURL
container string // the container we are working on
containerOKMu sync.Mutex // mutex to protect container OK
containerOK bool // true if we have created the container
containerDeleted bool // true if we have deleted the container
pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency
}
// Object describes a azure object
@@ -180,18 +179,18 @@ func (f *Fs) Name() string {
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
if f.root == "" {
return f.container
}
return f.container + "/" + f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootContainer == "" {
return fmt.Sprintf("Azure root")
if f.root == "" {
return fmt.Sprintf("Azure container %s", f.container)
}
if f.rootDirectory == "" {
return fmt.Sprintf("Azure container %s", f.rootContainer)
}
return fmt.Sprintf("Azure container %s path %s", f.rootContainer, f.rootDirectory)
return fmt.Sprintf("Azure container %s path %s", f.container, f.root)
}
// Features returns the optional features of this Fs
@@ -199,23 +198,21 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
// Pattern to match a azure path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
// parseParse parses a azure 'url'
func parsePath(path string) (container, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("couldn't find container in azure path %q", path)
} else {
container, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
return
}
// split returns container and containerPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) {
return bucket.Split(path.Join(f.root, rootRelativePath))
}
// split returns container and containerPath from the object
func (o *Object) split() (container, containerPath string) {
return o.fs.split(o.remote)
}
// validateAccessTier checks if azureblob supports user supplied tier
func validateAccessTier(tier string) bool {
switch tier {
@@ -320,12 +317,6 @@ func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline
return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log})
}
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootContainer, f.rootDirectory = bucket.Split(f.root)
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
@@ -347,6 +338,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if opt.ListChunkSize > maxListChunkSize {
return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
}
container, directory, err := parsePath(root)
if err != nil {
return nil, err
}
if opt.Endpoint == "" {
opt.Endpoint = storageDefaultBaseURL
}
@@ -361,25 +356,24 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f := &Fs{
name: name,
opt: *opt,
container: container,
root: directory,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
client: fshttp.NewClient(fs.Config),
cache: bucket.NewCache(),
cntURLcache: make(map[string]*azblob.ContainerURL, 1),
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
SetTier: true,
GetTier: true,
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
SetTier: true,
GetTier: true,
}).Fill(f)
var (
u *url.URL
serviceURL azblob.ServiceURL
u *url.URL
serviceURL azblob.ServiceURL
containerURL azblob.ContainerURL
)
switch {
case opt.UseEmulator:
@@ -393,6 +387,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
serviceURL = azblob.NewServiceURL(*u, pipeline)
containerURL = serviceURL.NewContainerURL(container)
case opt.Account != "" && opt.Key != "":
credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
if err != nil {
@@ -405,6 +400,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
serviceURL = azblob.NewServiceURL(*u, pipeline)
containerURL = serviceURL.NewContainerURL(container)
case opt.SASURL != "":
u, err = url.Parse(opt.SASURL)
if err != nil {
@@ -415,30 +411,38 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Check if we have container level SAS or account level sas
parts := azblob.NewBlobURLParts(*u)
if parts.ContainerName != "" {
if f.rootContainer != "" && parts.ContainerName != f.rootContainer {
if container != "" && parts.ContainerName != container {
return nil, errors.New("Container name in SAS URL and container provided in command do not match")
}
containerURL := azblob.NewContainerURL(*u, pipeline)
f.cntURLcache[parts.ContainerName] = &containerURL
f.isLimited = true
f.container = parts.ContainerName
containerURL = azblob.NewContainerURL(*u, pipeline)
} else {
serviceURL = azblob.NewServiceURL(*u, pipeline)
containerURL = serviceURL.NewContainerURL(container)
}
default:
return nil, errors.New("Need account+key or connectionString or sasURL")
}
f.svcURL = &serviceURL
f.cntURL = &containerURL
if f.rootContainer != "" && f.rootDirectory != "" {
if f.root != "" {
f.root += "/"
// Check to see if the (container,directory) is actually an existing file
oldRoot := f.root
newRoot, leaf := path.Split(oldRoot)
f.setRoot(newRoot)
_, err := f.NewObject(ctx, leaf)
remote := path.Base(directory)
f.root = path.Dir(directory)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
_, err := f.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
// File doesn't exist or is a directory so return old f
f.setRoot(oldRoot)
f.root = oldRoot
return f, nil
}
return nil, err
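// Aside (illustrative, not part of this diff): the "is the root actually a
// file?" probe above leans on path.Split keeping the trailing slash on the
// directory part:
//
//	newRoot, leaf := path.Split("dir/file.txt") // "dir/", "file.txt"
//
// NewFs points the Fs at newRoot, tries NewObject(leaf), and restores the
// old root when the probe returns fs.ErrorObjectNotFound or fs.ErrorNotAFile.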
@@ -449,20 +453,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return f, nil
}
// return the container URL for the container passed in
func (f *Fs) cntURL(container string) (containerURL *azblob.ContainerURL) {
f.cntURLcacheMu.Lock()
defer f.cntURLcacheMu.Unlock()
var ok bool
if containerURL, ok = f.cntURLcache[container]; !ok {
cntURL := f.svcURL.NewContainerURL(container)
containerURL = &cntURL
f.cntURLcache[container] = containerURL
}
return containerURL
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
@@ -492,8 +482,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
}
// getBlobReference creates an empty blob reference with no metadata
func (f *Fs) getBlobReference(container, containerPath string) azblob.BlobURL {
return f.cntURL(container).NewBlobURL(containerPath)
func (f *Fs) getBlobReference(remote string) azblob.BlobURL {
return f.cntURL.NewBlobURL(f.root + remote)
}
// updateMetadataWithModTime adds the modTime passed in to o.meta.
@@ -529,18 +519,16 @@ type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
// the container and root supplied
//
// dir is the starting directory, "" for root
//
// The remote has prefix removed from it and if addContainer is set then
// it adds the container to the start.
func (f *Fs) list(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, maxResults uint, fn listFn) error {
if f.cache.IsDeleted(container) {
func (f *Fs) list(ctx context.Context, dir string, recurse bool, maxResults uint, fn listFn) error {
f.containerOKMu.Lock()
deleted := f.containerDeleted
f.containerOKMu.Unlock()
if deleted {
return fs.ErrorDirNotFound
}
if prefix != "" {
prefix += "/"
}
if directory != "" {
directory += "/"
root := f.root
if dir != "" {
root += dir + "/"
}
delimiter := ""
if !recurse {
@@ -555,14 +543,15 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
UncommittedBlobs: false,
Deleted: false,
},
Prefix: directory,
Prefix: root,
MaxResults: int32(maxResults),
}
directoryMarkers := map[string]struct{}{}
for marker := (azblob.Marker{}); marker.NotDone(); {
var response *azblob.ListBlobsHierarchySegmentResponse
err := f.pacer.Call(func() (bool, error) {
var err error
response, err = f.cntURL(container).ListBlobsHierarchySegment(ctx, marker, delimiter, options)
response, err = f.cntURL.ListBlobsHierarchySegment(ctx, marker, delimiter, options)
return f.shouldRetry(err)
})
@@ -582,17 +571,26 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
// if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
// return nil
// }
if !strings.HasPrefix(file.Name, prefix) {
if !strings.HasPrefix(file.Name, f.root) {
fs.Debugf(f, "Odd name received %q", file.Name)
continue
}
remote := file.Name[len(prefix):]
remote := file.Name[len(f.root):]
if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) {
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
err = fn(remote, file, true)
if err != nil {
return err
}
// Keep track of directory markers. If recursing then
// there will be no Prefixes so no need to keep track
if !recurse {
directoryMarkers[remote] = struct{}{}
}
continue // skip directory marker
}
if addContainer {
remote = path.Join(container, remote)
}
// Send object
err = fn(remote, file, false)
if err != nil {
@@ -602,13 +600,14 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
// Send the subdirectories
for _, remote := range response.Segment.BlobPrefixes {
remote := strings.TrimRight(remote.Name, "/")
if !strings.HasPrefix(remote, prefix) {
if !strings.HasPrefix(remote, f.root) {
fs.Debugf(f, "Odd directory name received %q", remote)
continue
}
remote = remote[len(prefix):]
if addContainer {
remote = path.Join(container, remote)
remote = remote[len(f.root):]
// Don't send if already sent as a directory marker
if _, found := directoryMarkers[remote]; found {
continue
}
// Send object
err = fn(remote, nil, true)
@@ -633,9 +632,19 @@ func (f *Fs) itemToDirEntry(remote string, object *azblob.BlobItem, isDirectory
return o, nil
}
// mark the container as being OK
func (f *Fs) markContainerOK() {
if f.container != "" {
f.containerOKMu.Lock()
f.containerOK = true
f.containerDeleted = false
f.containerOKMu.Unlock()
}
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
err = f.list(ctx, container, directory, prefix, addContainer, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.list(ctx, dir, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
@@ -649,24 +658,17 @@ func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, a
return nil, err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
f.markContainerOK()
return entries, nil
}
// listContainers returns all the containers to out
func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) {
if f.isLimited {
f.cntURLcacheMu.Lock()
for container := range f.cntURLcache {
d := fs.NewDir(container, time.Time{})
entries = append(entries, d)
}
f.cntURLcacheMu.Unlock()
return entries, nil
func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
err = f.listContainersToFn(func(container *azblob.ContainerItem) error {
d := fs.NewDir(container.Name, container.Properties.LastModified)
f.cache.MarkOK(container.Name)
entries = append(entries, d)
return nil
})
@@ -686,14 +688,10 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
container, directory := f.split(dir)
if container == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listContainers(ctx)
if f.container == "" {
return f.listContainers(dir)
}
return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
return f.listDir(ctx, dir)
}
// ListR lists the objects and directories of the Fs starting
@@ -713,43 +711,22 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
container, directory := f.split(dir)
if f.container == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
listR := func(container, directory, prefix string, addContainer bool) error {
return f.list(ctx, container, directory, prefix, addContainer, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
}
if container == "" {
entries, err := f.listContainers(ctx)
err = f.list(ctx, dir, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
container := entry.Remote()
err = listR(container, "", f.rootDirectory, true)
if err != nil {
return err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
}
} else {
err = listR(container, directory, f.rootDirectory, f.rootContainer == "")
if err != nil {
return err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
return list.Add(entry)
})
if err != nil {
return err
}
// container must be present if listing succeeded
f.markContainerOK()
return list.Flush()
}
@@ -799,43 +776,86 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
return fs, fs.Update(ctx, in, src, options...)
}
// Check if the container exists
//
// NB this can return incorrect results if called immediately after container deletion
func (f *Fs) dirExists() (bool, error) {
options := azblob.ListBlobsSegmentOptions{
Details: azblob.BlobListingDetails{
Copy: false,
Metadata: false,
Snapshots: false,
UncommittedBlobs: false,
Deleted: false,
},
MaxResults: 1,
}
err := f.pacer.Call(func() (bool, error) {
ctx := context.Background()
_, err := f.cntURL.ListBlobsHierarchySegment(ctx, azblob.Marker{}, "", options)
return f.shouldRetry(err)
})
if err == nil {
return true, nil
}
// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
return false, nil
}
return false, err
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
container, _ := f.split(dir)
return f.makeContainer(ctx, container)
}
f.containerOKMu.Lock()
defer f.containerOKMu.Unlock()
if f.containerOK {
return nil
}
if !f.containerDeleted {
exists, err := f.dirExists()
if err == nil {
f.containerOK = exists
}
if err != nil || exists {
return err
}
}
// makeContainer creates the container if it doesn't exist
func (f *Fs) makeContainer(ctx context.Context, container string) error {
return f.cache.Create(container, func() error {
// now try to create the container
return f.pacer.Call(func() (bool, error) {
_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
if err != nil {
if storageErr, ok := err.(azblob.StorageError); ok {
switch storageErr.ServiceCode() {
case azblob.ServiceCodeContainerAlreadyExists:
return false, nil
case azblob.ServiceCodeContainerBeingDeleted:
// From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container
// When a container is deleted, a container with the same name cannot be created
// for at least 30 seconds; the container may not be available for more than 30
// seconds if the service is still processing the request.
time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds
f.cache.MarkDeleted(container)
return true, err
}
// now try to create the container
err := f.pacer.Call(func() (bool, error) {
ctx := context.Background()
_, err := f.cntURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
if err != nil {
if storageErr, ok := err.(azblob.StorageError); ok {
switch storageErr.ServiceCode() {
case azblob.ServiceCodeContainerAlreadyExists:
f.containerOK = true
return false, nil
case azblob.ServiceCodeContainerBeingDeleted:
// From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container
// When a container is deleted, a container with the same name cannot be created
// for at least 30 seconds; the container may not be available for more than 30
// seconds if the service is still processing the request.
time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds
f.containerDeleted = true
return true, err
}
}
return f.shouldRetry(err)
})
}, nil)
}
return f.shouldRetry(err)
})
if err == nil {
f.containerOK = true
f.containerDeleted = false
}
return errors.Wrap(err, "failed to make container")
}
// isEmpty checks to see if a given (container, directory) is empty and returns an error if not
func (f *Fs) isEmpty(ctx context.Context, container, directory string) (err error) {
// isEmpty checks to see if a given directory is empty and returns an error if not
func (f *Fs) isEmpty(ctx context.Context, dir string) (err error) {
empty := true
err = f.list(ctx, container, directory, f.rootDirectory, f.rootContainer == "", true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
err = f.list(ctx, dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
empty = false
return nil
})
@@ -850,42 +870,47 @@ func (f *Fs) isEmpty(ctx context.Context, container, directory string) (err erro
// deleteContainer deletes the container. It can delete a full
// container so use isEmpty if you don't want that.
func (f *Fs) deleteContainer(ctx context.Context, container string) error {
return f.cache.Remove(container, func() error {
options := azblob.ContainerAccessConditions{}
return f.pacer.Call(func() (bool, error) {
_, err := f.cntURL(container).GetProperties(ctx, azblob.LeaseAccessConditions{})
if err == nil {
_, err = f.cntURL(container).Delete(ctx, options)
}
func (f *Fs) deleteContainer() error {
f.containerOKMu.Lock()
defer f.containerOKMu.Unlock()
options := azblob.ContainerAccessConditions{}
ctx := context.Background()
err := f.pacer.Call(func() (bool, error) {
_, err := f.cntURL.GetProperties(ctx, azblob.LeaseAccessConditions{})
if err == nil {
_, err = f.cntURL.Delete(ctx, options)
}
if err != nil {
// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
return false, fs.ErrorDirNotFound
}
return f.shouldRetry(err)
if err != nil {
// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
return false, fs.ErrorDirNotFound
}
return f.shouldRetry(err)
})
}
return f.shouldRetry(err)
})
if err == nil {
f.containerOK = false
f.containerDeleted = true
}
return errors.Wrap(err, "failed to delete container")
}
// Rmdir deletes the container if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
container, directory := f.split(dir)
if container == "" || directory != "" {
return nil
}
err := f.isEmpty(ctx, container, directory)
err := f.isEmpty(ctx, dir)
if err != nil {
return err
}
return f.deleteContainer(ctx, container)
if f.root != "" || dir != "" {
return nil
}
return f.deleteContainer()
}
// Precision of the remote
@@ -901,12 +926,11 @@ func (f *Fs) Hashes() hash.Set {
// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context) error {
dir := "" // forward compat!
container, directory := f.split(dir)
if container == "" || directory != "" {
// Delegate to caller if not root of a container
if f.root != "" || dir != "" {
// Delegate to caller if not root container
return fs.ErrorCantPurge
}
return f.deleteContainer(ctx, container)
return f.deleteContainer()
}
// Copy src to this remote using server side copy operations.
@@ -919,8 +943,7 @@ func (f *Fs) Purge(ctx context.Context) error {
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstContainer, dstPath := f.split(remote)
err := f.makeContainer(ctx, dstContainer)
err := f.Mkdir(ctx, "")
if err != nil {
return nil, err
}
@@ -929,7 +952,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
dstBlobURL := f.getBlobReference(dstContainer, dstPath)
dstBlobURL := f.getBlobReference(remote)
srcBlobURL := srcObj.getBlobReference()
source, err := url.Parse(srcBlobURL.String())
@@ -1062,8 +1085,7 @@ func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
// getBlobReference creates an empty blob reference with no metadata
func (o *Object) getBlobReference() azblob.BlobURL {
container, directory := o.split()
return o.fs.getBlobReference(container, directory)
return o.fs.getBlobReference(o.remote)
}
// clearMetaData clears enough metadata so readMetaData will re-read it
@@ -1115,7 +1137,7 @@ func (o *Object) parseTimeString(timeString string) (err error) {
fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
return err
}
o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
o.modTime = time.Unix(unixMilliseconds/1E3, (unixMilliseconds%1E3)*1E6).UTC()
return nil
}
@@ -1163,7 +1185,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if o.AccessTier() == azblob.AccessTierArchive {
return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
}
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
@@ -1369,8 +1391,7 @@ outer:
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
container, _ := o.split()
err = o.fs.makeContainer(ctx, container)
err = o.fs.Mkdir(ctx, "")
if err != nil {
return err
}
@@ -1508,6 +1529,4 @@ var (
_ fs.ListRer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.GetTierer = &Object{}
_ fs.SetTierer = &Object{}
)
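The refactor above routes every container lookup through `f.split`, which joins `f.root` with the remote path and hands the result to `bucket.Split` from `lib/bucket`. A minimal sketch of that splitting behaviour, assuming it simply cuts at the first slash (`splitBucket` and the sample paths below are illustrative, not rclone code):

```
package main

import (
	"fmt"
	"strings"
)

// splitBucket mimics what lib/bucket.Split presumably does: cut the
// combined path at the first slash into (container, containerPath).
func splitBucket(absPath string) (container, containerPath string) {
	if absPath == "" {
		return "", ""
	}
	if i := strings.IndexRune(absPath, '/'); i >= 0 {
		return absPath[:i], absPath[i+1:]
	}
	return absPath, ""
}

func main() {
	for _, p := range []string{"", "mycontainer", "mycontainer/dir/file.txt"} {
		c, rest := splitBucket(p)
		fmt.Printf("%-26q -> container=%q path=%q\n", p, c, rest)
	}
}
```

An empty container part is what lets the same Fs handle the account-rooted case (list containers) as well as the container-rooted case, with `f.cntURLcache` keeping one ContainerURL per container seen.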


@@ -50,7 +50,7 @@ type Timestamp time.Time
// MarshalJSON turns a Timestamp into JSON (in UTC)
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
timestamp := (*time.Time)(t).UTC().UnixNano()
return []byte(strconv.FormatInt(timestamp/1e6, 10)), nil
return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
}
// UnmarshalJSON turns JSON into a Timestamp
@@ -59,7 +59,7 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
if err != nil {
return err
}
*t = Timestamp(time.Unix(timestamp/1e3, (timestamp%1e3)*1e6).UTC())
*t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
return nil
}
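Both variants of this code implement the same conversion: B2 timestamps are milliseconds since the Unix epoch, so marshalling divides `UnixNano()` by 1e6 and unmarshalling rebuilds a `time.Time` from the whole seconds plus the millisecond remainder. A self-contained round-trip check (illustrative, not rclone code):

```
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2019, 8, 6, 16, 43, 50, 123*1e6, time.UTC)

	// Marshal: nanoseconds -> milliseconds since the epoch.
	ms := t.UnixNano() / 1e6

	// Unmarshal: whole seconds, plus the millisecond remainder as nanoseconds.
	back := time.Unix(ms/1e3, (ms%1e3)*1e6).UTC()

	fmt.Println(ms)            // milliseconds since epoch
	fmt.Println(back.Equal(t)) // true: lossless at millisecond precision
}
```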

File diff suppressed because it is too large.


@@ -104,14 +104,13 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
Method: "POST",
Path: "/b2_start_large_file",
}
bucket, bucketPath := o.split()
bucketID, err := f.getBucketID(ctx, bucket)
bucketID, err := f.getBucketID()
if err != nil {
return nil, err
}
var request = api.StartLargeFileRequest{
BucketID: bucketID,
Name: bucketPath,
Name: o.fs.root + remote,
ContentType: fs.MimeType(ctx, src),
Info: map[string]string{
timeKey: timeString(modTime),
@@ -125,8 +124,8 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
}
var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
return f.shouldRetry(ctx, resp, err)
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, err
@@ -150,7 +149,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
up.uploadMu.Lock()
defer up.uploadMu.Unlock()
if len(up.uploads) == 0 {
@@ -162,8 +161,8 @@ func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadP
ID: up.id,
}
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err)
resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
return up.f.shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get upload URL")
@@ -192,12 +191,12 @@ func (up *largeUpload) clearUploadURL() {
}
// Transfer a chunk
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
func (up *largeUpload) transferChunk(part int64, body []byte) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
// Get upload URL
upload, err := up.getUploadURL(ctx)
upload, err := up.getUploadURL()
if err != nil {
return false, err
}
@@ -241,8 +240,8 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
var response api.UploadPartResponse
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
retry, err := up.f.shouldRetry(ctx, resp, err)
resp, err := up.f.srv.CallJSON(&opts, nil, &response)
retry, err := up.f.shouldRetry(resp, err)
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
}
@@ -264,7 +263,7 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
}
// finish closes off the large upload
func (up *largeUpload) finish(ctx context.Context) error {
func (up *largeUpload) finish() error {
fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
opts := rest.Opts{
Method: "POST",
@@ -276,8 +275,8 @@ func (up *largeUpload) finish(ctx context.Context) error {
}
var response api.FileInfo
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
return up.f.shouldRetry(ctx, resp, err)
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
return up.f.shouldRetry(resp, err)
})
if err != nil {
return err
@@ -286,7 +285,7 @@ func (up *largeUpload) finish(ctx context.Context) error {
}
// cancel aborts the large upload
func (up *largeUpload) cancel(ctx context.Context) error {
func (up *largeUpload) cancel() error {
opts := rest.Opts{
Method: "POST",
Path: "/b2_cancel_large_file",
@@ -296,18 +295,18 @@ func (up *largeUpload) cancel(ctx context.Context) error {
}
var response api.CancelLargeFileResponse
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
return up.f.shouldRetry(ctx, resp, err)
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
return up.f.shouldRetry(resp, err)
})
return err
}
func (up *largeUpload) managedTransferChunk(ctx context.Context, wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
wg.Add(1)
go func(part int64, buf []byte) {
defer wg.Done()
defer up.f.putUploadBlock(buf)
err := up.transferChunk(ctx, part, buf)
err := up.transferChunk(part, buf)
if err != nil {
select {
case errs <- err:
@@ -317,7 +316,7 @@ func (up *largeUpload) managedTransferChunk(ctx context.Context, wg *sync.WaitGr
}(part, buf)
}
func (up *largeUpload) finishOrCancelOnError(ctx context.Context, err error, errs chan error) error {
func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
if err == nil {
select {
case err = <-errs:
@@ -326,19 +325,19 @@ func (up *largeUpload) finishOrCancelOnError(ctx context.Context, err error, err
}
if err != nil {
fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
cancelErr := up.cancel(ctx)
cancelErr := up.cancel()
if cancelErr != nil {
fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
}
return err
}
return up.finish(ctx)
return up.finish()
}
// Stream uploads the chunks from the input, starting with a required initial
// chunk. Assumes the file size is unknown and will upload until the input
// reaches EOF.
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
errs := make(chan error, 1)
hasMoreParts := true
@@ -346,7 +345,7 @@ func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (e
// Transfer initial chunk
up.size = int64(len(initialUploadBlock))
up.managedTransferChunk(ctx, &wg, errs, 1, initialUploadBlock)
up.managedTransferChunk(&wg, errs, 1, initialUploadBlock)
outer:
for part := int64(2); hasMoreParts; part++ {
@@ -388,16 +387,16 @@ outer:
}
// Transfer the chunk
up.managedTransferChunk(ctx, &wg, errs, part, buf)
up.managedTransferChunk(&wg, errs, part, buf)
}
wg.Wait()
up.sha1s = up.sha1s[:up.parts]
return up.finishOrCancelOnError(ctx, err, errs)
return up.finishOrCancelOnError(err, errs)
}
// Upload uploads the chunks from the input
func (up *largeUpload) Upload(ctx context.Context) error {
func (up *largeUpload) Upload() error {
fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
remaining := up.size
errs := make(chan error, 1)
@@ -428,10 +427,10 @@ outer:
}
// Transfer the chunk
up.managedTransferChunk(ctx, &wg, errs, part, buf)
up.managedTransferChunk(&wg, errs, part, buf)
remaining -= reqSize
}
wg.Wait()
return up.finishOrCancelOnError(ctx, err, errs)
return up.finishOrCancelOnError(err, errs)
}
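The Stream and Upload paths above share one concurrency pattern: each chunk is sent from its own goroutine, a sync.WaitGroup tracks completion, and a 1-buffered error channel records only the first failure (later errors fall into the default branch and are dropped). A minimal, self-contained sketch of that pattern, with an illustrative sendChunk standing in for transferChunk:

package main

import (
	"fmt"
	"sync"
)

// sendChunk stands in for transferChunk; part 3 fails so the
// first-error-wins behaviour is visible.
func sendChunk(part int) error {
	if part == 3 {
		return fmt.Errorf("chunk %d failed", part)
	}
	return nil
}

func main() {
	var wg sync.WaitGroup
	errs := make(chan error, 1) // room for exactly one error
	for part := 1; part <= 5; part++ {
		wg.Add(1)
		go func(part int) {
			defer wg.Done()
			if err := sendChunk(part); err != nil {
				select {
				case errs <- err: // record the first error
				default: // one already recorded; drop this one
				}
			}
		}(part)
	}
	wg.Wait()
	select {
	case err := <-errs:
		fmt.Println("upload failed:", err)
	default:
		fmt.Println("upload ok")
	}
}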


@@ -202,23 +202,3 @@ type CommitUpload struct {
ContentModifiedAt Time `json:"content_modified_at"`
} `json:"attributes"`
}
// ConfigJSON defines the shape of a box config.json
type ConfigJSON struct {
BoxAppSettings AppSettings `json:"boxAppSettings"`
EnterpriseID string `json:"enterpriseID"`
}
// AppSettings defines the shape of the boxAppSettings within box config.json
type AppSettings struct {
ClientID string `json:"clientID"`
ClientSecret string `json:"clientSecret"`
AppAuth AppAuth `json:"appAuth"`
}
// AppAuth defines the shape of the appAuth within boxAppSettings in config.json
type AppAuth struct {
PublicKeyID string `json:"publicKeyID"`
PrivateKey string `json:"privateKey"`
Passphrase string `json:"passphrase"`
}
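For reference, these structs decode a Box JWT app's config.json. A minimal sketch of the layout implied by the struct tags above — all values are placeholders, not real credentials:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/rclone/rclone/backend/box/api"
)

const exampleConfig = `{
  "boxAppSettings": {
    "clientID": "abc123",
    "clientSecret": "placeholder",
    "appAuth": {
      "publicKeyID": "kid01",
      "privateKey": "-----BEGIN ENCRYPTED PRIVATE KEY-----\n...",
      "passphrase": "placeholder"
    }
  },
  "enterpriseID": "123456"
}`

func main() {
	var cfg api.ConfigJSON
	if err := json.Unmarshal([]byte(exampleConfig), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.BoxAppSettings.ClientID, cfg.EnterpriseID)
}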


@@ -11,12 +11,8 @@ package box
import (
"context"
"crypto/rsa"
"encoding/json"
"encoding/pem"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
@@ -25,10 +21,6 @@ import (
"strings"
"time"
"github.com/rclone/rclone/lib/jwtutil"
"github.com/youmark/pkcs8"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
@@ -37,14 +29,12 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jws"
)
const (
@@ -59,7 +49,6 @@ const (
listChunks = 1000 // chunk size to read directory listings
minUploadCutoff = 50000000 // upload cutoff can be no lower than this
defaultUploadCutoff = 50 * 1024 * 1024
tokenURL = "https://api.box.com/oauth2/token"
)
// Globals
@@ -84,34 +73,9 @@ func init() {
Description: "Box",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
var err error
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
privateKey, err := getDecryptedPrivateKey(boxConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
claims, err := getClaims(boxConfig, boxSubType)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(fs.Config)
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
if err != nil {
log.Fatalf("Failed to configure token with jwt authentication: %v", err)
}
} else {
err = oauthutil.Config("box", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token with oauth authentication: %v", err)
}
err := oauthutil.Config("box", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
@@ -120,19 +84,6 @@ func init() {
}, {
Name: config.ConfigClientSecret,
Help: "Box App Client Secret\nLeave blank normally.",
}, {
Name: "box_config_file",
Help: "Box App config.json location\nLeave blank normally.",
}, {
Name: "box_sub_type",
Default: "user",
Examples: []fs.OptionExample{{
Value: "user",
Help: "Rclone should act on behalf of a user",
}, {
Value: "enterprise",
Help: "Rclone should act on behalf of a service account",
}},
}, {
Name: "upload_cutoff",
Help: "Cutoff for switching to multipart upload (>= 50MB).",
@@ -147,74 +98,6 @@ func init() {
})
}
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, errors.Wrap(err, "box: failed to read Box config")
}
err = json.Unmarshal(file, &boxConfig)
if err != nil {
return nil, errors.Wrap(err, "box: failed to parse Box config")
}
return boxConfig, nil
}
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
val, err := jwtutil.RandomHex(20)
if err != nil {
return nil, errors.Wrap(err, "box: failed to generate random string for jti")
}
claims = &jws.ClaimSet{
Iss: boxConfig.BoxAppSettings.ClientID,
Sub: boxConfig.EnterpriseID,
Aud: tokenURL,
Iat: time.Now().Unix(),
Exp: time.Now().Add(time.Second * 45).Unix(),
PrivateClaims: map[string]interface{}{
"box_sub_type": boxSubType,
"aud": tokenURL,
"jti": val,
},
}
return claims, nil
}
func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
signingHeaders := &jws.Header{
Algorithm: "RS256",
Typ: "JWT",
KeyID: boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
}
return signingHeaders
}
func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
queryParams := map[string]string{
"client_id": boxConfig.BoxAppSettings.ClientID,
"client_secret": boxConfig.BoxAppSettings.ClientSecret,
}
return queryParams
}
func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
if len(rest) > 0 {
return nil, errors.Wrap(err, "box: extra data included in private key")
}
rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase))
if err != nil {
return nil, errors.Wrap(err, "box: failed to decrypt private key")
}
return rsaKey.(*rsa.PrivateKey), nil
}
// Options defines the configuration for this backend
type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
@@ -321,7 +204,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
return nil, err
}
found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
found, err := f.listAll(directoryID, false, true, func(item *api.Item) bool {
if item.Name == leaf {
info = item
return true
@@ -469,7 +352,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
if item.Name == leaf {
pathIDOut = item.ID
return true
@@ -503,7 +386,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
},
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -525,7 +408,7 @@ type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/folders/" + dirID + "/items",
@@ -540,7 +423,7 @@ OUTER:
var result api.FolderItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -596,7 +479,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
remote := path.Join(dir, info.Name)
if info.Type == api.ItemTypeFolder {
// cache the directory ID for later lookups
@@ -698,14 +581,14 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
}
// deleteObject removes an object by ID
func (f *Fs) deleteObject(ctx context.Context, id string) error {
func (f *Fs) deleteObject(id string) error {
opts := rest.Opts{
Method: "DELETE",
Path: "/files/" + id,
NoResponse: true,
}
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
resp, err := f.srv.Call(&opts)
return shouldRetry(resp, err)
})
}
@@ -736,7 +619,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
opts.Parameters.Set("recursive", strconv.FormatBool(!check))
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -809,7 +692,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var resp *http.Response
var info *api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &copyFile, &info)
resp, err = f.srv.CallJSON(&opts, &copyFile, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -832,7 +715,7 @@ func (f *Fs) Purge(ctx context.Context) error {
}
// move a file or folder
func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
// Move the object
opts := rest.Opts{
Method: "PUT",
@@ -847,7 +730,7 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
resp, err = f.srv.CallJSON(&opts, &move, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -879,7 +762,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// Do the move
info, err := f.move(ctx, "/files/", srcObj.id, leaf, directoryID)
info, err := f.move("/files/", srcObj.id, leaf, directoryID)
if err != nil {
return nil, err
}
@@ -962,7 +845,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// Do the move
_, err = f.move(ctx, "/folders/", srcID, leaf, directoryID)
_, err = f.move("/folders/", srcID, leaf, directoryID)
if err != nil {
return err
}
@@ -1004,7 +887,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
var info api.Item
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &shareLink, &info)
resp, err = f.srv.CallJSON(&opts, &shareLink, &info)
return shouldRetry(resp, err)
})
return info.SharedLink.URL, err
@@ -1123,7 +1006,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
}
var info *api.Item
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
return shouldRetry(resp, err)
})
return info, err
@@ -1156,7 +1039,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1168,7 +1051,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// upload does a single non-multipart upload
//
// This is recommended for less than 50 MB of content
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
upload := api.UploadFile{
Name: replaceReservedChars(leaf),
ContentModifiedAt: api.Time(modTime),
@@ -1195,7 +1078,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
opts.Path = "/files/content"
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &upload, &result)
resp, err = o.fs.srv.CallJSON(&opts, &upload, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1228,16 +1111,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Upload with simple or multipart
if size <= int64(o.fs.opt.UploadCutoff) {
err = o.upload(ctx, in, leaf, directoryID, modTime)
err = o.upload(in, leaf, directoryID, modTime)
} else {
err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime)
err = o.uploadMultipart(in, leaf, directoryID, size, modTime)
}
return err
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.deleteObject(ctx, o.id)
return o.fs.deleteObject(o.id)
}
// ID returns the ID of the Object if known, or "" if not

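Several helpers above (readMetaDataForPath, FindLeaf, List) are built on listAll's callback contract: the lister walks result pages and returns found = true as soon as the callback does. A small, self-contained sketch of that contract with illustrative types:

package main

import "fmt"

type item struct{ Name string }

// listAllSketch walks pre-fetched pages and early-exits the moment fn
// returns true, mirroring listAll's found result above.
func listAllSketch(pages [][]item, fn func(*item) bool) (found bool) {
	for _, page := range pages {
		for i := range page {
			if fn(&page[i]) {
				return true
			}
		}
	}
	return false
}

func main() {
	pages := [][]item{{{"a"}, {"b"}}, {{"target"}, {"c"}}}
	var name string
	found := listAllSketch(pages, func(it *item) bool {
		if it.Name == "target" {
			name = it.Name // caller captures what it needs, as FindLeaf does
			return true
		}
		return false
	})
	fmt.Println(found, name)
}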

@@ -4,7 +4,6 @@ package box
import (
"bytes"
"context"
"crypto/sha1"
"encoding/base64"
"encoding/json"
@@ -23,7 +22,7 @@ import (
)
// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
func (o *Object) createUploadSession(leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/files/upload_sessions",
@@ -42,7 +41,7 @@ func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID stri
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, &response)
resp, err = o.fs.srv.CallJSON(&opts, &request, &response)
return shouldRetry(resp, err)
})
return
@@ -54,7 +53,7 @@ func sha1Digest(digest []byte) string {
}
// uploadPart uploads a part in an upload session
func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
chunkSize := int64(len(chunk))
sha1sum := sha1.Sum(chunk)
opts := rest.Opts{
@@ -71,7 +70,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
opts.Body = wrap(bytes.NewReader(chunk))
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response)
resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
return shouldRetry(resp, err)
})
if err != nil {
@@ -81,7 +80,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
}
// commitUpload finishes an upload session
func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/files/upload_sessions/" + SessionID + "/commit",
@@ -105,7 +104,7 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
outer:
for tries = 0; tries < maxTries; tries++ {
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
if err != nil {
return shouldRetry(resp, err)
}
@@ -155,7 +154,7 @@ outer:
}
// abortUpload cancels an upload session
func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error) {
func (o *Object) abortUpload(SessionID string) (err error) {
opts := rest.Opts{
Method: "DELETE",
Path: "/files/upload_sessions/" + SessionID,
@@ -164,16 +163,16 @@ func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error)
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
return err
}
// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
// Create upload session
session, err := o.createUploadSession(ctx, leaf, directoryID, size)
session, err := o.createUploadSession(leaf, directoryID, size)
if err != nil {
return errors.Wrap(err, "multipart upload create session failed")
}
@@ -184,7 +183,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
defer func() {
if err != nil {
fs.Debugf(o, "Cancelling multipart upload: %v", err)
cancelErr := o.abortUpload(ctx, session.ID)
cancelErr := o.abortUpload(session.ID)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", err)
}
@@ -236,7 +235,7 @@ outer:
defer wg.Done()
defer o.fs.uploadToken.Put()
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap)
partResponse, err := o.uploadPart(session.ID, position, size, buf, wrap)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to upload part")
select {
@@ -264,7 +263,7 @@ outer:
}
// Finalise the upload session
result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
result, err := o.commitUpload(session.ID, parts, modTime, hash.Sum(nil))
if err != nil {
return errors.Wrap(err, "multipart upload failed to finalize")
}
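Each part above is hashed before it is sent: uploadPart computes sha1.Sum(chunk) and passes it through sha1Digest, whose body is not shown in this hunk. Box's chunked-upload API documents the digest header value as "sha=" followed by the base64-encoded SHA-1; a standalone sketch of that computation, assuming that format:

package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	chunk := []byte("example chunk payload")
	sum := sha1.Sum(chunk) // [20]byte, as in uploadPart above
	// assumed header value format: "sha=" + base64(SHA-1 of the part)
	fmt.Println("digest:", "sha="+base64.StdEncoding.EncodeToString(sum[:]))
}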


@@ -1864,24 +1864,6 @@ func cleanPath(p string) string {
return p
}
// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
do := f.Fs.Features().UserInfo
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx)
}
// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
do := f.Fs.Features().Disconnect
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx)
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -1897,6 +1879,4 @@ var (
_ fs.ListRer = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
)
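The removed UserInfo/Disconnect methods above follow the wrapper backend's usual optional-feature delegation: look the hook up on the wrapped Fs's Features(), and report fs.ErrorNotImplemented when it is nil. A generic, self-contained sketch of that shape (names illustrative):

package main

import (
	"context"
	"errors"
	"fmt"
)

var errNotImplemented = errors.New("optional feature not implemented")

// delegate forwards to the wrapped hook only if the wrapped backend
// actually provides it, mirroring the nil-check pattern above.
func delegate(ctx context.Context, do func(context.Context) error) error {
	if do == nil {
		return errNotImplemented
	}
	return do(ctx)
}

func main() {
	fmt.Println(delegate(context.Background(), nil))
	fmt.Println(delegate(context.Background(), func(context.Context) error { return nil }))
}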


@@ -705,16 +705,16 @@ var (
// Test test infrastructure first!
func TestRandomSource(t *testing.T) {
source := newRandomSource(1e8)
sink := newRandomSource(1e8)
source := newRandomSource(1E8)
sink := newRandomSource(1E8)
n, err := io.Copy(sink, source)
assert.NoError(t, err)
assert.Equal(t, int64(1e8), n)
assert.Equal(t, int64(1E8), n)
source = newRandomSource(1e8)
source = newRandomSource(1E8)
buf := make([]byte, 16)
_, _ = source.Read(buf)
sink = newRandomSource(1e8)
sink = newRandomSource(1E8)
_, err = io.Copy(sink, source)
assert.Error(t, err, "Error in stream")
}
@@ -754,23 +754,23 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
}
func TestEncryptDecrypt1(t *testing.T) {
testEncryptDecrypt(t, 1, 1e7)
testEncryptDecrypt(t, 1, 1E7)
}
func TestEncryptDecrypt32(t *testing.T) {
testEncryptDecrypt(t, 32, 1e8)
testEncryptDecrypt(t, 32, 1E8)
}
func TestEncryptDecrypt4096(t *testing.T) {
testEncryptDecrypt(t, 4096, 1e8)
testEncryptDecrypt(t, 4096, 1E8)
}
func TestEncryptDecrypt65536(t *testing.T) {
testEncryptDecrypt(t, 65536, 1e8)
testEncryptDecrypt(t, 65536, 1E8)
}
func TestEncryptDecrypt65537(t *testing.T) {
testEncryptDecrypt(t, 65537, 1e8)
testEncryptDecrypt(t, 65537, 1E8)
}
var (
@@ -803,7 +803,7 @@ func TestEncryptData(t *testing.T) {
} {
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nudge the crypto rand generator
c.cryptoRand = newRandomSource(1E8) // nudge the crypto rand generator
// Check encode works
buf := bytes.NewBuffer(test.in)
@@ -826,7 +826,7 @@ func TestEncryptData(t *testing.T) {
func TestNewEncrypter(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nudge the crypto rand generator
c.cryptoRand = newRandomSource(1E8) // nudge the crypto rand generator
z := &zeroes{}
@@ -853,7 +853,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
fh, err := c.newEncrypter(in, nil)
assert.NoError(t, err)
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
n, err := io.CopyN(ioutil.Discard, fh, 1E6)
assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(32), n)
}
@@ -885,7 +885,7 @@ func (c *closeDetector) Close() error {
func TestNewDecrypter(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
c.cryptoRand = newRandomSource(1e8) // nudge the crypto rand generator
c.cryptoRand = newRandomSource(1E8) // nudge the crypto rand generator
cd := newCloseDetector(bytes.NewBuffer(file0))
fh, err := c.newDecrypter(cd)
@@ -936,7 +936,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
fh, err := c.newDecrypter(in)
assert.NoError(t, err)
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
n, err := io.CopyN(ioutil.Discard, fh, 1E6)
assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(16), n)
}


@@ -802,24 +802,6 @@ func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
return newDir
}
// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
do := f.Fs.Features().UserInfo
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx)
}
// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
do := f.Fs.Features().Disconnect
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx)
}
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
//
// This encrypts the remote name and adjusts the size
@@ -906,8 +888,6 @@ var (
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)


@@ -156,7 +156,6 @@ func init() {
Description: "Google Drive",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -178,7 +177,7 @@ func init() {
log.Fatalf("Failed to configure token: %v", err)
}
}
err = configTeamDrive(ctx, opt, m, name)
err = configTeamDrive(opt, m, name)
if err != nil {
log.Fatalf("Failed to configure team drive: %v", err)
}
@@ -664,7 +663,7 @@ OUTER:
for {
var files *drive.FileList
err = f.pacer.Call(func() (bool, error) {
files, err = list.Fields(googleapi.Field(fields)).Context(ctx).Do()
files, err = list.Fields(googleapi.Field(fields)).Do()
return shouldRetry(err)
})
if err != nil {
@@ -779,7 +778,7 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
}
// Figure out if the user wants to use a team drive
func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name string) error {
func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
// Stop if we are running non-interactive config
if fs.Config.AutoConfirm {
return nil
@@ -807,7 +806,7 @@ func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name
for {
var teamDrives *drive.TeamDriveList
err = newPacer(opt).Call(func() (bool, error) {
teamDrives, err = listTeamDrives.Context(ctx).Do()
teamDrives, err = listTeamDrives.Do()
return shouldRetry(err)
})
if err != nil {
@@ -1735,7 +1734,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
}
} else {
// Upload the file in chunks
info, err = f.Upload(ctx, in, size, srcMimeType, "", remote, createInfo)
info, err = f.Upload(in, size, srcMimeType, "", remote, createInfo)
if err != nil {
return nil, err
}
@@ -1942,9 +1941,6 @@ func (f *Fs) Purge(ctx context.Context) error {
if f.root == "" {
return errors.New("can't purge root directory")
}
if f.opt.TrashedOnly {
return errors.New("Can't purge with --drive-trashed-only. Use delete if you want to selectively delete files")
}
err := f.dirCache.FindRoot(ctx, false)
if err != nil {
return err
@@ -1976,7 +1972,7 @@ func (f *Fs) Purge(ctx context.Context) error {
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) error {
err := f.pacer.Call(func() (bool, error) {
err := f.svc.Files.EmptyTrash().Context(ctx).Do()
err := f.svc.Files.EmptyTrash().Do()
return shouldRetry(err)
})
@@ -1995,7 +1991,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var about *drive.About
var err error
err = f.pacer.Call(func() (bool, error) {
about, err = f.svc.About.Get().Fields("storageQuota").Context(ctx).Do()
about, err = f.svc.About.Get().Fields("storageQuota").Do()
return shouldRetry(err)
})
if err != nil {
@@ -2254,7 +2250,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
}
}
fs.Debugf(f, "Checking for changes on remote")
startPageToken, err = f.changeNotifyRunner(ctx, notifyFunc, startPageToken)
startPageToken, err = f.changeNotifyRunner(notifyFunc, startPageToken)
if err != nil {
fs.Infof(f, "Change notify listener failure: %s", err)
}
@@ -2276,7 +2272,7 @@ func (f *Fs) changeNotifyStartPageToken() (pageToken string, err error) {
return startPageToken.StartPageToken, nil
}
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), startPageToken string) (newStartPageToken string, err error) {
func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), startPageToken string) (newStartPageToken string, err error) {
pageToken := startPageToken
for {
var changeList *drive.ChangeList
@@ -2292,7 +2288,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
if f.isTeamDrive {
changesCall.TeamDriveId(f.opt.TeamDriveID)
}
changeList, err = changesCall.Context(ctx).Do()
changeList, err = changesCall.Do()
return shouldRetry(err)
})
if err != nil {
@@ -2500,7 +2496,7 @@ func (o *baseObject) Storable() bool {
// httpResponse gets an http.Response object for the object
// using the url and method passed in
func (o *baseObject) httpResponse(ctx context.Context, url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
func (o *baseObject) httpResponse(url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
if url == "" {
return nil, nil, errors.New("forbidden to download - check sharing permission")
}
@@ -2508,7 +2504,6 @@ func (o *baseObject) httpResponse(ctx context.Context, url, method string, optio
if err != nil {
return req, nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
fs.OpenOptionAddHTTPHeaders(req.Header, options)
if o.bytes == 0 {
// Don't supply range requests for 0 length objects as they always fail
@@ -2579,8 +2574,8 @@ func isGoogleError(err error, what string) bool {
}
// open a url for reading
func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
_, res, err := o.httpResponse(ctx, url, "GET", options)
func (o *baseObject) open(url string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
_, res, err := o.httpResponse(url, "GET", options)
if err != nil {
if isGoogleError(err, "cannotDownloadAbusiveFile") {
if o.fs.opt.AcknowledgeAbuse {
@@ -2591,7 +2586,7 @@ func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOpt
url += "?"
}
url += "acknowledgeAbuse=true"
_, res, err = o.httpResponse(ctx, url, "GET", options)
_, res, err = o.httpResponse(url, "GET", options)
} else {
err = errors.Wrap(err, "Use the --drive-acknowledge-abuse flag to download this file")
}
@@ -2620,7 +2615,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
o.v2Download = false
}
}
return o.baseObject.open(ctx, o.url, options...)
return o.baseObject.open(o.url, options...)
}
func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Update the size with what we are reading as it can change from
@@ -2645,7 +2640,7 @@ func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in
if offset != 0 {
return nil, errors.New("partial downloads are not supported while exporting Google Documents")
}
in, err = o.baseObject.open(ctx, o.url, options...)
in, err = o.baseObject.open(o.url, options...)
if in != nil {
in = &openDocumentFile{o: o, in: in}
}
@@ -2680,7 +2675,7 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
return ioutil.NopCloser(bytes.NewReader(data)), nil
}
func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
func (o *baseObject) update(updateInfo *drive.File, uploadMimeType string, in io.Reader,
src fs.ObjectInfo) (info *drive.File, err error) {
// Make the API request to upload metadata and file data.
size := src.Size()
@@ -2698,7 +2693,7 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
return
}
// Upload the file in chunks
return o.fs.Upload(ctx, in, size, uploadMimeType, o.id, o.remote, updateInfo)
return o.fs.Upload(in, size, uploadMimeType, o.id, o.remote, updateInfo)
}
// Update the already existing object
@@ -2712,7 +2707,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
MimeType: srcMimeType,
ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
}
info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
info, err := o.baseObject.update(updateInfo, srcMimeType, in, src)
if err != nil {
return err
}
@@ -2749,7 +2744,7 @@ func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.Object
}
updateInfo.MimeType = importMimeType
info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
info, err := o.baseObject.update(updateInfo, srcMimeType, in, src)
if err != nil {
return err
}
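changeNotifyRunner above drives the Drive changes feed with two tokens: NextPageToken chains pages within one poll, and the new start token is carried forward to the next poll. A self-contained sketch of that loop, with an illustrative fetch function in place of the API call:

package main

import "fmt"

type batch struct {
	changes  []string
	nextPage string // more pages in this poll if non-empty
	newStart string // token to resume from on the next poll
}

func poll(fetch func(pageToken string) batch, start string) string {
	pageToken := start
	for {
		b := fetch(pageToken)
		for _, c := range b.changes {
			fmt.Println("changed:", c)
		}
		if b.nextPage != "" {
			pageToken = b.nextPage
			continue
		}
		return b.newStart
	}
}

func main() {
	pages := map[string]batch{
		"t0":   {changes: []string{"a.txt"}, nextPage: "t0p2"},
		"t0p2": {changes: []string{"b.txt"}, newStart: "t1"},
	}
	next := poll(func(tok string) batch { return pages[tok] }, "t0")
	fmt.Println("resume from:", next) // resume from: t1
}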


@@ -11,7 +11,6 @@
package drive
import (
"context"
"encoding/json"
"fmt"
"io"
@@ -51,7 +50,7 @@ type resumableUpload struct {
}
// Upload the io.Reader in of size bytes with contentType and info
func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
params := url.Values{
"alt": {"json"},
"uploadType": {"resumable"},
@@ -82,7 +81,6 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
if err != nil {
return false, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
googleapi.Expand(req.URL, map[string]string{
"fileId": fileID,
})
@@ -108,13 +106,12 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
MediaType: contentType,
ContentLength: size,
}
return rx.Upload(ctx)
return rx.Upload()
}
// Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request {
func (rx *resumableUpload) makeRequest(start int64, body io.ReadSeeker, reqSize int64) *http.Request {
req, _ := http.NewRequest("POST", rx.URI, body)
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.ContentLength = reqSize
if reqSize != 0 {
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
@@ -132,8 +129,8 @@ var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
// Query drive for the amount transferred so far
//
// If error is nil, then start should be valid
func (rx *resumableUpload) transferStatus(ctx context.Context) (start int64, err error) {
req := rx.makeRequest(ctx, 0, nil, 0)
func (rx *resumableUpload) transferStatus() (start int64, err error) {
req := rx.makeRequest(0, nil, 0)
res, err := rx.f.client.Do(req)
if err != nil {
return 0, err
@@ -160,9 +157,9 @@ func (rx *resumableUpload) transferStatus(ctx context.Context) (start int64, err
}
// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
_, _ = chunk.Seek(0, io.SeekStart)
req := rx.makeRequest(ctx, start, chunk, chunkSize)
req := rx.makeRequest(start, chunk, chunkSize)
res, err := rx.f.client.Do(req)
if err != nil {
return 599, err
@@ -195,7 +192,7 @@ func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk
// Upload uploads the chunks from the input
// It retries each chunk using the pacer and --low-level-retries
func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
func (rx *resumableUpload) Upload() (*drive.File, error) {
start := int64(0)
var StatusCode int
var err error
@@ -210,7 +207,7 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
// Transfer the chunk
err = rx.f.pacer.Call(func() (bool, error) {
fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
StatusCode, err = rx.transferChunk(start, chunk, reqSize)
again, err := shouldRetry(err)
if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
again = false
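makeRequest above sets the resumable-upload Content-Range header from the chunk's start offset and size. A worked, runnable example of that arithmetic:

package main

import "fmt"

// contentRange reproduces the header built in makeRequest above:
// an inclusive byte range followed by the total length.
func contentRange(start, reqSize, total int64) string {
	return fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, total)
}

func main() {
	// first 8 MiB chunk of a 100000000-byte file:
	fmt.Println(contentRange(0, 8<<20, 100000000))
	// prints: bytes 0-8388607/100000000
}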


@@ -975,7 +975,6 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.bytes)
headers := fs.OpenOptionHeaders(options)
arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers}
err = o.fs.pacer.Call(func() (bool, error) {


@@ -32,7 +32,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString
func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
func (f *Fs) getDownloadToken(url string) (*GetTokenResponse, error) {
request := DownloadRequest{
URL: url,
Single: 1,
@@ -44,7 +44,7 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
var token GetTokenResponse
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
resp, err := f.rest.CallJSON(&opts, &request, &token)
return shouldRetry(resp, err)
})
if err != nil {
@@ -72,7 +72,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr
var sharedFiles SharedFolderResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, &sharedFiles)
resp, err := f.rest.CallJSON(&opts, nil, &sharedFiles)
return shouldRetry(resp, err)
})
if err != nil {
@@ -88,7 +88,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr
return entries, nil
}
func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesList, err error) {
func (f *Fs) listFiles(directoryID int) (filesList *FilesList, err error) {
// fs.Debugf(f, "Requesting files for dir `%s`", directoryID)
request := ListFilesRequest{
FolderID: directoryID,
@@ -101,7 +101,7 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
filesList = &FilesList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, filesList)
resp, err := f.rest.CallJSON(&opts, &request, filesList)
return shouldRetry(resp, err)
})
if err != nil {
@@ -111,7 +111,7 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
return filesList, nil
}
func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *FoldersList, err error) {
func (f *Fs) listFolders(directoryID int) (foldersList *FoldersList, err error) {
// fs.Debugf(f, "Requesting folders for id `%s`", directoryID)
request := ListFolderRequest{
@@ -125,7 +125,7 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
foldersList = &FoldersList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, foldersList)
resp, err := f.rest.CallJSON(&opts, &request, foldersList)
return shouldRetry(resp, err)
})
if err != nil {
@@ -153,12 +153,12 @@ func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, er
return nil, err
}
files, err := f.listFiles(ctx, folderID)
files, err := f.listFiles(folderID)
if err != nil {
return nil, err
}
folders, err := f.listFolders(ctx, folderID)
folders, err := f.listFolders(folderID)
if err != nil {
return nil, err
}
@@ -166,7 +166,6 @@ func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, er
entries = make([]fs.DirEntry, len(files.Items)+len(folders.SubFolders))
for i, item := range files.Items {
item.Filename = restoreReservedChars(item.Filename)
entries[i] = f.newObjectFromFile(ctx, dir, item)
}
@@ -205,7 +204,7 @@ func getRemote(dir, fileName string) string {
return dir + "/" + fileName
}
func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (response *MakeFolderResponse, err error) {
func (f *Fs) makeFolder(leaf string, folderID int) (response *MakeFolderResponse, err error) {
name := replaceReservedChars(leaf)
// fs.Debugf(f, "Creating folder `%s` in id `%s`", name, directoryID)
@@ -221,7 +220,7 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
response = &MakeFolderResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, &request, response)
resp, err := f.rest.CallJSON(&opts, &request, response)
return shouldRetry(resp, err)
})
if err != nil {
@@ -233,7 +232,7 @@ func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (respons
return response, err
}
func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (response *GenericOKResponse, err error) {
func (f *Fs) removeFolder(name string, folderID int) (response *GenericOKResponse, err error) {
// fs.Debugf(f, "Removing folder with id `%s`", directoryID)
request := &RemoveFolderRequest{
@@ -248,7 +247,7 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
response = &GenericOKResponse{}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rest.CallJSON(ctx, &opts, request, response)
resp, err = f.rest.CallJSON(&opts, request, response)
return shouldRetry(resp, err)
})
if err != nil {
@@ -263,7 +262,7 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
return response, nil
}
func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKResponse, err error) {
func (f *Fs) deleteFile(url string) (response *GenericOKResponse, err error) {
request := &RemoveFileRequest{
Files: []RmFile{
{url},
@@ -277,7 +276,7 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
response = &GenericOKResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
resp, err := f.rest.CallJSON(&opts, request, response)
return shouldRetry(resp, err)
})
@@ -290,7 +289,7 @@ func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKRes
return response, nil
}
func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
func (f *Fs) getUploadNode() (response *GetUploadNodeResponse, err error) {
// fs.Debugf(f, "Requesting Upload node")
opts := rest.Opts{
@@ -301,7 +300,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
response = &GetUploadNodeResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
resp, err := f.rest.CallJSON(&opts, nil, response)
return shouldRetry(resp, err)
})
if err != nil {
@@ -313,11 +312,9 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return response, err
}
func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
func (f *Fs) uploadFile(in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
// fs.Debugf(f, "Uploading File `%s`", fileName)
fileName = replaceReservedChars(fileName)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
}
@@ -343,7 +340,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
}
err = f.pacer.CallNoRetry(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, nil)
resp, err := f.rest.CallJSON(&opts, nil, nil)
return shouldRetry(resp, err)
})
@@ -356,7 +353,7 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
return response, err
}
func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
func (f *Fs) endUpload(uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
// fs.Debugf(f, "Ending File Upload `%s`", uploadID)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
@@ -377,7 +374,7 @@ func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (re
response = &EndFileUploadResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
resp, err := f.rest.CallJSON(&opts, nil, response)
return shouldRetry(resp, err)
})
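The upload ID handed back by the upload node is interpolated into later requests, so uploadFile and endUpload above both check it against a precompiled alphanumeric pattern and a length cap before using it. A standalone sketch of that guard, grounded in the isAlphaNumeric definition at the top of this file:

package main

import (
	"fmt"
	"regexp"
)

var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString

// validUploadID mirrors the rejection condition used above.
func validUploadID(id string) bool {
	return len(id) <= 10 && isAlphaNumeric(id)
}

func main() {
	fmt.Println(validUploadID("abc123"))    // true
	fmt.Println(validUploadID("../escape")) // false: rejected
}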


@@ -74,7 +74,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
if err != nil {
return "", false, err
}
folders, err := f.listFolders(ctx, folderID)
folders, err := f.listFolders(folderID)
if err != nil {
return "", false, err
}
@@ -95,7 +95,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
if err != nil {
return "", err
}
resp, err := f.makeFolder(ctx, leaf, folderID)
resp, err := f.makeFolder(leaf, folderID)
if err != nil {
return "", err
}
@@ -251,7 +251,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if err != nil {
return nil, err
}
files, err := f.listFiles(ctx, folderID)
files, err := f.listFiles(folderID)
if err != nil {
return nil, err
}
@@ -298,13 +298,13 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(100e9) {
if size > int64(100E9) {
return nil, errors.New("File too big, can't upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
nodeResponse, err := f.getUploadNode(ctx)
nodeResponse, err := f.getUploadNode()
if err != nil {
return nil, err
}
@@ -314,12 +314,12 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
return nil, err
}
_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
_, err = f.uploadFile(in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
if err != nil {
return nil, err
}
fileUploadResponse, err := f.endUpload(ctx, nodeResponse.ID, nodeResponse.URL)
fileUploadResponse, err := f.endUpload(nodeResponse.ID, nodeResponse.URL)
if err != nil {
return nil, err
}
@@ -393,7 +393,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return err
}
_, err = f.removeFolder(ctx, dir, folderID)
_, err = f.removeFolder(dir, folderID)
if err != nil {
return err
}
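putUnchecked above performs the upload in three calls: getUploadNode, uploadFile, endUpload. A compressed, runnable sketch of that sequence, where the function parameters stand in for the Fs methods of the same names (the node ID and URL are placeholders):

package main

import (
	"errors"
	"fmt"
)

func putSketch(
	getUploadNode func() (id, url string, err error),
	uploadFile func(id, url string) error,
	endUpload func(id, url string) error,
) error {
	id, url, err := getUploadNode() // 1. ask the API for an upload node
	if err != nil {
		return err
	}
	if err := uploadFile(id, url); err != nil { // 2. stream the bytes to it
		return err
	}
	return endUpload(id, url) // 3. finalise, yielding the file metadata
}

func main() {
	err := putSketch(
		func() (string, string, error) { return "abc123", "https://upload.example", nil },
		func(id, url string) error { return nil },
		func(id, url string) error { return errors.New("finalise failed") },
	)
	fmt.Println(err)
}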


@@ -75,7 +75,7 @@ func (o *Object) SetModTime(context.Context, time.Time) error {
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
fs.FixRangeOption(options, int64(o.file.Size))
downloadToken, err := o.fs.getDownloadToken(ctx, o.file.URL)
downloadToken, err := o.fs.getDownloadToken(o.file.URL)
if err != nil {
return nil, err
@@ -89,7 +89,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.rest.Call(ctx, &opts)
resp, err = o.fs.rest.Call(&opts)
return shouldRetry(resp, err)
})
@@ -131,7 +131,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
func (o *Object) Remove(ctx context.Context) error {
// fs.Debugf(f, "Removing file `%s` with url `%s`", o.file.Filename, o.file.URL)
_, err := o.fs.deleteFile(ctx, o.file.URL)
_, err := o.fs.deleteFile(o.file.URL)
if err != nil {
return err


@@ -23,7 +23,9 @@ import (
"net/http"
"os"
"path"
"regexp"
"strings"
"sync"
"time"
"github.com/pkg/errors"
@@ -36,7 +38,6 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"golang.org/x/oauth2"
@@ -263,16 +264,16 @@ type Options struct {
// Fs represents a remote storage server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache of bucket status
pacer *fs.Pacer // To pace the API calls
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
pacer *fs.Pacer // To pace the API calls
}
// Object describes a storage object
@@ -297,18 +298,18 @@ func (f *Fs) Name() string {
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootBucket == "" {
return fmt.Sprintf("GCS root")
if f.root == "" {
return fmt.Sprintf("Storage bucket %s", f.bucket)
}
if f.rootDirectory == "" {
return fmt.Sprintf("GCS bucket %s", f.rootBucket)
}
return fmt.Sprintf("GCS bucket %s path %s", f.rootBucket, f.rootDirectory)
return fmt.Sprintf("Storage bucket %s path %s", f.bucket, f.root)
}
// Features returns the optional features of this Fs
@@ -340,23 +341,21 @@ func shouldRetry(err error) (again bool, errOut error) {
return again, err
}
// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
// Pattern to match a storage path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parsePath parses a storage 'url'
func parsePath(path string) (bucket, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("couldn't find bucket in storage path %q", path)
} else {
bucket, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
return
}
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
return bucket.Split(path.Join(f.root, rootRelativePath))
}
// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}
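Both splitting styles above reduce to the same rule: everything before the first "/" is the bucket, the remainder (slashes trimmed) is the in-bucket path. A runnable illustration that mirrors what parsePath's regexp `^([^/]*)(.*)$` computes:

package main

import (
	"fmt"
	"strings"
)

// splitBucket mirrors parsePath above: bucket is the text before the
// first slash, directory is the rest with surrounding slashes trimmed.
func splitBucket(p string) (bucket, directory string) {
	parts := strings.SplitN(p, "/", 2)
	bucket = parts[0]
	if len(parts) > 1 {
		directory = strings.Trim(parts[1], "/")
	}
	return bucket, directory
}

func main() {
	fmt.Println(splitBucket("mybucket/dir/file.txt")) // mybucket dir/file.txt
	fmt.Println(splitBucket("mybucket"))              // mybucket
}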
func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
if err != nil {
@@ -366,15 +365,8 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
var oAuthClient *http.Client
// Parse config into Options struct
@@ -414,19 +406,22 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
}
f := &Fs{
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
bucket, directory, err := parsePath(root)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
bucket: bucket,
root: directory,
opt: *opt,
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
}).Fill(f)
// Create a new authorized Drive client.
@@ -436,18 +431,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
}
if f.rootBucket != "" && f.rootDirectory != "" {
if f.root != "" {
f.root += "/"
// Check to see if the object exists
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.Get(f.rootBucket, f.rootDirectory).Context(ctx).Do()
_, err = f.svc.Objects.Get(bucket, directory).Do()
return shouldRetry(err)
})
if err == nil {
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
f.root = path.Dir(directory)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
f.setRoot(newRoot)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
@@ -458,7 +455,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage.Object) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -466,7 +463,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage
if info != nil {
o.setMetaData(info)
} else {
err := o.readMetaData(ctx) // reads info and meta, returning an error
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
return nil, err
}
@@ -477,7 +474,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
return f.newObjectWithInfo(remote, nil)
}
// listFn is called from list to handle an object.
@@ -488,24 +485,20 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
//
// The remote has prefix removed from it and if addBucket is set
// then it adds the bucket to the start.
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) (err error) {
if prefix != "" {
prefix += "/"
func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) (err error) {
root := f.root
rootLength := len(root)
if dir != "" {
root += dir + "/"
}
if directory != "" {
directory += "/"
}
list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
list := f.svc.Objects.List(f.bucket).Prefix(root).MaxResults(listChunks)
if !recurse {
list = list.Delimiter("/")
}
for {
var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) {
objects, err = list.Context(ctx).Do()
objects, err = list.Do()
return shouldRetry(err)
})
if err != nil {
@@ -518,36 +511,31 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
if !recurse {
var object storage.Object
for _, remote := range objects.Prefixes {
if !strings.HasSuffix(remote, "/") {
for _, prefix := range objects.Prefixes {
if !strings.HasSuffix(prefix, "/") {
continue
}
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
}
remote = remote[len(prefix) : len(remote)-1]
if addBucket {
remote = path.Join(bucket, remote)
}
err = fn(remote, &object, true)
err = fn(prefix[rootLength:len(prefix)-1], &object, true)
if err != nil {
return err
}
}
}
for _, object := range objects.Items {
if !strings.HasPrefix(object.Name, prefix) {
if !strings.HasPrefix(object.Name, root) {
fs.Logf(f, "Odd name received %q", object.Name)
continue
}
remote := object.Name[len(prefix):]
isDirectory := strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
remote := object.Name[rootLength:]
// is this a directory marker?
if isDirectory && object.Size == 0 {
if (strings.HasSuffix(remote, "/") || remote == "") && object.Size == 0 {
if recurse && remote != "" {
// add a directory in if --fast-list since will have no prefixes
err = fn(remote[:len(remote)-1], object, true)
if err != nil {
return err
}
}
continue // skip directory marker
}
err = fn(remote, object, false)
@@ -564,23 +552,32 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
func (f *Fs) itemToDirEntry(remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
if isDirectory {
d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size))
return d, nil
}
o, err := f.newObjectWithInfo(ctx, remote, object)
o, err := f.newObjectWithInfo(remote, object)
if err != nil {
return nil, err
}
return o, nil
}
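GCS has no real directories, so list above treats a zero-length object whose prefix-stripped name ends in "/" as a placeholder for an empty directory rather than as a file. A standalone sketch of that rule, taken from the condition in list:

package main

import (
	"fmt"
	"strings"
)

// isDirMarker applies the directory-marker test from list above.
func isDirMarker(remote string, size int64) bool {
	return size == 0 && (strings.HasSuffix(remote, "/") || remote == "")
}

func main() {
	fmt.Println(isDirMarker("photos/", 0))        // true: empty-directory marker
	fmt.Println(isDirMarker("photos/cat.jpg", 4)) // false: ordinary object
}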
// mark the bucket as being OK
func (f *Fs) markBucketOK() {
if f.bucket != "" {
f.bucketOKMu.Lock()
f.bucketOK = true
f.bucketOKMu.Unlock()
}
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// List the objects
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
err = f.list(ctx, dir, false, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
@@ -593,12 +590,15 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
return nil, err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
f.markBucketOK()
return entries, err
}
// listBuckets lists the buckets
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
if f.opt.ProjectNumber == "" {
return nil, errors.New("can't list buckets without project number")
}
@@ -606,7 +606,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
for {
var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) {
buckets, err = listBuckets.Context(ctx).Do()
buckets, err = listBuckets.Do()
return shouldRetry(err)
})
if err != nil {
@@ -634,14 +634,10 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
if f.bucket == "" {
return f.listBuckets(dir)
}
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
return f.listDir(ctx, dir)
}
// ListR lists the objects and directories of the Fs starting
@@ -661,43 +657,22 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
}
if bucket == "" {
entries, err := f.listBuckets(ctx)
err = f.list(ctx, dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
bucket := entry.Remote()
err = listR(bucket, "", f.rootDirectory, true)
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
}
} else {
err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return list.Add(entry)
})
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return list.Flush()
}
@@ -722,55 +697,58 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
bucket, _ := f.split(dir)
return f.makeBucket(ctx, bucket)
}
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.bucketOK {
return nil
}
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
// makeBucket creates the bucket if it doesn't exist
func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
return f.cache.Create(bucket, func() error {
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
return shouldRetry(err)
})
if err == nil {
// Bucket already exists
return nil
} else if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code != http.StatusNotFound {
return errors.Wrap(err, "failed to get bucket")
}
} else {
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(f.bucket).MaxResults(1).Do()
return shouldRetry(err)
})
if err == nil {
// Bucket already exists
f.bucketOK = true
return nil
} else if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code != http.StatusNotFound {
return errors.Wrap(err, "failed to get bucket")
}
} else {
return errors.Wrap(err, "failed to get bucket")
}
if f.opt.ProjectNumber == "" {
return errors.New("can't make bucket without project number")
}
if f.opt.ProjectNumber == "" {
return errors.New("can't make bucket without project number")
}
bucket := storage.Bucket{
Name: bucket,
Location: f.opt.Location,
StorageClass: f.opt.StorageClass,
bucket := storage.Bucket{
Name: f.bucket,
Location: f.opt.Location,
StorageClass: f.opt.StorageClass,
}
if f.opt.BucketPolicyOnly {
bucket.IamConfiguration = &storage.BucketIamConfiguration{
BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
Enabled: true,
},
}
if f.opt.BucketPolicyOnly {
bucket.IamConfiguration = &storage.BucketIamConfiguration{
BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
Enabled: true,
},
}
}
err = f.pacer.Call(func() (bool, error) {
insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
return f.pacer.Call(func() (bool, error) {
insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
_, err = insertBucket.Context(ctx).Do()
return shouldRetry(err)
})
}, nil)
_, err = insertBucket.Do()
return shouldRetry(err)
})
if err == nil {
f.bucketOK = true
}
return err
}
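
The existence probe described in the comments above can be sketched on its own; this assumes the google.golang.org/api/storage/v1 and google.golang.org/api/googleapi packages and is a sketch, not the backend's exact code:

// bucketExists checks a bucket by listing at most one object, which
// works with a service account that only has the "Storage Object
// Admin" role (Buckets.Get would need bucket-level access).
func bucketExists(ctx context.Context, svc *storage.Service, bucket string) (bool, error) {
	_, err := svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
	if err == nil {
		return true, nil
	}
	if gErr, ok := err.(*googleapi.Error); ok && gErr.Code == http.StatusNotFound {
		return false, nil
	}
	return false, err
}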
// Rmdir deletes the bucket if the fs is at the root
@@ -778,16 +756,19 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
bucket, directory := f.split(dir)
if bucket == "" || directory != "" {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
return nil
}
return f.cache.Remove(bucket, func() error {
return f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
return shouldRetry(err)
})
err = f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(f.bucket).Do()
return shouldRetry(err)
})
if err == nil {
f.bucketOK = false
}
return err
}
// Precision returns the precision
@@ -805,8 +786,7 @@ func (f *Fs) Precision() time.Duration {
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
err := f.makeBucket(ctx, dstBucket)
err := f.Mkdir(ctx, "")
if err != nil {
return nil, err
}
@@ -815,7 +795,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcBucket, srcPath := srcObj.split()
// Temporary Object under construction
dstObj := &Object{
@@ -823,13 +802,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
remote: remote,
}
srcBucket := srcObj.fs.bucket
srcObject := srcObj.fs.root + srcObj.remote
dstBucket := f.bucket
dstObject := f.root + remote
var newObject *storage.Object
err = f.pacer.Call(func() (bool, error) {
copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil)
if !f.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
}
newObject, err = copyObject.Context(ctx).Do()
newObject, err = f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
return shouldRetry(err)
})
if err != nil {
@@ -912,33 +891,24 @@ func (o *Object) setMetaData(info *storage.Object) {
}
}
// readObjectInfo reads the definition for an object
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
bucket, bucketPath := o.split()
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
if !o.modTime.IsZero() {
return nil
}
var object *storage.Object
err = o.fs.pacer.Call(func() (bool, error) {
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
object, err = o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
return shouldRetry(err)
})
if err != nil {
if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code == http.StatusNotFound {
return nil, fs.ErrorObjectNotFound
return fs.ErrorObjectNotFound
}
}
return nil, err
}
return object, nil
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
if !o.modTime.IsZero() {
return nil
}
object, err := o.readObjectInfo(ctx)
if err != nil {
return err
}
o.setMetaData(object)
@@ -950,7 +920,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx)
err := o.readMetaData()
if err != nil {
// fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -967,27 +937,15 @@ func metadataFromModTime(modTime time.Time) map[string]string {
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
// read the complete existing object first
object, err := o.readObjectInfo(ctx)
if err != nil {
return err
// This only adds metadata so will preserve other metadata
object := storage.Object{
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,
Metadata: metadataFromModTime(modTime),
}
// Add the mtime to the existing metadata
mtime := modTime.Format(timeFormatOut)
if object.Metadata == nil {
object.Metadata = make(map[string]string, 1)
}
object.Metadata[metaMtime] = mtime
// Copy the object to itself to update the metadata
// Using PATCH requires too many permissions
bucket, bucketPath := o.split()
var newObject *storage.Object
err = o.fs.pacer.Call(func() (bool, error) {
copyObject := o.fs.svc.Objects.Copy(bucket, bucketPath, bucket, bucketPath, object)
if !o.fs.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = copyObject.Context(ctx).Do()
newObject, err = o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
return shouldRetry(err)
})
if err != nil {
@@ -1008,8 +966,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
fs.FixRangeOption(options, o.bytes)
fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1037,26 +993,25 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
bucket, bucketPath := o.split()
err := o.fs.makeBucket(ctx, bucket)
err := o.fs.Mkdir(ctx, "")
if err != nil {
return err
}
modTime := src.ModTime(ctx)
object := storage.Object{
Bucket: bucket,
Name: bucketPath,
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,
ContentType: fs.MimeType(ctx, src),
Metadata: metadataFromModTime(modTime),
}
var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
insertObject := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
if !o.fs.opt.BucketPolicyOnly {
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = insertObject.Context(ctx).Do()
newObject, err = insertObject.Do()
return shouldRetry(err)
})
if err != nil {
@@ -1069,9 +1024,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
err = o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
return shouldRetry(err)
})
return err
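
For reference, the mtime convention used by SetModTime and Update above reduces to a small helper; metaMtime and timeFormatOut are the backend's own constants, and this is a sketch rather than the shipped code:

// setMtimeMetadata records a modification time in the object's user
// metadata; the object is then rewritten (copied onto itself) because
// Objects.Patch needs permissions a restricted account may lack.
func setMtimeMetadata(object *storage.Object, modTime time.Time, key, layout string) {
	if object.Metadata == nil {
		object.Metadata = make(map[string]string, 1)
	}
	object.Metadata[key] = modTime.Format(layout)
}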

View File

@@ -27,7 +27,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/oauthutil"
@@ -61,8 +60,6 @@ var (
// Description of how to auth for this app
oauthConfig = &oauth2.Config{
Scopes: []string{
"openid",
"profile",
scopeReadWrite,
},
Endpoint: google.Endpoint,
@@ -146,20 +143,18 @@ type Options struct {
// Fs represents a remote storage server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
unAuth *rest.Client // unauthenticated http client
srv *rest.Client // the connection to the one drive server
ts *oauthutil.TokenSource // token source for oauth2
pacer *fs.Pacer // To pace the API calls
startTime time.Time // time Fs was started - used for datestamps
albumsMu sync.Mutex // protect albums (but not contents)
albums map[bool]*albums // albums, shared or not
uploadedMu sync.Mutex // to protect the below
uploaded dirtree.DirTree // record of uploaded items
createMu sync.Mutex // held when creating albums to prevent dupes
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
pacer *fs.Pacer // To pace the API calls
startTime time.Time // time Fs was started - used for datestamps
albumsMu sync.Mutex // protect albums (but not contents)
albums map[bool]*albums // albums, shared or not
uploadedMu sync.Mutex // to protect the below
uploaded dirtree.DirTree // record of uploaded items
createMu sync.Mutex // held when creating albums to prevent dupes
}
// Object describes a storage object
@@ -246,8 +241,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
baseClient := fshttp.NewClient(fs.Config)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
}
@@ -256,14 +250,11 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if root == "." || root == "/" {
root = ""
}
f := &Fs{
name: name,
root: root,
opt: *opt,
unAuth: rest.NewClient(baseClient),
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
ts: ts,
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
startTime: time.Now(),
albums: map[bool]*albums{},
@@ -289,85 +280,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return f, nil
}
// fetchEndpoint gets the named OpenID endpoint from the Google config
func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, err error) {
// Get openID config without auth
opts := rest.Opts{
Method: "GET",
RootURL: "https://accounts.google.com/.well-known/openid-configuration",
}
var openIDconfig map[string]interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig)
return shouldRetry(resp, err)
})
if err != nil {
return "", errors.Wrap(err, "couldn't read openID config")
}
// Find userinfo endpoint
endpoint, ok := openIDconfig[name].(string)
if !ok {
return "", errors.Errorf("couldn't find %q from openID config", name)
}
return endpoint, nil
}
// UserInfo fetches info about the current user with oauth2
func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err error) {
endpoint, err := f.fetchEndpoint(ctx, "userinfo_endpoint")
if err != nil {
return nil, err
}
// Fetch the user info with auth
opts := rest.Opts{
Method: "GET",
RootURL: endpoint,
}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &userInfo)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't read user info")
}
return userInfo, nil
}
// Disconnect kills the token and refresh token
func (f *Fs) Disconnect(ctx context.Context) (err error) {
endpoint, err := f.fetchEndpoint(ctx, "revocation_endpoint")
if err != nil {
return err
}
token, err := f.ts.Token()
if err != nil {
return err
}
// Revoke the token and the refresh token
opts := rest.Opts{
Method: "POST",
RootURL: endpoint,
MultipartParams: url.Values{
"token": []string{token.AccessToken},
"token_type_hint": []string{"access_token"},
},
}
var res interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &res)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't revoke token")
}
fs.Infof(f, "res = %+v", res)
return nil
}
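
The discovery step in fetchEndpoint follows standard OpenID Connect discovery; a standalone sketch using only the standard library (not the backend's rest client) looks like this:

// fetchOpenIDEndpoint downloads the well-known OpenID configuration
// and returns the named endpoint, e.g. "userinfo_endpoint" or
// "revocation_endpoint".
func fetchOpenIDEndpoint(ctx context.Context, name string) (string, error) {
	req, err := http.NewRequest("GET", "https://accounts.google.com/.well-known/openid-configuration", nil)
	if err != nil {
		return "", err
	}
	req = req.WithContext(ctx)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	var cfg map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&cfg); err != nil {
		return "", err
	}
	endpoint, ok := cfg[name].(string)
	if !ok {
		return "", fmt.Errorf("couldn't find %q in openID config", name)
	}
	return endpoint, nil
}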
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
@@ -423,7 +335,7 @@ func findID(name string) string {
// list the albums into an internal cache
// FIXME cache invalidation
func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err error) {
func (f *Fs) listAlbums(shared bool) (all *albums, err error) {
f.albumsMu.Lock()
defer f.albumsMu.Unlock()
all, ok := f.albums[shared]
@@ -445,7 +357,7 @@ func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err erro
var result api.ListAlbums
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -482,7 +394,7 @@ type listFn func(remote string, object *api.MediaItem, isDirectory bool) error
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err error) {
func (f *Fs) list(filter api.SearchFilter, fn listFn) (err error) {
opts := rest.Opts{
Method: "POST",
Path: "/mediaItems:search",
@@ -494,7 +406,7 @@ func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err
var result api.MediaItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &filter, &result)
resp, err = f.srv.CallJSON(&opts, &filter, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -543,7 +455,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *api.MediaI
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {
// List the objects
err = f.list(ctx, filter, func(remote string, item *api.MediaItem, isDirectory bool) error {
err = f.list(filter, func(remote string, item *api.MediaItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, prefix+remote, item, isDirectory)
if err != nil {
return err
@@ -638,7 +550,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
var result api.Album
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, request, &result)
resp, err = f.srv.CallJSON(&opts, request, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -654,7 +566,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *api.Album, err error) {
f.createMu.Lock()
defer f.createMu.Unlock()
albums, err := f.listAlbums(ctx, false)
albums, err := f.listAlbums(false)
if err != nil {
return nil, err
}
@@ -708,7 +620,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
return err
}
albumTitle := match[1]
allAlbums, err := f.listAlbums(ctx, false)
allAlbums, err := f.listAlbums(false)
if err != nil {
return err
}
@@ -773,7 +685,7 @@ func (o *Object) Size() int64 {
RootURL: o.downloadURL(),
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -824,7 +736,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
var item api.MediaItem
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &item)
resp, err = o.fs.srv.CallJSON(&opts, nil, &item)
return shouldRetry(resp, err)
})
if err != nil {
@@ -901,7 +813,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -954,8 +866,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var token []byte
var resp *http.Response
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
if err != nil {
_ = resp.Body.Close()
return shouldRetry(resp, err)
}
token, err = rest.ReadBody(resp)
@@ -986,7 +899,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
var result api.BatchCreateResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, request, &result)
resp, err = o.fs.srv.CallJSON(&opts, request, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1029,7 +942,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1050,10 +963,8 @@ func (o *Object) ID() string {
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.UserInfoer = &Fs{}
_ fs.Disconnecter = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
_ fs.Fs = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
)

View File

@@ -20,7 +20,7 @@ import (
// file pattern parsing
type lister interface {
listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error)
listAlbums(ctx context.Context, shared bool) (all *albums, err error)
listAlbums(shared bool) (all *albums, err error)
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
dirTime() time.Time
}
@@ -296,7 +296,7 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
// is a prefix of another album, or actual files, or a combination of
// the two.
func albumsToEntries(ctx context.Context, f lister, shared bool, prefix string, albumPath string) (entries fs.DirEntries, err error) {
albums, err := f.listAlbums(ctx, shared)
albums, err := f.listAlbums(shared)
if err != nil {
return nil, err
}

View File

@@ -44,7 +44,7 @@ func (f *testLister) listDir(ctx context.Context, prefix string, filter api.Sear
}
// mock listAlbums for testing
func (f *testLister) listAlbums(ctx context.Context, shared bool) (all *albums, err error) {
func (f *testLister) listAlbums(shared bool) (all *albums, err error) {
return f.albums, nil
}

View File

@@ -13,7 +13,6 @@ import (
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
@@ -47,21 +46,6 @@ func init() {
Value: "https://user:pass@example.com",
Help: "Connect to example.com using a username and password",
}},
}, {
Name: "headers",
Help: `Set HTTP headers for all transactions
Use this to set additional HTTP headers for all transactions
The input format is a comma separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used.
For example, to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
`,
Default: fs.CommaSepList{},
Advanced: true,
}, {
Name: "no_slash",
Help: `Set this if the site doesn't end directories with /
@@ -78,26 +62,6 @@ Note that this may cause rclone to confuse genuine HTML files with
directories.`,
Default: false,
Advanced: true,
}, {
Name: "no_head",
Help: `Don't use HEAD requests to find file sizes in dir listing
If your site is being very slow to load then you can try this option.
Normally rclone does a HEAD request for each potential file in a
directory listing to:
- find its size
- check it really exists
- check to see if it is a directory
If you set this option, rclone will not do the HEAD request. This will mean
- directory listings are much quicker
- rclone won't have the times or sizes of any files
- some files that don't exist may be in the listing
`,
Default: false,
Advanced: true,
}},
}
fs.Register(fsi)
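
The headers option documented above takes its values two at a time; the addHeaders helper later in this file applies them exactly that way. A minimal sketch of the same idea:

// applyHeaderPairs adds key,value pairs to a request, taking the
// flat list two entries at a time.
func applyHeaderPairs(req *http.Request, pairs []string) {
	for i := 0; i+1 < len(pairs); i += 2 {
		req.Header.Add(pairs[i], pairs[i+1])
	}
}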
@@ -105,10 +69,8 @@ If you set this option, rclone will not do the HEAD request. This will mean
// Options defines the configuration for this backend
type Options struct {
Endpoint string `config:"url"`
NoSlash bool `config:"no_slash"`
NoHead bool `config:"no_head"`
Headers fs.CommaSepList `config:"headers"`
Endpoint string `config:"url"`
NoSlash bool `config:"no_slash"`
}
// Fs stores the interface to the remote HTTP files
@@ -146,7 +108,6 @@ func statusError(res *http.Response, err error) error {
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -154,10 +115,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
if len(opt.Headers)%2 != 0 {
return nil, errors.New("odd number of headers supplied")
}
if !strings.HasSuffix(opt.Endpoint, "/") {
opt.Endpoint += "/"
}
@@ -183,15 +140,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return http.ErrUseLastResponse
}
// check to see if points to a file
req, err := http.NewRequest("HEAD", u.String(), nil)
res, err := noRedir.Head(u.String())
err = statusError(res, err)
if err == nil {
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
addHeaders(req, opt)
res, err := noRedir.Do(req)
err = statusError(res, err)
if err == nil {
isFile = true
}
isFile = true
}
}
@@ -261,7 +213,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
fs: f,
remote: remote,
}
err := o.stat(ctx)
err := o.stat()
if err != nil {
return nil, err
}
@@ -364,22 +316,8 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
return names, nil
}
// Adds the configured headers to the request if any
func addHeaders(req *http.Request, opt *Options) {
for i := 0; i < len(opt.Headers); i += 2 {
key := opt.Headers[i]
value := opt.Headers[i+1]
req.Header.Add(key, value)
}
}
// Adds the configured headers to the request if any
func (f *Fs) addHeaders(req *http.Request) {
addHeaders(req, &f.opt)
}
// Read the directory passed in
func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error) {
func (f *Fs) readDir(dir string) (names []string, err error) {
URL := f.url(dir)
u, err := url.Parse(URL)
if err != nil {
@@ -388,14 +326,7 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
if !strings.HasSuffix(URL, "/") {
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
}
// Do the request
req, err := http.NewRequest("GET", URL, nil)
if err != nil {
return nil, errors.Wrap(err, "readDir failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
f.addHeaders(req)
res, err := f.httpClient.Do(req)
res, err := f.httpClient.Get(URL)
if err == nil {
defer fs.CheckClose(res.Body, &err)
if res.StatusCode == http.StatusNotFound {
@@ -433,53 +364,34 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if !strings.HasSuffix(dir, "/") && dir != "" {
dir += "/"
}
names, err := f.readDir(ctx, dir)
names, err := f.readDir(dir)
if err != nil {
return nil, errors.Wrapf(err, "error listing %q", dir)
}
var (
entriesMu sync.Mutex // to protect entries
wg sync.WaitGroup
in = make(chan string, fs.Config.Checkers)
)
add := func(entry fs.DirEntry) {
entriesMu.Lock()
entries = append(entries, entry)
entriesMu.Unlock()
}
for i := 0; i < fs.Config.Checkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for remote := range in {
file := &Object{
fs: f,
remote: remote,
}
switch err := file.stat(ctx); err {
case nil:
add(file)
case fs.ErrorNotAFile:
// ...found a directory not a file
add(fs.NewDir(remote, timeUnset))
default:
fs.Debugf(remote, "skipping because of error: %v", err)
}
}
}()
}
for _, name := range names {
isDir := name[len(name)-1] == '/'
name = strings.TrimRight(name, "/")
remote := path.Join(dir, name)
if isDir {
add(fs.NewDir(remote, timeUnset))
dir := fs.NewDir(remote, timeUnset)
entries = append(entries, dir)
} else {
in <- remote
file := &Object{
fs: f,
remote: remote,
}
switch err = file.stat(); err {
case nil:
entries = append(entries, file)
case fs.ErrorNotAFile:
// ...found a directory not a file
dir := fs.NewDir(remote, timeUnset)
entries = append(entries, dir)
default:
fs.Debugf(remote, "skipping because of error: %v", err)
}
}
}
close(in)
wg.Wait()
return entries, nil
}
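
The List implementation above fans the per-file stat calls out over fs.Config.Checkers goroutines; the same bounded-concurrency pattern, reduced to a self-contained sketch (names here are illustrative, not rclone's API):

// statAll runs stat on every remote using a fixed pool of workers.
func statAll(remotes []string, workers int, stat func(string)) {
	var wg sync.WaitGroup
	in := make(chan string, workers)
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for remote := range in {
				stat(remote)
			}
		}()
	}
	for _, remote := range remotes {
		in <- remote
	}
	close(in)
	wg.Wait()
}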
@@ -536,21 +448,9 @@ func (o *Object) url() string {
}
// stat updates the info field in the Object
func (o *Object) stat(ctx context.Context) error {
if o.fs.opt.NoHead {
o.size = -1
o.modTime = timeUnset
o.contentType = fs.MimeType(ctx, o)
return nil
}
func (o *Object) stat() error {
url := o.url()
req, err := http.NewRequest("HEAD", url, nil)
if err != nil {
return errors.Wrap(err, "stat failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
o.fs.addHeaders(req)
res, err := o.fs.httpClient.Do(req)
res, err := o.fs.httpClient.Head(url)
if err == nil && res.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
@@ -597,13 +497,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return nil, errors.Wrap(err, "Open failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
// Add optional headers
for k, v := range fs.OpenOptionHeaders(options) {
req.Header.Add(k, v)
}
o.fs.addHeaders(req)
// Do the request
res, err := o.fs.httpClient.Do(req)

View File

@@ -10,7 +10,6 @@ import (
"os"
"path/filepath"
"sort"
"strings"
"testing"
"time"
@@ -27,7 +26,6 @@ var (
remoteName = "TestHTTP"
testPath = "test"
filesPath = filepath.Join(testPath, "files")
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
)
// prepareServer the test server and return a function to tidy it up afterwards
@@ -35,16 +33,8 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
// file server for test/files
fileServer := http.FileServer(http.Dir(filesPath))
// test that the headers are there, then pass on to fileServer
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
fileServer.ServeHTTP(w, r)
})
// Make the test server
ts := httptest.NewServer(handler)
ts := httptest.NewServer(fileServer)
// Configure the remote
config.LoadConfig()
@@ -55,9 +45,8 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
// config.FileSet(remoteName, "url", ts.URL)
m := configmap.Simple{
"type": "http",
"url": ts.URL,
"headers": strings.Join(headers, ","),
"type": "http",
"url": ts.URL,
}
// return a function to tidy up

View File

@@ -1,7 +1,6 @@
package hubic
import (
"context"
"net/http"
"time"
@@ -27,7 +26,7 @@ func newAuth(f *Fs) *auth {
func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials(context.TODO())
err = a.f.getCredentials()
if err == nil {
break
}

View File

@@ -7,7 +7,6 @@ package hubic
// to be revisited after some actual experience.
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
@@ -116,12 +115,11 @@ func (f *Fs) String() string {
// getCredentials reads the OpenStack Credentials using the Hubic API
//
// The credentials are read into the Fs
func (f *Fs) getCredentials(ctx context.Context) (err error) {
func (f *Fs) getCredentials() (err error) {
req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil)
if err != nil {
return err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
resp, err := f.client.Do(req)
if err != nil {
return err

View File

@@ -46,82 +46,6 @@ func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// APIString returns Time string in Jottacloud API format
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
// TokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type TokenJSON struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns a string, while most return a number
}
// JSON structures returned by new API
// AllocateFileRequest to prepare an upload to Jottacloud
type AllocateFileRequest struct {
Bytes int64 `json:"bytes"`
Created string `json:"created"`
Md5 string `json:"md5"`
Modified string `json:"modified"`
Path string `json:"path"`
}
// AllocateFileResponse for upload requests
type AllocateFileResponse struct {
Name string `json:"name"`
Path string `json:"path"`
State string `json:"state"`
UploadID string `json:"upload_id"`
UploadURL string `json:"upload_url"`
Bytes int64 `json:"bytes"`
ResumePos int64 `json:"resume_pos"`
}
// UploadResponse after an upload
type UploadResponse struct {
Name string `json:"name"`
Path string `json:"path"`
Kind string `json:"kind"`
ContentID string `json:"content_id"`
Bytes int64 `json:"bytes"`
Md5 string `json:"md5"`
Created int64 `json:"created"`
Modified int64 `json:"modified"`
Deleted interface{} `json:"deleted"`
Mime string `json:"mime"`
}
// DeviceRegistrationResponse is the response to registering a device
type DeviceRegistrationResponse struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
}
// CustomerInfo provides general information about the account. Required for finding the correct internal username.
type CustomerInfo struct {
Username string `json:"username"`
Email string `json:"email"`
Name string `json:"name"`
CountryCode string `json:"country_code"`
LanguageCode string `json:"language_code"`
CustomerGroupCode string `json:"customer_group_code"`
BrandCode string `json:"brand_code"`
AccountType string `json:"account_type"`
SubscriptionType string `json:"subscription_type"`
Usage int64 `json:"usage"`
Qouta int64 `json:"quota"`
BusinessUsage int64 `json:"business_usage"`
BusinessQouta int64 `json:"business_quota"`
WriteLocked bool `json:"write_locked"`
ReadLocked bool `json:"read_locked"`
LockedCause interface{} `json:"locked_cause"`
WebHash string `json:"web_hash"`
AndroidHash string `json:"android_hash"`
IOSHash string `json:"ios_hash"`
}
// XML structures returned by the old API
// Flag is a hacky type for checking if an attribute is present
type Flag bool
@@ -140,6 +64,15 @@ func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
return attr, errors.New("unimplemented")
}
// TokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type TokenJSON struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns a string, while most return a number
}
/*
GET http://www.jottacloud.com/JFS/<account>
@@ -169,8 +102,8 @@ GET http://www.jottacloud.com/JFS/<account>
</user>
*/
// DriveInfo represents a Jottacloud account
type DriveInfo struct {
// AccountInfo represents a Jottacloud account
type AccountInfo struct {
Username string `xml:"username"`
AccountType string `xml:"account-type"`
Locked bool `xml:"locked"`
@@ -347,3 +280,43 @@ func (e *Error) Error() string {
}
return out
}
// AllocateFileRequest to prepare an upload to Jottacloud
type AllocateFileRequest struct {
Bytes int64 `json:"bytes"`
Created string `json:"created"`
Md5 string `json:"md5"`
Modified string `json:"modified"`
Path string `json:"path"`
}
// AllocateFileResponse for upload requests
type AllocateFileResponse struct {
Name string `json:"name"`
Path string `json:"path"`
State string `json:"state"`
UploadID string `json:"upload_id"`
UploadURL string `json:"upload_url"`
Bytes int64 `json:"bytes"`
ResumePos int64 `json:"resume_pos"`
}
// UploadResponse after an upload
type UploadResponse struct {
Name string `json:"name"`
Path string `json:"path"`
Kind string `json:"kind"`
ContentID string `json:"content_id"`
Bytes int64 `json:"bytes"`
Md5 string `json:"md5"`
Created int64 `json:"created"`
Modified int64 `json:"modified"`
Deleted interface{} `json:"deleted"`
Mime string `json:"mime"`
}
// DeviceRegistrationResponse is the response to registering a device
type DeviceRegistrationResponse struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
}
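
TokenJSON above mirrors the OAuth2 token response; the config flow in jottacloud.go converts it to an oauth2.Token roughly like this (a sketch, with the expiry computed from the relative expires_in value):

// tokenFromJSON builds an oauth2.Token from the raw JSON response.
func tokenFromJSON(j TokenJSON) oauth2.Token {
	return oauth2.Token{
		AccessToken:  j.AccessToken,
		RefreshToken: j.RefreshToken,
		TokenType:    j.TokenType,
		Expiry:       time.Now().Add(time.Duration(j.ExpiresIn) * time.Second),
	}
}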

View File

@@ -44,13 +44,14 @@ const (
defaultDevice = "Jotta"
defaultMountpoint = "Archive"
rootURL = "https://www.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/"
apiURL = "https://api.jottacloud.com/files/v1/"
baseURL = "https://www.jottacloud.com/"
tokenURL = "https://api.jottacloud.com/auth/v1/token"
registerURL = "https://api.jottacloud.com/auth/v1/register"
cachePrefix = "rclone-jcmd5-"
rcloneClientID = "nibfk8biu12ju7hpqomr8b1e40"
rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
configUsername = "user"
configClientID = "client_id"
configClientSecret = "client_secret"
configDevice = "device"
@@ -77,7 +78,6 @@ func init() {
Description: "JottaCloud",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
@@ -87,9 +87,34 @@ func init() {
}
srv := rest.NewClient(fshttp.NewClient(fs.Config))
// ask if we should create a device specific token: https://github.com/rclone/rclone/issues/2995
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has its own Jottacloud API key which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
if config.Confirm() {
deviceRegistration, err := registerDevice(ctx, srv)
// random generator to generate random device names
seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
randonDeviceNamePartLength := 21
randomDeviceNamePart := make([]byte, randonDeviceNamePartLength)
for i := range randomDeviceNamePart {
randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
}
randomDeviceName := "rclone-" + string(randomDeviceNamePart)
fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)
values := url.Values{}
values.Set("device_id", randomDeviceName)
// all information comes from https://github.com/ttyridal/aiojotta/wiki/Jotta-protocol-3.-Authentication#token-authentication
opts := rest.Opts{
Method: "POST",
RootURL: registerURL,
ContentType: "application/x-www-form-urlencoded",
ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
Parameters: values,
}
var deviceRegistration api.DeviceRegistrationResponse
_, err := srv.CallJSON(&opts, nil, &deviceRegistration)
if err != nil {
log.Fatalf("Failed to register device: %v", err)
}
@@ -110,14 +135,53 @@ func init() {
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
fmt.Printf("Username> ")
username := config.ReadLine()
username, ok := m.Get(configUsername)
if !ok {
log.Fatalf("No username defined")
}
password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")
token, err := doAuth(ctx, srv, username, password)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
// prepare our token request with username and password
values := url.Values{}
values.Set("grant_type", "PASSWORD")
values.Set("password", password)
values.Set("username", username)
values.Set("client_id", oauthConfig.ClientID)
values.Set("client_secret", oauthConfig.ClientSecret)
opts := rest.Opts{
Method: "POST",
RootURL: oauthConfig.Endpoint.AuthURL,
ContentType: "application/x-www-form-urlencoded",
Parameters: values,
}
var jsonToken api.TokenJSON
resp, err := srv.CallJSON(&opts, nil, &jsonToken)
if err != nil {
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
if resp != nil {
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
fmt.Printf("Enter verification code> ")
authCode := config.ReadLine()
authCode = strings.Replace(authCode, "-", "", -1) // the SMS received contains a pair of 3 digit numbers separated by '-' but the API wants a single 6 digit number
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
resp, err = srv.CallJSON(&opts, nil, &jsonToken)
}
}
if err != nil {
log.Fatalf("Failed to get resource token: %v", err)
}
}
var token oauth2.Token
token.AccessToken = jsonToken.AccessToken
token.RefreshToken = jsonToken.RefreshToken
token.TokenType = jsonToken.TokenType
token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
// finally save them in the config
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
log.Fatalf("Error while saving token: %s", err)
@@ -131,17 +195,39 @@ func init() {
}
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
acc, err := getAccountInfo(srv, username)
if err != nil {
log.Fatalf("Failed to setup mountpoint: %s", err)
log.Fatalf("Error getting devices: %s", err)
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
fmt.Printf("Please select the device to use. Normally this will be Jotta\n")
var deviceNames []string
for i := range acc.Devices {
deviceNames = append(deviceNames, acc.Devices[i].Name)
}
result := config.Choose("Devices", deviceNames, nil, false)
m.Set(configDevice, result)
dev, err := getDeviceInfo(srv, path.Join(username, result))
if err != nil {
log.Fatalf("Error getting Mountpoint: %s", err)
}
if len(dev.MountPoints) == 0 {
log.Fatalf("No Mountpoints found for this device.")
}
fmt.Printf("Please select the mountpoint to user. Normally this will be Archive\n")
var mountpointNames []string
for i := range dev.MountPoints {
mountpointNames = append(mountpointNames, dev.MountPoints[i].Name)
}
result = config.Choose("Mountpoints", mountpointNames, nil, false)
m.Set(configMountpoint, result)
}
},
Options: []fs.Option{{
Name: configUsername,
Help: "User Name:",
}, {
Name: "md5_memory_limit",
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
Default: fs.SizeSuffix(10 * 1024 * 1024),
@@ -167,6 +253,7 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
User string `config:"user"`
Device string `config:"device"`
Mountpoint string `config:"mountpoint"`
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
@@ -246,169 +333,8 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// registerDevice registers a new device for use with the Jottacloud API
func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
// random generator to generate random device names
seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
randonDeviceNamePartLength := 21
randomDeviceNamePart := make([]byte, randonDeviceNamePartLength)
for i := range randomDeviceNamePart {
randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
}
randomDeviceName := "rclone-" + string(randomDeviceNamePart)
fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)
values := url.Values{}
values.Set("device_id", randomDeviceName)
opts := rest.Opts{
Method: "POST",
RootURL: registerURL,
ContentType: "application/x-www-form-urlencoded",
ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
Parameters: values,
}
var deviceRegistration *api.DeviceRegistrationResponse
_, err = srv.CallJSON(ctx, &opts, nil, &deviceRegistration)
return deviceRegistration, err
}
// doAuth runs the actual token request
func doAuth(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
// prepare our token request with username and password
values := url.Values{}
values.Set("grant_type", "PASSWORD")
values.Set("password", password)
values.Set("username", username)
values.Set("client_id", oauthConfig.ClientID)
values.Set("client_secret", oauthConfig.ClientSecret)
opts := rest.Opts{
Method: "POST",
RootURL: oauthConfig.Endpoint.AuthURL,
ContentType: "application/x-www-form-urlencoded",
Parameters: values,
}
// do the first request
var jsonToken api.TokenJSON
resp, err := srv.CallJSON(ctx, &opts, nil, &jsonToken)
if err != nil {
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
if resp != nil {
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
fmt.Printf("Enter verification code> ")
authCode := config.ReadLine()
authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
resp, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
}
}
}
token.AccessToken = jsonToken.AccessToken
token.RefreshToken = jsonToken.RefreshToken
token.TokenType = jsonToken.TokenType
token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
return token, err
}
// setupMountpoint sets up a custom device and mountpoint if desired by the user
func setupMountpoint(ctx context.Context, srv *rest.Client, apiSrv *rest.Client) (device, mountpoint string, err error) {
cust, err := getCustomerInfo(ctx, apiSrv)
if err != nil {
return "", "", err
}
acc, err := getDriveInfo(ctx, srv, cust.Username)
if err != nil {
return "", "", err
}
var deviceNames []string
for i := range acc.Devices {
deviceNames = append(deviceNames, acc.Devices[i].Name)
}
fmt.Printf("Please select the device to use. Normally this will be Jotta\n")
device = config.Choose("Devices", deviceNames, nil, false)
dev, err := getDeviceInfo(ctx, srv, path.Join(cust.Username, device))
if err != nil {
return "", "", err
}
if len(dev.MountPoints) == 0 {
return "", "", errors.New("no mountpoints for selected device")
}
var mountpointNames []string
for i := range dev.MountPoints {
mountpointNames = append(mountpointNames, dev.MountPoints[i].Name)
}
fmt.Printf("Please select the mountpoint to user. Normally this will be Archive\n")
mountpoint = config.Choose("Mountpoints", mountpointNames, nil, false)
return device, mountpoint, err
}
// getCustomerInfo queries general information about the account
func getCustomerInfo(ctx context.Context, srv *rest.Client) (info *api.CustomerInfo, err error) {
opts := rest.Opts{
Method: "GET",
Path: "account/v1/customer",
}
_, err = srv.CallJSON(ctx, &opts, nil, &info)
if err != nil {
return nil, errors.Wrap(err, "couldn't get customer info")
}
return info, nil
}
// getDriveInfo queries general information about the account and the available devices and mountpoints.
func getDriveInfo(ctx context.Context, srv *rest.Client, username string) (info *api.DriveInfo, err error) {
opts := rest.Opts{
Method: "GET",
Path: username,
}
_, err = srv.CallXML(ctx, &opts, nil, &info)
if err != nil {
return nil, errors.Wrap(err, "couldn't get drive info")
}
return info, nil
}
// getDeviceInfo queries information about a Jottacloud device
func getDeviceInfo(ctx context.Context, srv *rest.Client, path string) (info *api.JottaDevice, err error) {
opts := rest.Opts{
Method: "GET",
Path: urlPathEscape(path),
}
_, err = srv.CallXML(ctx, &opts, nil, &info)
if err != nil {
return nil, errors.Wrap(err, "couldn't get device info")
}
return info, nil
}
// setEndpointURL generates the API endpoint URL
func (f *Fs) setEndpointURL() {
if f.opt.Device == "" {
f.opt.Device = defaultDevice
}
if f.opt.Mountpoint == "" {
f.opt.Mountpoint = defaultMountpoint
}
f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
}
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.JottaFile, err error) {
func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
opts := rest.Opts{
Method: "GET",
Path: f.filePath(path),
@@ -416,7 +342,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Jo
var result api.JottaFile
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
resp, err = f.srv.CallXML(&opts, nil, &result)
return shouldRetry(resp, err)
})
@@ -436,6 +362,54 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Jo
return &result, nil
}
// getAccountInfo queries general information about the account.
// Takes rest.Client and username as parameters so it is easily usable
// during config
func getAccountInfo(srv *rest.Client, username string) (info *api.AccountInfo, err error) {
opts := rest.Opts{
Method: "GET",
Path: urlPathEscape(username),
}
_, err = srv.CallXML(&opts, nil, &info)
if err != nil {
return nil, err
}
return info, nil
}
// getDeviceInfo queries information about a Jottacloud device
func getDeviceInfo(srv *rest.Client, path string) (info *api.JottaDevice, err error) {
opts := rest.Opts{
Method: "GET",
Path: urlPathEscape(path),
}
_, err = srv.CallXML(&opts, nil, &info)
if err != nil {
return nil, err
}
return info, nil
}
// setEndpointURL reads the account ID and generates the API endpoint URL
func (f *Fs) setEndpointURL() (err error) {
info, err := getAccountInfo(f.srv, f.user)
if err != nil {
return errors.Wrap(err, "failed to get endpoint url")
}
if f.opt.Device == "" {
f.opt.Device = defaultDevice
}
if f.opt.Mountpoint == "" {
f.opt.Mountpoint = defaultMountpoint
}
f.endpointURL = urlPathEscape(path.Join(info.Username, f.opt.Device, f.opt.Mountpoint))
return nil
}
// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
// Decode error response
@@ -468,6 +442,11 @@ func (f *Fs) filePath(file string) string {
return urlPathEscape(f.filePathRaw(file))
}
// filePath returns an escaped file path (f.root, remote)
func (o *Object) filePath() string {
return o.fs.filePath(o.remote)
}
// Jottacloud requires the grant_type 'refresh_token' string
// to be uppercase and throws a 400 Bad Request if we use the
// lower case used by the oauth2 module
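
The body of grantTypeFilter is not shown in this hunk; a plausible minimal sketch of such a filter (an assumption, not the verbatim implementation) rewrites the form body before it is sent:

// upperCaseGrantType rewrites the oauth2 module's lower case grant
// type into the upper case form Jottacloud expects.
func upperCaseGrantType(body []byte) []byte {
	return bytes.Replace(body, []byte("grant_type=refresh_token"), []byte("grant_type=REFRESH_TOKEN"), 1)
}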
@@ -493,7 +472,6 @@ func grantTypeFilter(req *http.Request) {
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -533,6 +511,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f := &Fs{
name: name,
root: root,
user: opt.User,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
@@ -548,16 +527,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
_, err := f.readMetaDataForPath("")
return err
})
cust, err := getCustomerInfo(ctx, f.apiSrv)
err = f.setEndpointURL()
if err != nil {
return nil, err
return nil, errors.Wrap(err, "couldn't get account info")
}
f.user = cust.Username
f.setEndpointURL()
if root != "" && !rootIsDir {
// Check to see if the root actually an existing file
@@ -584,7 +561,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.JottaFile) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -594,7 +571,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Jot
// Set info
err = o.setMetaData(info)
} else {
err = o.readMetaData(ctx, false) // reads info and meta, returning an error
err = o.readMetaData(false) // reads info and meta, returning an error
}
if err != nil {
return nil, err
@@ -605,11 +582,11 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Jot
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
return f.newObjectWithInfo(remote, nil)
}
// CreateDir makes a directory
func (f *Fs) CreateDir(ctx context.Context, path string) (jf *api.JottaFolder, err error) {
func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) {
// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
var resp *http.Response
opts := rest.Opts{
@@ -621,7 +598,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (jf *api.JottaFolder, e
opts.Parameters.Set("mkDir", "true")
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &jf)
resp, err = f.srv.CallXML(&opts, nil, &jf)
return shouldRetry(resp, err)
})
if err != nil {
@@ -642,6 +619,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (jf *api.JottaFolder, e
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
//fmt.Printf("List: %s\n", f.filePath(dir))
opts := rest.Opts{
Method: "GET",
Path: f.filePath(dir),
@@ -650,7 +628,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
var resp *http.Response
var result api.JottaFolder
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
resp, err = f.srv.CallXML(&opts, nil, &result)
return shouldRetry(resp, err)
})
@@ -684,12 +662,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue
}
remote := path.Join(dir, restoreReservedChars(item.Name))
o, err := f.newObjectWithInfo(ctx, remote, item)
o, err := f.newObjectWithInfo(remote, item)
if err != nil {
continue
}
entries = append(entries, o)
}
//fmt.Printf("Entries: %+v\n", entries)
return entries, nil
}
@@ -698,7 +677,7 @@ type listFileDirFn func(fs.DirEntry) error
// List the objects and directories into entries, from a
// special kind of JottaFolder representing a FileDirList
func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolder *api.JottaFolder, fn listFileDirFn) error {
func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, fn listFileDirFn) error {
pathPrefix := "/" + f.filePathRaw("") // Non-escaped prefix of API paths to be cut off, to be left with the remote path including the remoteStartPath
pathPrefixLength := len(pathPrefix)
startPath := path.Join(pathPrefix, remoteStartPath) // Non-escaped API path up to and including remoteStartPath, to decide if it should be created as a new dir object
@@ -727,7 +706,7 @@ func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolde
continue
}
remoteFile := path.Join(remoteDir, restoreReservedChars(file.Name))
o, err := f.newObjectWithInfo(ctx, remoteFile, file)
o, err := f.newObjectWithInfo(remoteFile, file)
if err != nil {
return err
}
@@ -745,6 +724,17 @@ func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolde
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
opts := rest.Opts{
Method: "GET",
@@ -756,7 +746,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
var resp *http.Response
var result api.JottaFolder // Could be JottaFileDirList, but JottaFolder is close enough
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
resp, err = f.srv.CallXML(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -769,7 +759,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return errors.Wrap(err, "couldn't list files")
}
list := walk.NewListRHelper(callback)
err = f.listFileDir(ctx, dir, &result, func(entry fs.DirEntry) error {
err = f.listFileDir(dir, &result, func(entry fs.DirEntry) error {
return list.Add(entry)
})
if err != nil {
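The doc comment above states the ListR contract: entries arrive in tranches through the callback, in no particular order, and a non-nil error from the callback aborts the listing. A minimal caller sketch (editor's illustration, not part of this diff; assumes f is this backend's *Fs and ctx a context.Context):

// Count the objects below the root via the recursive lister.
var count int
err := f.ListR(ctx, "", func(entries fs.DirEntries) error {
	for _, entry := range entries {
		if _, ok := entry.(fs.Object); ok {
			count++
		}
	}
	return nil // returning an error here would stop the listing immediately
})
if err != nil {
	log.Fatal(err)
}
fmt.Printf("objects: %d\n", count)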
@@ -823,7 +813,7 @@ func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
_, err := f.CreateDir(ctx, dir)
_, err := f.CreateDir(dir)
return err
}
@@ -862,13 +852,14 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't purge directory")
}
// TODO: Parse response?
return nil
}
@@ -885,12 +876,16 @@ func (f *Fs) Precision() time.Duration {
}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}
// copyOrMove copies or moves directories or files depending on the method parameter
func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *api.JottaFile, err error) {
func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err error) {
opts := rest.Opts{
Method: "POST",
Path: src,
@@ -901,7 +896,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
resp, err = f.srv.CallXML(&opts, nil, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -930,13 +925,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, err
}
info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)
info, err := f.copyOrMove("cp", srcObj.filePath(), remote)
if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
}
return f.newObjectWithInfo(ctx, remote, info)
return f.newObjectWithInfo(remote, info)
//return f.newObjectWithInfo(remote, &result)
}
@@ -960,13 +955,13 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, err
}
info, err := f.copyOrMove(ctx, "mv", srcObj.filePath(), remote)
info, err := f.copyOrMove("mv", srcObj.filePath(), remote)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
return f.newObjectWithInfo(ctx, remote, info)
return f.newObjectWithInfo(remote, info)
//return f.newObjectWithInfo(remote, result)
}
@@ -1004,7 +999,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return fs.ErrorDirExists
}
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, replaceReservedChars(srcPath))+"/", dstRemote)
_, err = f.copyOrMove("mvDir", path.Join(f.endpointURL, replaceReservedChars(srcPath))+"/", dstRemote)
if err != nil {
return errors.Wrap(err, "couldn't move directory")
@@ -1029,7 +1024,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
var resp *http.Response
var result api.JottaFile
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
resp, err = f.srv.CallXML(&opts, nil, &result)
return shouldRetry(resp, err)
})
@@ -1060,7 +1055,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
info, err := getDriveInfo(ctx, f.srv, f.user)
info, err := getAccountInfo(f.srv, f.user)
if err != nil {
return nil, err
}
@@ -1100,11 +1095,6 @@ func (o *Object) Remote() string {
return o.remote
}
// filePath returns a escaped file path (f.root, remote)
func (o *Object) filePath() string {
return o.fs.filePath(o.remote)
}
// Hash returns the MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
@@ -1115,8 +1105,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
ctx := context.TODO()
err := o.readMetaData(ctx, false)
err := o.readMetaData(false)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return 0
@@ -1139,12 +1128,11 @@ func (o *Object) setMetaData(info *api.JottaFile) (err error) {
return nil
}
// readMetaData reads and updates the metadata for an object
func (o *Object) readMetaData(ctx context.Context, force bool) (err error) {
func (o *Object) readMetaData(force bool) (err error) {
if o.hasMetaData && !force {
return nil
}
info, err := o.fs.readMetaDataForPath(ctx, o.remote)
info, err := o.fs.readMetaDataForPath(o.remote)
if err != nil {
return err
}
@@ -1159,7 +1147,7 @@ func (o *Object) readMetaData(ctx context.Context, force bool) (err error) {
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx, false)
err := o.readMetaData(false)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -1191,7 +1179,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
opts.Parameters.Set("mode", "bin")
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1284,7 +1272,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var resp *http.Response
opts := rest.Opts{
Method: "POST",
Path: "files/v1/allocate",
Path: "allocate",
ExtraHeaders: make(map[string]string),
}
fileDate := api.Time(src.ModTime(ctx)).APIString()
@@ -1301,7 +1289,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// send it
var response api.AllocateFileResponse
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, &request, &response)
resp, err = o.fs.apiSrv.CallJSON(&opts, &request, &response)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1332,7 +1320,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// send the remaining bytes
resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, nil, &result)
resp, err = o.fs.apiSrv.CallJSON(&opts, nil, &result)
if err != nil {
return err
}
@@ -1343,8 +1331,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
o.md5 = result.Md5
o.modTime = time.Unix(result.Modified/1000, 0)
} else {
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata
return o.readMetaData(ctx, true)
// If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata
return o.readMetaData(true)
}
return nil
@@ -1366,7 +1354,7 @@ func (o *Object) Remove(ctx context.Context) error {
}
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
resp, err := o.fs.srv.CallXML(&opts, nil, nil)
return shouldRetry(resp, err)
})
}


@@ -154,7 +154,6 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
var sOff, eOff int64 = 0, -1
fs.FixRangeOption(options, o.Size())
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
@@ -171,6 +170,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
if sOff == 0 && eOff < 0 {
return o.fs.client.FilesGet(o.fs.mountID, o.fullPath())
}
if sOff < 0 {
sOff = o.Size() - eOff
eOff = o.Size()
}
if eOff > o.Size() {
eOff = o.Size()
}
span := &koofrclient.FileSpan{
Start: sOff,
End: eOff,
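A worked example of the suffix-range arithmetic above, assuming a hypothetical 1000-byte object (editor's note, not part of the diff):

// fs.RangeOption{Start: -1, End: 100} yields sOff = -1, eOff = 100.
// Since sOff < 0:  sOff = 1000 - 100 = 900, eOff = 1000,
// so the FileSpan covers bytes [900, 1000): the last 100 bytes.
// An over-long range, e.g. eOff = 1200, is clamped down to eOff = 1000.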



@@ -1,12 +0,0 @@
//+build !linux
package local
import (
"io"
"os"
)
func newFadviseReadCloser(o *Object, f *os.File, offset, limit int64) io.ReadCloser {
return f
}


@@ -1,165 +0,0 @@
//+build linux
package local
import (
"io"
"os"
"github.com/rclone/rclone/fs"
"golang.org/x/sys/unix"
)
// fadvise provides means to automate freeing pages in kernel page cache for
// a given file descriptor as the file is sequentially processed (read or
// written).
//
// When copying a file to a remote backend all the file content is read by
// kernel and put to page cache to make future reads faster.
// This causes memory pressure visible in both memory usage and CPU consumption
// and can even cause OOM errors in applications consuming large amounts of memory.
//
// In case of an upload to a remote backend, there is no benefit from caching.
//
// fadvise would orchestrate calling POSIX_FADV_DONTNEED
//
// POSIX_FADV_DONTNEED attempts to free cached pages associated
// with the specified region. This is useful, for example, while
// streaming large files. A program may periodically request the
// kernel to free cached data that has already been used, so that
// more useful cached pages are not discarded instead.
//
// Requests to discard partial pages are ignored. It is
// preferable to preserve needed data than discard unneeded data.
// If the application requires that data be considered for
// discarding, then offset and len must be page-aligned.
//
// The implementation may attempt to write back dirty pages in
// the specified region, but this is not guaranteed. Any
// unwritten dirty pages will not be freed. If the application
// wishes to ensure that dirty pages will be released, it should
// call fsync(2) or fdatasync(2) first.
type fadvise struct {
o *Object
fd int
lastPos int64
curPos int64
windowSize int64
freePagesCh chan offsetLength
doneCh chan struct{}
}
type offsetLength struct {
offset int64
length int64
}
const (
defaultAllowPages = 32
defaultWorkerQueueSize = 64
)
func newFadvise(o *Object, fd int, offset int64) *fadvise {
f := &fadvise{
o: o,
fd: fd,
lastPos: offset,
curPos: offset,
windowSize: int64(os.Getpagesize()) * defaultAllowPages,
freePagesCh: make(chan offsetLength, defaultWorkerQueueSize),
doneCh: make(chan struct{}),
}
go f.worker()
return f
}
// sequential configures readahead strategy in Linux kernel.
//
// Under Linux, POSIX_FADV_NORMAL sets the readahead window to the
// default size for the backing device; POSIX_FADV_SEQUENTIAL doubles
// this size, and POSIX_FADV_RANDOM disables file readahead entirely.
func (f *fadvise) sequential(limit int64) bool {
l := int64(0)
if limit > 0 {
l = limit
}
if err := unix.Fadvise(f.fd, f.curPos, l, unix.FADV_SEQUENTIAL); err != nil {
fs.Debugf(f.o, "fadvise sequential failed on file descriptor %d: %s", f.fd, err)
return false
}
return true
}
func (f *fadvise) next(n int) {
f.curPos += int64(n)
f.freePagesIfNeeded()
}
func (f *fadvise) freePagesIfNeeded() {
if f.curPos >= f.lastPos+f.windowSize {
f.freePages()
}
}
func (f *fadvise) freePages() {
f.freePagesCh <- offsetLength{f.lastPos, f.curPos - f.lastPos}
f.lastPos = f.curPos
}
func (f *fadvise) worker() {
for p := range f.freePagesCh {
if err := unix.Fadvise(f.fd, p.offset, p.length, unix.FADV_DONTNEED); err != nil {
fs.Debugf(f.o, "fadvise dontneed failed on file descriptor %d: %s", f.fd, err)
}
}
close(f.doneCh)
}
func (f *fadvise) wait() {
close(f.freePagesCh)
<-f.doneCh
}
type fadviseReadCloser struct {
*fadvise
inner io.ReadCloser
}
// newFadviseReadCloser wraps os.File so that reading from that file would
// remove already consumed pages from kernel page cache.
// In addition to that it instructs kernel to double the readahead window to
// make sequential reads faster.
// See also fadvise.
func newFadviseReadCloser(o *Object, f *os.File, offset, limit int64) io.ReadCloser {
r := fadviseReadCloser{
fadvise: newFadvise(o, int(f.Fd()), offset),
inner: f,
}
// If the syscall failed, subsequent syscalls on the same file descriptor
// are likely to fail as well, so fall back to returning the provided
// os.File pointer unchanged.
if !r.sequential(limit) {
r.wait()
return f
}
return r
}
func (f fadviseReadCloser) Read(p []byte) (n int, err error) {
n, err = f.inner.Read(p)
f.next(n)
return
}
func (f fadviseReadCloser) Close() error {
f.freePages()
f.wait()
return f.inner.Close()
}
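The deleted file above automates the fadvise(2) pattern described in its doc comment. As a standalone illustration of the same idea (editor's sketch, assuming golang.org/x/sys/unix; the file path is hypothetical):

package main

import (
	"io"
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/var/tmp/big.bin") // hypothetical large input
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	fd := int(f.Fd())
	// Ask the kernel for a doubled readahead window for this sequential scan.
	_ = unix.Fadvise(fd, 0, 0, unix.FADV_SEQUENTIAL)

	buf := make([]byte, 1<<20)
	var done int64
	for {
		n, err := f.Read(buf)
		if n > 0 {
			// Drop the pages we just consumed from the kernel page cache.
			_ = unix.Fadvise(fd, done, int64(n), unix.FADV_DONTNEED)
			done += int64(n)
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
	}
}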


@@ -194,7 +194,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.features = (&fs.Features{
CaseInsensitive: f.caseInsensitive(),
CanHaveEmptyDirectories: true,
IsLocal: true,
}).Fill(f)
if opt.FollowSymlinks {
f.lstat = os.Stat
@@ -778,11 +777,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
var in io.ReadCloser
if !o.translatedLink {
var fd *os.File
fd, err = file.Open(o.path)
if fd != nil {
in = newFadviseReadCloser(o, fd, 0, 0)
}
in, err = file.Open(o.path)
} else {
in, err = o.openTranslatedLink(0, -1)
}
@@ -918,7 +913,7 @@ func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
var hasher *hash.MultiHasher
hashes := hash.Supported
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
@@ -926,12 +921,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
case *fs.RangeOption:
offset, limit = x.Decode(o.size)
case *fs.HashesOption:
if x.Hashes.Count() > 0 {
hasher, err = hash.NewMultiHasherTypes(x.Hashes)
if err != nil {
return nil, err
}
}
hashes = x.Hashes
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
@@ -948,22 +938,22 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return
}
wrappedFd := readers.NewLimitedReadCloser(newFadviseReadCloser(o, fd, offset, limit), limit)
wrappedFd := readers.NewLimitedReadCloser(fd, limit)
if offset != 0 {
// seek the object
_, err = fd.Seek(offset, io.SeekStart)
// don't attempt to make checksums
return wrappedFd, err
}
if hasher == nil {
// no need to wrap since we don't need checksums
return wrappedFd, nil
hash, err := hash.NewMultiHasherTypes(hashes)
if err != nil {
return nil, err
}
// Update the hashes as we go along
// Update the md5sum as we go along
in = &localOpenFile{
o: o,
in: wrappedFd,
hash: hasher,
hash: hash,
fd: fd,
}
return in, nil
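Both sides of the hunk above hash the stream as it is read; they differ only in whether the multi-hasher is built lazily from fs.HashesOption or eagerly from hash.Supported. The core pattern as a minimal sketch (hashingCopy is a hypothetical helper, not from this commit):

import (
	"io"

	"github.com/rclone/rclone/fs/hash"
)

// hashingCopy copies src to dst and returns every supported hash of the data.
func hashingCopy(dst io.Writer, src io.Reader) (map[hash.Type]string, error) {
	hasher, err := hash.NewMultiHasherTypes(hash.Supported)
	if err != nil {
		return nil, err
	}
	// Every byte read through the tee also feeds the multi-hasher.
	if _, err := io.Copy(dst, io.TeeReader(src, hasher)); err != nil {
		return nil, err
	}
	return hasher.Sums(), nil // map of hash.Type to hex digest
}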
@@ -985,23 +975,18 @@ func (nwc nopWriterCloser) Close() error {
}
// Update the object from in with modTime and size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
var out io.WriteCloser
var hasher *hash.MultiHasher
hashes := hash.Supported
for _, option := range options {
switch x := option.(type) {
case *fs.HashesOption:
if x.Hashes.Count() > 0 {
hasher, err = hash.NewMultiHasherTypes(x.Hashes)
if err != nil {
return err
}
}
hashes = x.Hashes
}
}
err = o.mkdirAll()
err := o.mkdirAll()
if err != nil {
return err
}
@@ -1026,9 +1011,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Calculate the hash of the object we are reading as we go along
if hasher != nil {
in = io.TeeReader(in, hasher)
hash, err := hash.NewMultiHasherTypes(hashes)
if err != nil {
return err
}
in = io.TeeReader(in, hash)
_, err = io.Copy(out, in)
closeErr := out.Close()
@@ -1064,11 +1051,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// All successful so update the hashes
if hasher != nil {
o.fs.objectHashesMu.Lock()
o.hashes = hasher.Sums()
o.fs.objectHashesMu.Unlock()
}
o.fs.objectHashesMu.Lock()
o.hashes = hash.Sums()
o.fs.objectHashesMu.Unlock()
// Set the mtime
err = o.SetModTime(ctx, src.ModTime(ctx))


@@ -1,107 +0,0 @@
package api
// BIN protocol constants
const (
BinContentType = "application/x-www-form-urlencoded"
TreeIDLength = 12
DunnoNodeIDLength = 16
)
// Operations in binary protocol
const (
OperationAddFile = 103 // 0x67
OperationRename = 105 // 0x69
OperationCreateFolder = 106 // 0x6A
OperationFolderList = 117 // 0x75
OperationSharedFoldersList = 121 // 0x79
// TODO investigate opcodes below
Operation154MaybeItemInfo = 154 // 0x9A
Operation102MaybeAbout = 102 // 0x66
Operation104MaybeDelete = 104 // 0x68
)
// CreateDir protocol constants
const (
MkdirResultOK = 0
MkdirResultSourceNotExists = 1
MkdirResultAlreadyExists = 4
MkdirResultExistsDifferentCase = 9
MkdirResultInvalidName = 10
MkdirResultFailed254 = 254
)
// Move result codes
const (
MoveResultOK = 0
MoveResultSourceNotExists = 1
MoveResultFailed002 = 2
MoveResultAlreadyExists = 4
MoveResultFailed005 = 5
MoveResultFailed254 = 254
)
// AddFile result codes
const (
AddResultOK = 0
AddResultError01 = 1
AddResultDunno04 = 4
AddResultWrongPath = 5
AddResultNoFreeSpace = 7
AddResultDunno09 = 9
AddResultInvalidName = 10
AddResultNotModified = 12
AddResultFailedA = 253
AddResultFailedB = 254
)
// List request options
const (
ListOptTotalSpace = 1
ListOptDelete = 2
ListOptFingerprint = 4
ListOptUnknown8 = 8
ListOptUnknown16 = 16
ListOptFolderSize = 32
ListOptUsedSpace = 64
ListOptUnknown128 = 128
ListOptUnknown256 = 256
)
// ListOptDefaults ...
const ListOptDefaults = ListOptUnknown128 | ListOptUnknown256 | ListOptFolderSize | ListOptTotalSpace | ListOptUsedSpace
// List parse flags
const (
ListParseDone = 0
ListParseReadItem = 1
ListParsePin = 2
ListParsePinUpper = 3
ListParseUnknown15 = 15
)
// List operation results
const (
ListResultOK = 0
ListResultNotExists = 1
ListResultDunno02 = 2
ListResultDunno03 = 3
ListResultAlreadyExists04 = 4
ListResultDunno05 = 5
ListResultDunno06 = 6
ListResultDunno07 = 7
ListResultDunno08 = 8
ListResultAlreadyExists09 = 9
ListResultDunno10 = 10
ListResultDunno11 = 11
ListResultDunno12 = 12
ListResultFailedB = 253
ListResultFailedA = 254
)
// Directory item types
const (
ListItemMountPoint = 0
ListItemFile = 1
ListItemFolder = 2
ListItemSharedFolder = 3
)
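For reference, ListOptDefaults above is the OR of five single-bit flags; written out (editor's arithmetic):

// ListOptTotalSpace(1) | ListOptFolderSize(32) | ListOptUsedSpace(64)
//   | ListOptUnknown128(128) | ListOptUnknown256(256)
// = 1 + 32 + 64 + 128 + 256 = 481 (0x1E1)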


@@ -1,225 +0,0 @@
package api
// BIN protocol helpers
import (
"bufio"
"bytes"
"encoding/binary"
"io"
"log"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/lib/readers"
)
// protocol errors
var (
ErrorPrematureEOF = errors.New("Premature EOF")
ErrorInvalidLength = errors.New("Invalid length")
ErrorZeroTerminate = errors.New("String must end with zero")
)
// BinWriter is a binary protocol writer
type BinWriter struct {
b *bytes.Buffer // growing byte buffer
a []byte // temporary buffer for next varint
}
// NewBinWriter creates a binary protocol helper
func NewBinWriter() *BinWriter {
return &BinWriter{
b: new(bytes.Buffer),
a: make([]byte, binary.MaxVarintLen64),
}
}
// Bytes returns binary data
func (w *BinWriter) Bytes() []byte {
return w.b.Bytes()
}
// Reader returns io.Reader with binary data
func (w *BinWriter) Reader() io.Reader {
return bytes.NewReader(w.b.Bytes())
}
// WritePu16 writes a short as unsigned varint
func (w *BinWriter) WritePu16(val int) {
if val < 0 || val > 65535 {
log.Fatalf("Invalid UInt16 %v", val)
}
w.WritePu64(int64(val))
}
// WritePu32 writes a signed long as unsigned varint
func (w *BinWriter) WritePu32(val int64) {
if val < 0 || val > 4294967295 {
log.Fatalf("Invalid UInt32 %v", val)
}
w.WritePu64(val)
}
// WritePu64 writes an unsigned (actually, signed) long as unsigned varint
func (w *BinWriter) WritePu64(val int64) {
if val < 0 {
log.Fatalf("Invalid UInt64 %v", val)
}
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}
// WriteString writes a zero-terminated string
func (w *BinWriter) WriteString(str string) {
buf := []byte(str)
w.WritePu64(int64(len(buf) + 1))
w.b.Write(buf)
w.b.WriteByte(0)
}
// Write writes a byte buffer
func (w *BinWriter) Write(buf []byte) {
w.b.Write(buf)
}
// WriteWithLength writes a byte buffer prepended with its length as varint
func (w *BinWriter) WriteWithLength(buf []byte) {
w.WritePu64(int64(len(buf)))
w.b.Write(buf)
}
// BinReader is a binary protocol reader helper
type BinReader struct {
b *bufio.Reader
count *readers.CountingReader
err error // keeps the first error encountered
}
// NewBinReader creates a binary protocol reader helper
func NewBinReader(reader io.Reader) *BinReader {
r := &BinReader{}
r.count = readers.NewCountingReader(reader)
r.b = bufio.NewReader(r.count)
return r
}
// Count returns number of bytes read
func (r *BinReader) Count() uint64 {
return r.count.BytesRead()
}
// Error returns first encountered error or nil
func (r *BinReader) Error() error {
return r.err
}
// check() keeps the first error encountered in a stream
func (r *BinReader) check(err error) bool {
if err == nil {
return true
}
if r.err == nil {
// keep the first error
r.err = err
}
if err != io.EOF {
log.Fatalf("Error parsing response: %v", err)
}
return false
}
// ReadByteAsInt reads a single byte as an int, returns -1 for EOF or errors
func (r *BinReader) ReadByteAsInt() int {
if octet, err := r.b.ReadByte(); r.check(err) {
return int(octet)
}
return -1
}
// ReadByteAsShort reads a single byte as an int16, returns -1 for EOF or errors
func (r *BinReader) ReadByteAsShort() int16 {
if octet, err := r.b.ReadByte(); r.check(err) {
return int16(octet)
}
return -1
}
// ReadIntSpl reads two bytes as little-endian uint16, returns -1 for EOF or errors
func (r *BinReader) ReadIntSpl() int {
var val uint16
if r.check(binary.Read(r.b, binary.LittleEndian, &val)) {
return int(val)
}
return -1
}
// ReadULong returns the uint64 equivalent of -1 for EOF or errors
func (r *BinReader) ReadULong() uint64 {
if val, err := binary.ReadUvarint(r.b); r.check(err) {
return val
}
return 0xffffffffffffffff
}
// ReadPu32 returns -1 for EOF or errors
func (r *BinReader) ReadPu32() int64 {
if val, err := binary.ReadUvarint(r.b); r.check(err) {
return int64(val)
}
return -1
}
// ReadNBytes reads given number of bytes, returns invalid data for EOF or errors
func (r *BinReader) ReadNBytes(len int) []byte {
buf := make([]byte, len)
n, err := r.b.Read(buf)
if r.check(err) {
return buf
}
if n != len {
r.check(ErrorPrematureEOF)
}
return buf
}
// ReadBytesByLength reads buffer length and its bytes
func (r *BinReader) ReadBytesByLength() []byte {
len := r.ReadPu32()
if len < 0 {
r.check(ErrorInvalidLength)
return []byte{}
}
return r.ReadNBytes(int(len))
}
// ReadString reads a zero-terminated string with length
func (r *BinReader) ReadString() string {
len := int(r.ReadPu32())
if len < 1 {
r.check(ErrorInvalidLength)
return ""
}
buf := make([]byte, len-1)
n, err := r.b.Read(buf)
if !r.check(err) {
return ""
}
if n != len-1 {
r.check(ErrorPrematureEOF)
return ""
}
zeroByte, err := r.b.ReadByte()
if !r.check(err) {
return ""
}
if zeroByte != 0 {
r.check(ErrorZeroTerminate)
return ""
}
return string(buf)
}
// ReadDate reads a Unix encoded time
func (r *BinReader) ReadDate() time.Time {
return time.Unix(r.ReadPu32(), 0)
}
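To make the framing concrete, a round trip built only from the helpers above (editor's sketch; the opcode and path are illustrative):

w := api.NewBinWriter()
w.WritePu16(api.OperationFolderList) // opcode 117 as an unsigned varint
w.WriteString("/folder")             // varint length, string bytes, terminating zero

r := api.NewBinReader(w.Reader())
fmt.Println(r.ReadPu32())   // 117
fmt.Println(r.ReadString()) // "/folder"
if err := r.Error(); err != nil { // first error seen in the stream, if any
	log.Fatal(err)
}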


@@ -1,248 +0,0 @@
package api
import (
"fmt"
)
// M1 protocol constants and structures
const (
APIServerURL = "https://cloud.mail.ru"
PublicLinkURL = "https://cloud.mail.ru/public/"
DispatchServerURL = "https://dispatcher.cloud.mail.ru"
OAuthURL = "https://o2.mail.ru/token"
OAuthClientID = "cloud-win"
)
// ServerErrorResponse represents erroneous API response.
type ServerErrorResponse struct {
Message string `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}
func (e *ServerErrorResponse) Error() string {
return fmt.Sprintf("server error %d (%s)", e.Status, e.Message)
}
// FileErrorResponse represents erroneous API response for a file
type FileErrorResponse struct {
Body struct {
Home struct {
Value string `json:"value"`
Error string `json:"error"`
} `json:"home"`
} `json:"body"`
Status int `json:"status"`
Account string `json:"email,omitempty"`
Time int64 `json:"time,omitempty"`
Message string // non-json, calculated field
}
func (e *FileErrorResponse) Error() string {
return fmt.Sprintf("file error %d (%s)", e.Status, e.Body.Home.Error)
}
// UserInfoResponse contains account metadata
type UserInfoResponse struct {
Body struct {
AccountType string `json:"account_type"`
AccountVerified bool `json:"account_verified"`
Cloud struct {
Beta struct {
Allowed bool `json:"allowed"`
Asked bool `json:"asked"`
} `json:"beta"`
Billing struct {
ActiveCostID string `json:"active_cost_id"`
ActiveRateID string `json:"active_rate_id"`
AutoProlong bool `json:"auto_prolong"`
Basequota int64 `json:"basequota"`
Enabled bool `json:"enabled"`
Expires int `json:"expires"`
Prolong bool `json:"prolong"`
Promocodes struct {
} `json:"promocodes"`
Subscription []interface{} `json:"subscription"`
Version string `json:"version"`
} `json:"billing"`
Bonuses struct {
CameraUpload bool `json:"camera_upload"`
Complete bool `json:"complete"`
Desktop bool `json:"desktop"`
Feedback bool `json:"feedback"`
Links bool `json:"links"`
Mobile bool `json:"mobile"`
Registration bool `json:"registration"`
} `json:"bonuses"`
Enable struct {
Sharing bool `json:"sharing"`
} `json:"enable"`
FileSizeLimit int64 `json:"file_size_limit"`
Space struct {
BytesTotal int64 `json:"bytes_total"`
BytesUsed int `json:"bytes_used"`
Overquota bool `json:"overquota"`
} `json:"space"`
} `json:"cloud"`
Cloudflags struct {
Exists bool `json:"exists"`
} `json:"cloudflags"`
Domain string `json:"domain"`
Login string `json:"login"`
Newbie bool `json:"newbie"`
UI struct {
ExpandLoader bool `json:"expand_loader"`
Kind string `json:"kind"`
Sidebar bool `json:"sidebar"`
Sort struct {
Order string `json:"order"`
Type string `json:"type"`
} `json:"sort"`
Thumbs bool `json:"thumbs"`
} `json:"ui"`
} `json:"body"`
Email string `json:"email"`
Status int `json:"status"`
Time int64 `json:"time"`
}
// ListItem ...
type ListItem struct {
Count struct {
Folders int `json:"folders"`
Files int `json:"files"`
} `json:"count,omitempty"`
Kind string `json:"kind"`
Type string `json:"type"`
Name string `json:"name"`
Home string `json:"home"`
Size int64 `json:"size"`
Mtime int64 `json:"mtime,omitempty"`
Hash string `json:"hash,omitempty"`
VirusScan string `json:"virus_scan,omitempty"`
Tree string `json:"tree,omitempty"`
Grev int `json:"grev,omitempty"`
Rev int `json:"rev,omitempty"`
}
// ItemInfoResponse ...
type ItemInfoResponse struct {
Email string `json:"email"`
Body ListItem `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}
// FolderInfoResponse ...
type FolderInfoResponse struct {
Body struct {
Count struct {
Folders int `json:"folders"`
Files int `json:"files"`
} `json:"count"`
Tree string `json:"tree"`
Name string `json:"name"`
Grev int `json:"grev"`
Size int64 `json:"size"`
Sort struct {
Order string `json:"order"`
Type string `json:"type"`
} `json:"sort"`
Kind string `json:"kind"`
Rev int `json:"rev"`
Type string `json:"type"`
Home string `json:"home"`
List []ListItem `json:"list"`
} `json:"body,omitempty"`
Time int64 `json:"time"`
Status int `json:"status"`
Email string `json:"email"`
}
// ShardInfoResponse ...
type ShardInfoResponse struct {
Email string `json:"email"`
Body struct {
Video []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"video"`
ViewDirect []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view_direct"`
WeblinkView []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_view"`
WeblinkVideo []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_video"`
WeblinkGet []struct {
Count int `json:"count"`
URL string `json:"url"`
} `json:"weblink_get"`
Stock []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"stock"`
WeblinkThumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_thumbnails"`
PublicUpload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"public_upload"`
Auth []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"auth"`
Web []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"web"`
View []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view"`
Upload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"upload"`
Get []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"get"`
Thumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"thumbnails"`
} `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}
// CleanupResponse ...
type CleanupResponse struct {
Email string `json:"email"`
Time int64 `json:"time"`
StatusStr string `json:"status"`
}
// GenericResponse ...
type GenericResponse struct {
Email string `json:"email"`
Time int64 `json:"time"`
Status int `json:"status"`
// ignore other fields
}
// GenericBodyResponse ...
type GenericBodyResponse struct {
Email string `json:"email"`
Body string `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}

File diff suppressed because it is too large


@@ -1,18 +0,0 @@
// Test Mailru filesystem interface
package mailru_test
import (
"testing"
"github.com/rclone/rclone/backend/mailru"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestMailru:",
NilObject: (*mailru.Object)(nil),
SkipBadWindowsCharacters: true,
})
}


@@ -1,134 +0,0 @@
// Package mrhash implements the mailru hash, which is a modified SHA1.
// If the file size is less than or equal to the SHA1 digest size (20 bytes),
// the hash is simply the file data right-padded with zero bytes to 20 bytes.
// The hash of a larger file is the SHA1 sum of the constant prefix "mrCloud",
// followed by the file data, followed by a decimal representation of the data length.
package mrhash
import (
"crypto/sha1"
"encoding"
"encoding/hex"
"errors"
"hash"
"strconv"
)
const (
// BlockSize of the checksum in bytes.
BlockSize = sha1.BlockSize
// Size of the checksum in bytes.
Size = sha1.Size
startString = "mrCloud"
hashError = "hash function returned error"
)
// Global errors
var (
ErrorInvalidHash = errors.New("invalid hash")
)
type digest struct {
total int // bytes written into hash so far
sha hash.Hash // underlying SHA1
small []byte // small content
}
// New returns a new hash.Hash computing the Mailru checksum.
func New() hash.Hash {
d := &digest{}
d.Reset()
return d
}
// Write writes len(p) bytes from p to the underlying data stream. It returns
// the number of bytes written from p (0 <= n <= len(p)) and any error
// encountered that caused the write to stop early. Write must return a non-nil
// error if it returns n < len(p). Write must not modify the slice data, even
// temporarily.
//
// Implementations must not retain p.
func (d *digest) Write(p []byte) (n int, err error) {
n, err = d.sha.Write(p)
if err != nil {
panic(hashError)
}
d.total += n
if d.total <= Size {
d.small = append(d.small, p...)
}
return n, nil
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (d *digest) Sum(b []byte) []byte {
// If content is small, return it padded to Size
if d.total <= Size {
padded := make([]byte, Size)
copy(padded, d.small)
return append(b, padded...)
}
endString := strconv.Itoa(d.total)
copy, err := cloneSHA1(d.sha)
if err == nil {
_, err = copy.Write([]byte(endString))
}
if err != nil {
panic(hashError)
}
return copy.Sum(b)
}
// cloneSHA1 clones state of SHA1 hash
func cloneSHA1(orig hash.Hash) (clone hash.Hash, err error) {
state, err := orig.(encoding.BinaryMarshaler).MarshalBinary()
if err != nil {
return nil, err
}
clone = sha1.New()
err = clone.(encoding.BinaryUnmarshaler).UnmarshalBinary(state)
return
}
// Reset resets the Hash to its initial state.
func (d *digest) Reset() {
d.sha = sha1.New()
_, _ = d.sha.Write([]byte(startString))
d.total = 0
}
// Size returns the number of bytes Sum will return.
func (d *digest) Size() int {
return Size
}
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (d *digest) BlockSize() int {
return BlockSize
}
// Sum returns the Mailru checksum of the data.
func Sum(data []byte) []byte {
var d digest
d.Reset()
_, _ = d.Write(data)
return d.Sum(nil)
}
// DecodeString converts a string to the Mailru hash
func DecodeString(s string) ([]byte, error) {
b, err := hex.DecodeString(s)
if err != nil || len(b) != Size {
return nil, ErrorInvalidHash
}
return b, nil
}
// must implement this interface
var (
_ hash.Hash = (*digest)(nil)
)
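A quick worked example of the small-input rule, matching the test vectors in the test file that follows (editor's note):

sum := mrhash.Sum([]byte("AA")) // 2 bytes <= 20, so SHA1 never runs
fmt.Println(hex.EncodeToString(sum))
// 4141000000000000000000000000000000000000  ('A' is 0x41, zero-padded to 20 bytes)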


@@ -1,81 +0,0 @@
package mrhash_test
import (
"encoding/hex"
"fmt"
"testing"
"github.com/rclone/rclone/backend/mailru/mrhash"
"github.com/stretchr/testify/assert"
)
func testChunk(t *testing.T, chunk int) {
data := make([]byte, chunk)
for i := 0; i < chunk; i++ {
data[i] = 'A'
}
for _, test := range []struct {
n int
want string
}{
{0, "0000000000000000000000000000000000000000"},
{1, "4100000000000000000000000000000000000000"},
{2, "4141000000000000000000000000000000000000"},
{19, "4141414141414141414141414141414141414100"},
{20, "4141414141414141414141414141414141414141"},
{21, "eb1d05e78a18691a5aa196a6c2b60cd40b5faafb"},
{22, "037e6d960601118a0639afbeff30fe716c66ed2d"},
{4096, "45a16aa192502b010280fb5b44274c601a91fd9f"},
{4194303, "fa019d5bd26498cf6abe35e0d61801bf19bf704b"},
{4194304, "5ed0e07aa6ea5c1beb9402b4d807258f27d40773"},
{4194305, "67bd0b9247db92e0e7d7e29a0947a50fedcb5452"},
{8388607, "41a8e2eb044c2e242971b5445d7be2a13fc0dd84"},
{8388608, "267a970917c624c11fe624276ec60233a66dc2c0"},
{8388609, "37b60b308d553d2732aefb62b3ea88f74acfa13f"},
} {
d := mrhash.New()
var toWrite int
for toWrite = test.n; toWrite >= chunk; toWrite -= chunk {
n, err := d.Write(data)
assert.Nil(t, err)
assert.Equal(t, chunk, n)
}
n, err := d.Write(data[:toWrite])
assert.Nil(t, err)
assert.Equal(t, toWrite, n)
got1 := hex.EncodeToString(d.Sum(nil))
assert.Equal(t, test.want, got1, fmt.Sprintf("when testing length %d", n))
got2 := hex.EncodeToString(d.Sum(nil))
assert.Equal(t, test.want, got2, fmt.Sprintf("when testing length %d (2nd sum)", n))
}
}
func TestHashChunk16M(t *testing.T) { testChunk(t, 16*1024*1024) }
func TestHashChunk8M(t *testing.T) { testChunk(t, 8*1024*1024) }
func TestHashChunk4M(t *testing.T) { testChunk(t, 4*1024*1024) }
func TestHashChunk2M(t *testing.T) { testChunk(t, 2*1024*1024) }
func TestHashChunk1M(t *testing.T) { testChunk(t, 1*1024*1024) }
func TestHashChunk64k(t *testing.T) { testChunk(t, 64*1024) }
func TestHashChunk32k(t *testing.T) { testChunk(t, 32*1024) }
func TestHashChunk2048(t *testing.T) { testChunk(t, 2048) }
func TestHashChunk2047(t *testing.T) { testChunk(t, 2047) }
func TestSumCalledTwice(t *testing.T) {
d := mrhash.New()
assert.NotPanics(t, func() { d.Sum(nil) })
d.Reset()
assert.NotPanics(t, func() { d.Sum(nil) })
assert.NotPanics(t, func() { d.Sum(nil) })
_, _ = d.Write([]byte{1})
assert.NotPanics(t, func() { d.Sum(nil) })
}
func TestSize(t *testing.T) {
d := mrhash.New()
assert.Equal(t, 20, d.Size())
}
func TestBlockSize(t *testing.T) {
d := mrhash.New()
assert.Equal(t, 64, d.BlockSize())
}


@@ -72,7 +72,6 @@ func init() {
Description: "Microsoft OneDrive",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
err := oauthutil.Config("onedrive", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
@@ -144,7 +143,7 @@ func init() {
}
sites := siteResponse{}
_, err := srv.CallJSON(ctx, &opts, nil, &sites)
_, err := srv.CallJSON(&opts, nil, &sites)
if err != nil {
log.Fatalf("Failed to query available sites: %v", err)
}
@@ -173,7 +172,7 @@ func init() {
// query Microsoft Graph
if finalDriveID == "" {
drives := drivesResponse{}
_, err := srv.CallJSON(ctx, &opts, nil, &drives)
_, err := srv.CallJSON(&opts, nil, &drives)
if err != nil {
log.Fatalf("Failed to query available drives: %v", err)
}
@@ -195,7 +194,7 @@ func init() {
RootURL: graphURL,
Path: "/drives/" + finalDriveID + "/root"}
var rootItem api.Item
_, err = srv.CallJSON(ctx, &opts, nil, &rootItem)
_, err = srv.CallJSON(&opts, nil, &rootItem)
if err != nil {
log.Fatalf("Failed to query root for drive %s: %v", finalDriveID, err)
}
@@ -344,10 +343,10 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// instead of simply using `drives/driveID/root:/itemPath` because it works for
// "shared with me" folders in OneDrive Personal (See #2536, #2778)
// This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
func (f *Fs) readMetaDataForPathRelativeToID(normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
opts := newOptsCall(normalizedID, "GET", ":/"+withTrailingColon(rest.URLPathEscape(replaceReservedChars(relPath))))
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
resp, err = f.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
})
@@ -372,7 +371,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
}
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
resp, err = f.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
})
return info, resp, err
@@ -427,7 +426,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
}
}
return f.readMetaDataForPathRelativeToID(ctx, baseNormalizedID, relPath)
return f.readMetaDataForPathRelativeToID(baseNormalizedID, relPath)
}
// errorHandler parses a non 2xx error response into an error
@@ -593,7 +592,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
if !ok {
return "", false, errors.New("couldn't find parent ID")
}
info, resp, err := f.readMetaDataForPathRelativeToID(ctx, pathID, leaf)
info, resp, err := f.readMetaDataForPathRelativeToID(pathID, leaf)
if err != nil {
if resp != nil && resp.StatusCode == http.StatusNotFound {
return "", false, nil
@@ -620,7 +619,7 @@ func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, e
ConflictBehavior: "fail",
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -643,7 +642,7 @@ type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
// Top parameter asks for bigger pages of data
// https://dev.onedrive.com/odata/optional-query-parameters.htm
opts := newOptsCall(dirID, "GET", "/children?$top=1000")
@@ -652,7 +651,7 @@ OUTER:
var result api.ListChildrenResponse
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -710,7 +709,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
if !f.opt.ExposeOneNoteFiles && info.GetPackageType() == api.PackageTypeOneNote {
fs.Debugf(info.Name, "OneNote file not shown in directory listing")
return false
@@ -794,12 +793,12 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
}
// deleteObject removes an object by ID
func (f *Fs) deleteObject(ctx context.Context, id string) error {
func (f *Fs) deleteObject(id string) error {
opts := newOptsCall(id, "DELETE", "")
opts.NoResponse = true
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
resp, err := f.srv.Call(&opts)
return shouldRetry(resp, err)
})
}
@@ -822,7 +821,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
}
if check {
// check to see if there are any items
found, err := f.listAll(ctx, rootID, false, false, func(item *api.Item) bool {
found, err := f.listAll(rootID, false, false, func(item *api.Item) bool {
return true
})
if err != nil {
@@ -832,7 +831,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return fs.ErrorDirectoryNotEmpty
}
}
err = f.deleteObject(ctx, rootID)
err = f.deleteObject(rootID)
if err != nil {
return err
}
@@ -942,7 +941,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &copyReq, nil)
resp, err = f.srv.CallJSON(&opts, &copyReq, nil)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1030,7 +1029,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
var resp *http.Response
var info api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
resp, err = f.srv.CallJSON(&opts, &move, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1123,7 +1122,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
// Get timestamps of src so they can be preserved
srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(ctx, srcID, "")
srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(srcID, "")
if err != nil {
return err
}
@@ -1145,7 +1144,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
var resp *http.Response
var info api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
resp, err = f.srv.CallJSON(&opts, &move, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1171,7 +1170,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &drive)
resp, err = f.srv.CallJSON(&opts, nil, &drive)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1211,7 +1210,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
var resp *http.Response
var result api.CreateShareLinkResponse
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &share, &result)
resp, err = f.srv.CallJSON(&opts, &share, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1371,7 +1370,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
}
var info *api.Item
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
return shouldRetry(resp, err)
})
return info, err
@@ -1406,7 +1405,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
opts.Options = options
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1442,7 +1441,7 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
createRequest.Item.FileSystemInfo.LastModifiedDateTime = api.Timestamp(modTime)
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &createRequest, &response)
resp, err = o.fs.srv.CallJSON(&opts, &createRequest, &response)
if apiErr, ok := err.(*api.Error); ok {
if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
// Make the error more user-friendly
@@ -1455,7 +1454,7 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
}
// uploadFragment uploads a part
func (o *Object) uploadFragment(ctx context.Context, url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64) (info *api.Item, err error) {
func (o *Object) uploadFragment(url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64) (info *api.Item, err error) {
opts := rest.Opts{
Method: "PUT",
RootURL: url,
@@ -1465,30 +1464,28 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
}
// var response api.UploadFragmentResponse
var resp *http.Response
var body []byte
err = o.fs.pacer.Call(func() (bool, error) {
_, _ = chunk.Seek(0, io.SeekStart)
resp, err = o.fs.srv.Call(ctx, &opts)
if err != nil {
return shouldRetry(resp, err)
resp, err = o.fs.srv.Call(&opts)
if resp != nil {
defer fs.CheckClose(resp.Body, &err)
}
body, err = rest.ReadBody(resp)
if err != nil {
return shouldRetry(resp, err)
retry, err := shouldRetry(resp, err)
if !retry && resp != nil {
if resp.StatusCode == 200 || resp.StatusCode == 201 {
// we are done :)
// read the item
info = &api.Item{}
return false, json.NewDecoder(resp.Body).Decode(info)
}
}
if resp.StatusCode == 200 || resp.StatusCode == 201 {
// we are done :)
// read the item
info = &api.Item{}
return false, json.Unmarshal(body, info)
}
return false, nil
return retry, err
})
return info, err
}
// cancelUploadSession cancels an upload session
func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error) {
func (o *Object) cancelUploadSession(url string) (err error) {
opts := rest.Opts{
Method: "DELETE",
RootURL: url,
@@ -1496,7 +1493,7 @@ func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
return
@@ -1518,7 +1515,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
}
fs.Debugf(o, "Cancelling multipart upload")
cancelErr := o.cancelUploadSession(ctx, uploadURL)
cancelErr := o.cancelUploadSession(uploadURL)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
}
@@ -1554,7 +1551,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
}
seg := readers.NewRepeatableReader(io.LimitReader(in, n))
fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n)
info, err = o.uploadFragment(uploadURL, position, size, seg, n)
if err != nil {
return nil, err
}
@@ -1595,7 +1592,7 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
resp, err = o.fs.srv.CallJSON(&opts, nil, &info)
if apiErr, ok := err.(*api.Error); ok {
if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
// Make the error more user-friendly
@@ -1647,7 +1644,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.deleteObject(ctx, o.id)
return o.fs.deleteObject(o.id)
}
// MimeType of an Object if known, "" otherwise


@@ -161,7 +161,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
Method: "POST",
Path: "/session/login.json",
}
resp, err = f.srv.CallJSON(ctx, &opts, &account, &f.session)
resp, err = f.srv.CallJSON(&opts, &account, &f.session)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -246,7 +246,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
}
// deleteObject removes an object by ID
func (f *Fs) deleteObject(ctx context.Context, id string) error {
func (f *Fs) deleteObject(id string) error {
return f.pacer.Call(func() (bool, error) {
removeDirData := removeFolder{SessionID: f.session.SessionID, FolderID: id}
opts := rest.Opts{
@@ -254,7 +254,7 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {
NoResponse: true,
Path: "/folder/remove.json",
}
resp, err := f.srv.CallJSON(ctx, &opts, &removeDirData, nil)
resp, err := f.srv.CallJSON(&opts, &removeDirData, nil)
return f.shouldRetry(resp, err)
})
}
@@ -275,14 +275,14 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
if err != nil {
return err
}
item, err := f.readMetaDataForFolderID(ctx, rootID)
item, err := f.readMetaDataForFolderID(rootID)
if err != nil {
return err
}
if check && len(item.Files) != 0 {
return errors.New("folder not empty")
}
err = f.deleteObject(ctx, rootID)
err = f.deleteObject(rootID)
if err != nil {
return err
}
@@ -353,7 +353,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
Method: "POST",
Path: "/file/move_copy.json",
}
resp, err = f.srv.CallJSON(ctx, &opts, &copyFileData, &response)
resp, err = f.srv.CallJSON(&opts, &copyFileData, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -410,7 +410,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
Method: "POST",
Path: "/file/move_copy.json",
}
resp, err = f.srv.CallJSON(ctx, &opts, &copyFileData, &response)
resp, err = f.srv.CallJSON(&opts, &copyFileData, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -509,7 +509,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
Method: "POST",
Path: "/folder/move_copy.json",
}
resp, err = f.srv.CallJSON(ctx, &opts, &moveFolderData, &response)
resp, err = f.srv.CallJSON(&opts, &moveFolderData, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -589,14 +589,14 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
}
// readMetaDataForFolderID reads the metadata for the folder with the given ID
func (f *Fs) readMetaDataForFolderID(ctx context.Context, id string) (info *FolderList, err error) {
func (f *Fs) readMetaDataForFolderID(id string) (info *FolderList, err error) {
var resp *http.Response
opts := rest.Opts{
Method: "GET",
Path: "/folder/list.json/" + f.session.SessionID + "/" + id,
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
resp, err = f.srv.CallJSON(&opts, nil, &info)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -641,7 +641,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
Method: "POST",
Path: "/upload/create_file.json",
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, &createFileData, &response)
resp, err = o.fs.srv.CallJSON(&opts, &createFileData, &response)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -694,7 +694,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
Method: "POST",
Path: "/folder.json",
}
resp, err = f.srv.CallJSON(ctx, &opts, &createDirData, &response)
resp, err = f.srv.CallJSON(&opts, &createDirData, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -722,7 +722,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
Method: "GET",
Path: "/folder/list.json/" + f.session.SessionID + "/" + pathID,
}
resp, err = f.srv.CallJSON(ctx, &opts, nil, &folderList)
resp, err = f.srv.CallJSON(&opts, nil, &folderList)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -769,7 +769,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
folderList := FolderList{}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &folderList)
resp, err = f.srv.CallJSON(&opts, nil, &folderList)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -853,7 +853,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
}
update := modTimeFile{SessionID: o.fs.session.SessionID, FileID: o.id, FileModificationTime: strconv.FormatInt(modTime.Unix(), 10)}
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, nil)
resp, err := o.fs.srv.CallJSON(&opts, &update, nil)
return o.fs.shouldRetry(resp, err)
})
@@ -873,7 +873,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -892,7 +892,7 @@ func (o *Object) Remove(ctx context.Context) error {
NoResponse: true,
Path: "/file.json/" + o.fs.session.SessionID + "/" + o.id,
}
resp, err := o.fs.srv.Call(ctx, &opts)
resp, err := o.fs.srv.Call(&opts)
return o.fs.shouldRetry(resp, err)
})
}
@@ -920,7 +920,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Method: "POST",
Path: "/upload/open_file_upload.json",
}
resp, err := o.fs.srv.CallJSON(ctx, &opts, &openUploadData, &openResponse)
resp, err := o.fs.srv.CallJSON(&opts, &openUploadData, &openResponse)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -966,7 +966,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
MultipartFileName: o.remote, // ..name of the file for the attached file
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &reply)
resp, err = o.fs.srv.CallJSON(&opts, nil, &reply)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -989,7 +989,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Method: "POST",
Path: "/upload/close_file_upload.json",
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, &closeUploadData, &closeResponse)
resp, err = o.fs.srv.CallJSON(&opts, &closeUploadData, &closeResponse)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -1015,7 +1015,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
NoResponse: true,
Path: "/file/access.json",
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, &update, nil)
resp, err = o.fs.srv.CallJSON(&opts, &update, nil)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -1040,7 +1040,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
Method: "GET",
Path: "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + url.QueryEscape(replaceReservedChars(leaf)),
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &folderList)
resp, err = o.fs.srv.CallJSON(&opts, nil, &folderList)
return o.fs.shouldRetry(resp, err)
})
if err != nil {


@@ -201,7 +201,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
return nil, err
}
found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
found, err := f.listAll(directoryID, false, true, func(item *api.Item) bool {
if item.Name == leaf {
info = item
return true
@@ -334,7 +334,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
if item.Name == leaf {
pathIDOut = item.ID
return true
@@ -357,7 +357,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
opts.Parameters.Set("name", replaceReservedChars(leaf))
opts.Parameters.Set("folderid", dirIDtoNumber(pathID))
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -400,7 +400,7 @@ type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/listfolder",
@@ -412,7 +412,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
var result api.ItemResult
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -458,7 +458,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
remote := path.Join(dir, info.Name)
if info.IsFolder {
// cache the directory ID for later lookups
@@ -563,7 +563,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
var resp *http.Response
var result api.ItemResult
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -628,7 +628,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var resp *http.Response
var result api.ItemResult
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -666,7 +666,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
var resp *http.Response
var result api.Error
return f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
err = result.Update(err)
return shouldRetry(resp, err)
})
@@ -706,7 +706,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
var resp *http.Response
var result api.ItemResult
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -803,7 +803,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
var resp *http.Response
var result api.ItemResult
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = f.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -830,7 +830,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
var resp *http.Response
var q api.UserInfo
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &q)
resp, err = f.srv.CallJSON(&opts, nil, &q)
err = q.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -881,7 +881,7 @@ func (o *Object) getHashes(ctx context.Context) (err error) {
}
opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = o.fs.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -984,7 +984,7 @@ func (o *Object) Storable() bool {
}
// downloadURL fetches the download link
func (o *Object) downloadURL(ctx context.Context) (URL string, err error) {
func (o *Object) downloadURL() (URL string, err error) {
if o.id == "" {
return "", errors.New("can't download - no id")
}
@@ -1000,7 +1000,7 @@ func (o *Object) downloadURL(ctx context.Context) (URL string, err error) {
}
opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = o.fs.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -1016,7 +1016,7 @@ func (o *Object) downloadURL(ctx context.Context) (URL string, err error) {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
url, err := o.downloadURL(ctx)
url, err := o.downloadURL()
if err != nil {
return nil, err
}
@@ -1027,7 +1027,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1104,7 +1104,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
resp, err = o.fs.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
@@ -1134,7 +1134,7 @@ func (o *Object) Remove(ctx context.Context) error {
var result api.ItemResult
opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &result)
resp, err := o.fs.srv.CallJSON(&opts, nil, &result)
err = result.Error.Update(err)
return shouldRetry(resp, err)
})
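
Each pcloud call above is paired with result.Error.Update(err): the API reports failures inside the JSON body even when the HTTP transport succeeds, so the body-level error is merged with the transport error before shouldRetry classifies it. A hedged sketch of that merge, with illustrative types rather than the real backend/pcloud/api ones:

package main

import "fmt"

type apiError struct {
	Result      int    `json:"result"` // non-zero means the call failed
	ErrorString string `json:"error"`
}

// Update keeps a transport error if there is one, otherwise promotes a
// body-level error to a Go error.
func (e *apiError) Update(err error) error {
	if err != nil {
		return err
	}
	if e.Result != 0 {
		return fmt.Errorf("api error %d: %s", e.Result, e.ErrorString)
	}
	return nil
}

func main() {
	e := &apiError{Result: 2000, ErrorString: "Log in required"}
	// The transport succeeded (nil), yet the call still failed.
	fmt.Println(e.Update(nil))
}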


@@ -1,83 +0,0 @@
// Package api contains definitions for using the premiumize.me API
package api
import "fmt"
// Response is returned by all messages and embedded in the
// structures below
type Response struct {
Message string `json:"message,omitempty"`
Status string `json:"status"`
}
// Error satisfies the error interface
func (e *Response) Error() string {
return fmt.Sprintf("%s: %s", e.Status, e.Message)
}
// AsErr checks the status and returns an err if bad or nil if good
func (e *Response) AsErr() error {
if e.Status != "success" {
return e
}
return nil
}
// Item Types
const (
ItemTypeFolder = "folder"
ItemTypeFile = "file"
)
// Item refers to a file or folder
type Item struct {
Breadcrumbs []Breadcrumb `json:"breadcrumbs"`
CreatedAt int64 `json:"created_at,omitempty"`
ID string `json:"id"`
Link string `json:"link,omitempty"`
Name string `json:"name"`
Size int64 `json:"size,omitempty"`
StreamLink string `json:"stream_link,omitempty"`
Type string `json:"type"`
TranscodeStatus string `json:"transcode_status"`
IP string `json:"ip"`
MimeType string `json:"mime_type"`
}
// Breadcrumb is part of the breadcrumb trail for a file or folder. It
// is returned as part of folder/list if required
type Breadcrumb struct {
ID string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
ParentID string `json:"parent_id,omitempty"`
}
// FolderListResponse is the response to folder/list
type FolderListResponse struct {
Response
Content []Item `json:"content"`
Name string `json:"name,omitempty"`
ParentID string `json:"parent_id,omitempty"`
}
// FolderCreateResponse is the response to folder/create
type FolderCreateResponse struct {
Response
ID string `json:"id,omitempty"`
}
// FolderUploadinfoResponse is the response to folder/uploadinfo
type FolderUploadinfoResponse struct {
Response
Token string `json:"token,omitempty"`
URL string `json:"url,omitempty"`
}
// AccountInfoResponse is the response to account/info
type AccountInfoResponse struct {
Response
CustomerID string `json:"customer_id,omitempty"`
LimitUsed float64 `json:"limit_used,omitempty"` // fraction 0..1 of download traffic limit
PremiumUntil int64 `json:"premium_until,omitempty"`
SpaceUsed float64 `json:"space_used,omitempty"`
}
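
The deleted api.go above relies on struct embedding: every typed reply embeds Response, so a single AsErr covers all of them via method promotion. A self-contained illustration of that usage, not part of the backend itself:

package main

import "fmt"

type Response struct {
	Message string `json:"message,omitempty"`
	Status  string `json:"status"`
}

// AsErr reports a non-success status as an error.
func (e *Response) AsErr() error {
	if e.Status != "success" {
		return fmt.Errorf("%s: %s", e.Status, e.Message)
	}
	return nil
}

type FolderCreateResponse struct {
	Response // embedded, so AsErr is promoted onto the outer type
	ID       string `json:"id,omitempty"`
}

func main() {
	r := FolderCreateResponse{Response: Response{Status: "error", Message: "bad folder"}}
	if err := r.AsErr(); err != nil {
		fmt.Println("create failed:", err)
	}
}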

File diff suppressed because it is too large


@@ -1,17 +0,0 @@
// Test filesystem interface
package premiumizeme_test
import (
"testing"
"github.com/rclone/rclone/backend/premiumizeme"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestPremiumizeMe:",
NilObject: (*premiumizeme.Object)(nil),
})
}


@@ -1,693 +0,0 @@
package putio
import (
"bytes"
"context"
"encoding/base64"
"fmt"
"io"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/putdotio/go-putio/putio"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
)
// Fs represents a remote Putio server
type Fs struct {
name string // name of this remote
root string // the path we are working on
features *fs.Features // optional features
client *putio.Client // client for making API calls to Put.io
pacer *fs.Pacer // To pace the API calls
dirCache *dircache.DirCache // Map of directory path to directory id
oAuthClient *http.Client
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Putio root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(err error) (bool, error) {
if err == nil {
return false, nil
}
if fserrors.ShouldRetry(err) {
return true, err
}
if perr, ok := err.(*putio.ErrorResponse); ok {
if perr.Response.StatusCode == 429 || perr.Response.StatusCode >= 500 {
return true, err
}
}
return false, err
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
// defer log.Trace(name, "root=%v", root)("f=%+v, err=%v", &f, &err)
oAuthClient, _, err := oauthutil.NewClient(name, m, putioConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure putio")
}
p := &Fs{
name: name,
root: root,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
client: putio.NewClient(oAuthClient),
oAuthClient: oAuthClient,
}
p.features = (&fs.Features{
DuplicateFiles: true,
ReadMimeType: true,
CanHaveEmptyDirectories: true,
}).Fill(p)
p.dirCache = dircache.New(root, "0", p)
ctx := context.Background()
// Find the current root
err = p.dirCache.FindRoot(ctx, false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *p
tempF.dirCache = dircache.New(newRoot, "0", &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(ctx, false)
if err != nil {
// No root so return old f
return p, nil
}
_, err := tempF.NewObject(ctx, remote)
if err != nil {
// unable to list folder so return old f
return p, nil
}
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
p.dirCache = tempF.dirCache
p.root = tempF.root
return p, fs.ErrorIsFile
}
// fs.Debugf(p, "Root id: %s", p.dirCache.RootID())
return p, nil
}
func itoa(i int64) string {
return strconv.FormatInt(i, 10)
}
func atoi(a string) int64 {
i, err := strconv.ParseInt(a, 10, 64)
if err != nil {
panic(err)
}
return i
}
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
// defer log.Trace(f, "pathID=%v, leaf=%v", pathID, leaf)("newID=%v, err=%v", newID, &err)
parentID := atoi(pathID)
var entry putio.File
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "creating folder. part: %s, parentID: %d", leaf, parentID)
entry, err = f.client.Files.CreateFolder(ctx, leaf, parentID)
return shouldRetry(err)
})
return itoa(entry.ID), err
}
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// defer log.Trace(f, "pathID=%v, leaf=%v", pathID, leaf)("pathIDOut=%v, found=%v, err=%v", pathIDOut, found, &err)
if pathID == "0" && leaf == "" {
// that's the root directory
return pathID, true, nil
}
fileID := atoi(pathID)
var children []putio.File
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "listing file: %d", fileID)
children, _, err = f.client.Files.List(ctx, fileID)
return shouldRetry(err)
})
if err != nil {
if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode == 404 {
err = nil
}
return
}
for _, child := range children {
if child.Name == leaf {
found = true
pathIDOut = itoa(child.ID)
if !child.IsDir() {
err = fs.ErrorNotAFile
}
return
}
}
return
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer log.Trace(f, "dir=%v", dir)("err=%v", &err)
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
}
parentID := atoi(directoryID)
var children []putio.File
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "listing files inside List: %d", parentID)
children, _, err = f.client.Files.List(ctx, parentID)
return shouldRetry(err)
})
if err != nil {
return
}
for _, child := range children {
remote := path.Join(dir, child.Name)
// fs.Debugf(f, "child: %s", remote)
if child.IsDir() {
f.dirCache.Put(remote, itoa(child.ID))
d := fs.NewDir(remote, child.UpdatedAt.Time)
entries = append(entries, d)
} else {
o, err := f.newObjectWithInfo(ctx, remote, child)
if err != nil {
return nil, err
}
entries = append(entries, o)
}
}
return
}
// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
// defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err)
existingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
default:
return nil, err
}
}
// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
// defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err)
size := src.Size()
remote := src.Remote()
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, err
}
loc, err := f.createUpload(ctx, leaf, size, directoryID, src.ModTime(ctx))
if err != nil {
return nil, err
}
fileID, err := f.sendUpload(ctx, loc, size, in)
if err != nil {
return nil, err
}
var entry putio.File
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "getting file: %d", fileID)
entry, err = f.client.Files.Get(ctx, fileID)
return shouldRetry(err)
})
if err != nil {
return nil, err
}
return f.newObjectWithInfo(ctx, remote, entry)
}
func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID string, modTime time.Time) (location string, err error) {
// defer log.Trace(f, "name=%v, size=%v, parentID=%v, modTime=%v", name, size, parentID, modTime.String())("location=%v, err=%v", location, &err)
err = f.pacer.Call(func() (bool, error) {
req, err := http.NewRequest("POST", "https://upload.put.io/files/", nil)
if err != nil {
return false, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("tus-resumable", "1.0.0")
req.Header.Set("upload-length", strconv.FormatInt(size, 10))
b64name := base64.StdEncoding.EncodeToString([]byte(name))
b64true := base64.StdEncoding.EncodeToString([]byte("true"))
b64parentID := base64.StdEncoding.EncodeToString([]byte(parentID))
b64modifiedAt := base64.StdEncoding.EncodeToString([]byte(modTime.Format(time.RFC3339)))
req.Header.Set("upload-metadata", fmt.Sprintf("name %s,no-torrent %s,parent_id %s,updated-at %s", b64name, b64true, b64parentID, b64modifiedAt))
resp, err := f.oAuthClient.Do(req)
retry, err := shouldRetry(err)
if retry {
return true, err
}
if err != nil {
return false, err
}
if resp.StatusCode != 201 {
return false, fmt.Errorf("unexpected status code from upload create: %d", resp.StatusCode)
}
location = resp.Header.Get("location")
if location == "" {
return false, errors.New("empty location header from upload create")
}
return false, nil
})
return
}
func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.Reader) (fileID int64, err error) {
// defer log.Trace(f, "location=%v, size=%v", location, size)("fileID=%v, err=%v", fileID, &err)
if size == 0 {
err = f.pacer.Call(func() (bool, error) {
fs.Debugf(f, "Sending zero length chunk")
fileID, err = f.transferChunk(ctx, location, 0, bytes.NewReader([]byte{}), 0)
return shouldRetry(err)
})
return
}
var start int64
buf := make([]byte, defaultChunkSize)
for start < size {
reqSize := size - start
if reqSize >= int64(defaultChunkSize) {
reqSize = int64(defaultChunkSize)
}
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, reqSize)
// Transfer the chunk
err = f.pacer.Call(func() (bool, error) {
fs.Debugf(f, "Sending chunk. start: %d length: %d", start, reqSize)
// TODO get file offset and seek to the position
fileID, err = f.transferChunk(ctx, location, start, chunk, reqSize)
return shouldRetry(err)
})
if err != nil {
return
}
start += reqSize
}
return
}
func (f *Fs) transferChunk(ctx context.Context, location string, start int64, chunk io.ReadSeeker, chunkSize int64) (fileID int64, err error) {
// defer log.Trace(f, "location=%v, start=%v, chunkSize=%v", location, start, chunkSize)("fileID=%v, err=%v", fileID, &err)
_, _ = chunk.Seek(0, io.SeekStart)
req, err := f.makeUploadPatchRequest(ctx, location, chunk, start, chunkSize)
if err != nil {
return 0, err
}
req = req.WithContext(ctx)
res, err := f.oAuthClient.Do(req)
if err != nil {
return 0, err
}
defer func() {
_ = res.Body.Close()
}()
if res.StatusCode != 204 {
return 0, fmt.Errorf("unexpected status code while transferring chunk: %d", res.StatusCode)
}
sfid := res.Header.Get("putio-file-id")
if sfid != "" {
fileID, err = strconv.ParseInt(sfid, 10, 64)
if err != nil {
return 0, err
}
}
return fileID, nil
}
func (f *Fs) makeUploadPatchRequest(ctx context.Context, location string, in io.Reader, offset, length int64) (*http.Request, error) {
req, err := http.NewRequest("PATCH", location, in)
if err != nil {
return nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("tus-resumable", "1.0.0")
req.Header.Set("upload-offset", strconv.FormatInt(offset, 10))
req.Header.Set("content-length", strconv.FormatInt(length, 10))
req.Header.Set("content-type", "application/offset+octet-stream")
return req, nil
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
// defer log.Trace(f, "dir=%v", dir)("err=%v", &err)
err = f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
return err
}
// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
// defer log.Trace(f, "dir=%v", dir)("err=%v", &err)
root := strings.Trim(path.Join(f.root, dir), "/")
// can't remove root
if root == "" {
return errors.New("can't remove root directory")
}
// check directory exists
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return errors.Wrap(err, "Rmdir")
}
dirID := atoi(directoryID)
// check directory empty
var children []putio.File
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "listing files: %d", dirID)
children, _, err = f.client.Files.List(ctx, dirID)
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "Rmdir")
}
if len(children) != 0 {
return errors.New("directory not empty")
}
// remove it
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "deleting file: %d", dirID)
err = f.client.Files.Delete(ctx, dirID)
return shouldRetry(err)
})
f.dirCache.FlushDir(dir)
return err
}
// Precision returns the precision
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) (err error) {
// defer log.Trace(f, "")("err=%v", &err)
if f.root == "" {
return errors.New("can't purge root directory")
}
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
rootID := atoi(f.dirCache.RootID())
// Let putio delete the filesystem tree
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "deleting file: %d", rootID)
err = f.client.Files.Delete(ctx, rootID)
return shouldRetry(err)
})
f.dirCache.ResetRoot()
return err
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Object, err error) {
// defer log.Trace(f, "src=%+v, remote=%v", src, remote)("o=%+v, err=%v", &o, &err)
srcObj, ok := src.(*Object)
if !ok {
return nil, fs.ErrorCantCopy
}
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, err
}
err = f.pacer.Call(func() (bool, error) {
params := url.Values{}
params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
params.Set("parent_id", directoryID)
params.Set("name", leaf)
req, err := f.client.NewRequest(ctx, "POST", "/v2/files/copy", strings.NewReader(params.Encode()))
if err != nil {
return false, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
// fs.Debugf(f, "copying file (%d) to parent_id: %s", srcObj.file.ID, directoryID)
_, err = f.client.Do(req, nil)
return shouldRetry(err)
})
if err != nil {
return nil, err
}
return f.NewObject(ctx, remote)
}
// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Object, err error) {
// defer log.Trace(f, "src=%+v, remote=%v", src, remote)("o=%+v, err=%v", &o, &err)
srcObj, ok := src.(*Object)
if !ok {
return nil, fs.ErrorCantMove
}
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, err
}
err = f.pacer.Call(func() (bool, error) {
params := url.Values{}
params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
params.Set("parent_id", directoryID)
params.Set("name", leaf)
req, err := f.client.NewRequest(ctx, "POST", "/v2/files/move", strings.NewReader(params.Encode()))
if err != nil {
return false, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
// fs.Debugf(f, "moving file (%d) to parent_id: %s", srcObj.file.ID, directoryID)
_, err = f.client.Do(req, nil)
return shouldRetry(err)
})
if err != nil {
return nil, err
}
return f.NewObject(ctx, remote)
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
// defer log.Trace(f, "src=%+v, srcRemote=%v, dstRemote", src, srcRemote, dstRemote)("err=%v", &err)
srcFs, ok := src.(*Fs)
if !ok {
return fs.ErrorCantDirMove
}
srcPath := path.Join(srcFs.root, srcRemote)
dstPath := path.Join(f.root, dstRemote)
// Refuse to move to or from the root
if srcPath == "" || dstPath == "" {
return errors.New("can't move root directory")
}
// find the root src directory
err = srcFs.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
// find the root dst directory
if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
} else {
if f.dirCache.FoundRoot() {
return fs.ErrorDirExists
}
}
// Find ID of dst parent, creating subdirs if necessary
var leaf, dstDirectoryID string
findPath := dstRemote
if dstRemote == "" {
findPath = f.root
}
leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true)
if err != nil {
return err
}
// Check destination does not exist
if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
return err
} else {
return fs.ErrorDirExists
}
}
// Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
if err != nil {
return err
}
err = f.pacer.Call(func() (bool, error) {
params := url.Values{}
params.Set("file_id", srcID)
params.Set("parent_id", dstDirectoryID)
params.Set("name", leaf)
req, err := f.client.NewRequest(ctx, "POST", "/v2/files/move", strings.NewReader(params.Encode()))
if err != nil {
return false, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
// fs.Debugf(f, "moving file (%s) to parent_id: %s", srcID, dstDirectoryID)
_, err = f.client.Do(req, nil)
return shouldRetry(err)
})
srcFs.dirCache.FlushDir(srcRemote)
return err
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
// defer log.Trace(f, "")("usage=%+v, err=%v", usage, &err)
var ai putio.AccountInfo
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "getting account info")
ai, err = f.client.Account.Info(ctx)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "about failed")
}
return &fs.Usage{
Total: fs.NewUsageValue(ai.Disk.Size), // quota of bytes that can be used
Used: fs.NewUsageValue(ai.Disk.Used), // bytes in use
Free: fs.NewUsageValue(ai.Disk.Avail), // bytes which can be uploaded before reaching the quota
}, nil
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.CRC32)
}
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
// defer log.Trace(f, "")("")
f.dirCache.ResetRoot()
}
// CleanUp the trash in the Fs
func (f *Fs) CleanUp(ctx context.Context) (err error) {
// defer log.Trace(f, "")("err=%v", &err)
return f.pacer.Call(func() (bool, error) {
req, err := f.client.NewRequest(ctx, "POST", "/v2/trash/empty", nil)
if err != nil {
return false, err
}
// fs.Debugf(f, "emptying trash")
_, err = f.client.Do(req, nil)
return shouldRetry(err)
})
}
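
createUpload in the deleted put.io backend above speaks the tus resumable-upload protocol: each metadata value is base64-encoded and the pairs are joined into one upload-metadata header. A small sketch of just that encoding step; the helper is illustrative, while the header format follows the tus creation extension used above:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// tusMetadata builds the upload-metadata header value: comma-separated
// "key base64(value)" pairs. Pair order is unspecified here, which tus allows.
func tusMetadata(kv map[string]string) string {
	parts := make([]string, 0, len(kv))
	for k, v := range kv {
		parts = append(parts, k+" "+base64.StdEncoding.EncodeToString([]byte(v)))
	}
	return strings.Join(parts, ",")
}

func main() {
	fmt.Println(tusMetadata(map[string]string{
		"name":      "report.pdf",
		"parent_id": "0",
	}))
}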


@@ -1,280 +0,0 @@
package putio
import (
"context"
"io"
"net/http"
"net/url"
"path"
"strconv"
"time"
"github.com/pkg/errors"
"github.com/putdotio/go-putio/putio"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
)
// Object describes a Putio object
//
// Putio Objects always have full metadata
type Object struct {
fs *Fs // what this object is part of
file *putio.File
remote string // The remote path
modtime time.Time
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
// defer log.Trace(f, "remote=%v", remote)("o=%+v, err=%v", &o, &err)
obj := &Object{
fs: f,
remote: remote,
}
err = obj.readEntryAndSetMetadata(ctx)
if err != nil {
return nil, err
}
return obj, err
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info putio.File) (o fs.Object, err error) {
// defer log.Trace(f, "remote=%v, info=+v", remote, &info)("o=%+v, err=%v", &o, &err)
obj := &Object{
fs: f,
remote: remote,
}
err = obj.setMetadataFromEntry(info)
if err != nil {
return nil, err
}
return obj, err
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the dropbox special hash
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.CRC32 {
return "", hash.ErrUnsupported
}
err := o.readEntryAndSetMetadata(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to read hash from metadata")
}
return o.file.CRC32, nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
if o.file == nil {
return 0
}
return o.file.Size
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
if o.file == nil {
return ""
}
return itoa(o.file.ID)
}
// MimeType returns the content type of the Object if
// known, or "" if not
func (o *Object) MimeType(ctx context.Context) string {
err := o.readEntryAndSetMetadata(ctx)
if err != nil {
return ""
}
return o.file.ContentType
}
// setMetadataFromEntry sets the fs data from a putio.File
//
// This isn't a complete set of metadata and has an inaccurate date
func (o *Object) setMetadataFromEntry(info putio.File) error {
o.file = &info
o.modtime = info.UpdatedAt.Time
return nil
}
// Reads the entry for a file from putio
func (o *Object) readEntry(ctx context.Context) (f *putio.File, err error) {
// defer log.Trace(o, "")("f=%+v, err=%v", f, &err)
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
}
return nil, err
}
var resp struct {
File putio.File `json:"file"`
}
err = o.fs.pacer.Call(func() (bool, error) {
// fs.Debugf(o, "requesting child. directoryID: %s, name: %s", directoryID, leaf)
req, err := o.fs.client.NewRequest(ctx, "GET", "/v2/files/"+directoryID+"/child?name="+url.PathEscape(leaf), nil)
if err != nil {
return false, err
}
_, err = o.fs.client.Do(req, &resp)
if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode == 404 {
return false, fs.ErrorObjectNotFound
}
return shouldRetry(err)
})
return &resp.File, err
}
// Read entry if not set and set metadata from it
func (o *Object) readEntryAndSetMetadata(ctx context.Context) error {
if o.file != nil {
return nil
}
entry, err := o.readEntry(ctx)
if err != nil {
return err
}
return o.setMetadataFromEntry(*entry)
}
// Returns the remote path for the object
func (o *Object) remotePath() string {
return path.Join(o.fs.root, o.remote)
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
if o.modtime.IsZero() {
err := o.readEntryAndSetMetadata(ctx)
if err != nil {
fs.Debugf(o, "Failed to read metadata: %v", err)
return time.Now()
}
}
return o.modtime
}
// SetModTime sets the modification time of the local fs object
//
// Commits the datastore
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
// defer log.Trace(o, "modTime=%v", modTime.String())("err=%v", &err)
req, err := o.fs.client.NewRequest(ctx, "POST", "/v2/files/touch?file_id="+strconv.FormatInt(o.file.ID, 10)+"&updated_at="+url.QueryEscape(modTime.Format(time.RFC3339)), nil)
if err != nil {
return err
}
// fs.Debugf(o, "setting modtime: %s", modTime.String())
_, err = o.fs.client.Do(req, nil)
if err != nil {
return err
}
o.modtime = modTime
if o.file != nil {
o.file.UpdatedAt.Time = modTime
}
return nil
}
// Storable returns whether this object is storable
func (o *Object) Storable() bool {
return true
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
// defer log.Trace(o, "")("err=%v", &err)
var storageURL string
err = o.fs.pacer.Call(func() (bool, error) {
storageURL, err = o.fs.client.Files.URL(ctx, o.file.ID, true)
return shouldRetry(err)
})
if err != nil {
return
}
var resp *http.Response
headers := fs.OpenOptionHeaders(options)
err = o.fs.pacer.Call(func() (bool, error) {
req, err := http.NewRequest(http.MethodGet, storageURL, nil)
if err != nil {
return shouldRetry(err)
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("User-Agent", o.fs.client.UserAgent)
// merge headers with extra headers
for header, value := range headers {
req.Header.Set(header, value)
}
// fs.Debugf(o, "opening file: id=%d", o.file.ID)
resp, err = http.DefaultClient.Do(req)
return shouldRetry(err)
})
if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode >= 400 && perr.Response.StatusCode <= 499 {
_ = resp.Body.Close()
return nil, fserrors.NoRetryError(err)
}
return resp.Body, err
}
// Update the already existing object
//
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
// defer log.Trace(o, "src=%+v", src)("err=%v", &err)
remote := o.remotePath()
if ignoredFiles.MatchString(remote) {
fs.Logf(o, "File name disallowed - not uploading")
return nil
}
err = o.Remove(ctx)
if err != nil {
return err
}
newObj, err := o.fs.PutUnchecked(ctx, in, src, options...)
if err != nil {
return err
}
*o = *(newObj.(*Object))
return err
}
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
// defer log.Trace(o, "")("err=%v", &err)
return o.fs.pacer.Call(func() (bool, error) {
// fs.Debugf(o, "removing file: id=%d", o.file.ID)
err = o.fs.client.Files.Delete(ctx, o.file.ID)
return shouldRetry(err)
})
}
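
readEntryAndSetMetadata above is a lazy-metadata memoizer: the first accessor that needs metadata fetches it, and later calls reuse the cached copy. A stripped-down illustration, where fetch stands in for the put.io child lookup:

package main

import (
	"fmt"
	"time"
)

type object struct {
	modtime time.Time
	fetched bool // set once metadata has been read
}

// readMetadata fetches metadata on first use only.
func (o *object) readMetadata(fetch func() time.Time) {
	if o.fetched {
		return // already cached, no network round trip
	}
	o.modtime = fetch()
	o.fetched = true
}

func main() {
	o := &object{}
	calls := 0
	fetch := func() time.Time { calls++; return time.Unix(0, 0) }
	o.readMetadata(fetch)
	o.readMetadata(fetch) // second call is a no-op
	fmt.Println(o.modtime.UTC(), "fetches:", calls)
}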


@@ -1,72 +0,0 @@
package putio
import (
"log"
"regexp"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
"golang.org/x/oauth2"
)
// Constants
const (
rcloneClientID = "4131"
rcloneObscuredClientSecret = "cMwrjWVmrHZp3gf1ZpCrlyGAmPpB-YY5BbVnO1fj-G9evcd8"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultChunkSize = 48 * fs.MebiByte
)
var (
// Description of how to auth for this app
putioConfig = &oauth2.Config{
Scopes: []string{},
Endpoint: oauth2.Endpoint{
AuthURL: "https://api.put.io/v2/oauth2/authenticate",
TokenURL: "https://api.put.io/v2/oauth2/access_token",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneObscuredClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
// A regexp matching path names for ignoring unnecessary files
ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r)$`)
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "putio",
Description: "Put.io",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
err := oauthutil.ConfigNoOffline("putio", name, m, putioConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
})
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ dircache.DirCacher = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)
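
The var _ block above is a compile-time interface check: assigning a typed nil to each interface makes the build fail as soon as the type stops satisfying it, with no runtime cost. The same trick in a standalone example:

package main

import (
	"fmt"
	"io"
	"strings"
)

type loggingReader struct{ r io.Reader }

func (l *loggingReader) Read(p []byte) (int, error) { return l.r.Read(p) }

// Compilation breaks here if *loggingReader ever loses its Read method.
var _ io.Reader = (*loggingReader)(nil)

func main() {
	b, _ := io.ReadAll(&loggingReader{r: strings.NewReader("ok")})
	fmt.Println(string(b))
}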


@@ -1,16 +0,0 @@
// Test Put.io filesystem interface
package putio
import (
"testing"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestPutio:",
NilObject: (*Object)(nil),
})
}


@@ -14,6 +14,7 @@ import (
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
@@ -23,10 +24,9 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
qsConfig "github.com/yunify/qingstor-sdk-go/v3/config"
qsErr "github.com/yunify/qingstor-sdk-go/v3/request/errors"
qs "github.com/yunify/qingstor-sdk-go/v3/service"
qsConfig "github.com/yunify/qingstor-sdk-go/config"
qsErr "github.com/yunify/qingstor-sdk-go/request/errors"
qs "github.com/yunify/qingstor-sdk-go/service"
)
// Register with Fs
@@ -146,15 +146,16 @@ type Options struct {
// Fs represents a remote qingstor server
type Fs struct {
name string // The name of the remote
root string // The root is a subdir, is a special object
opt Options // parsed options
features *fs.Features // optional features
svc *qs.Service // The connection to the qingstor server
zone string // The zone we are working on
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache for bucket creation status
name string // The name of the remote
root string // The root is a subdir, is a special object
opt Options // parsed options
features *fs.Features // optional features
svc *qs.Service // The connection to the qingstor server
zone string // The zone we are working on
bucket string // The bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucketOK and bucketDeleted
bucketOK bool // true if we have created the bucket
bucketDeleted bool // true if we have deleted the bucket
}
// Object describes a qingstor object
@@ -175,23 +176,22 @@ type Object struct {
// ------------------------------------------------------------
// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
// Pattern to match a qingstor path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
// qsParsePath parses a qingstor 'url'
func qsParsePath(path string) (bucket, key string, err error) {
// Pattern to match a qingstor path
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("Couldn't parse bucket out of qingstor path %q", path)
} else {
bucket, key = parts[1], parts[2]
key = strings.Trim(key, "/")
}
return
}
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
return bucket.Split(path.Join(f.root, rootRelativePath))
}
// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}
// Split an URL into three parts: protocol host and port
func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
/*
@@ -301,12 +301,6 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
return
}
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
@@ -323,6 +317,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "qingstor: upload cutoff")
}
bucket, key, err := qsParsePath(root)
if err != nil {
return nil, err
}
svc, err := qsServiceConnection(opt)
if err != nil {
return nil, err
@@ -333,33 +331,36 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
f := &Fs{
name: name,
opt: *opt,
svc: svc,
zone: opt.Zone,
cache: bucket.NewCache(),
name: name,
root: key,
opt: *opt,
svc: svc,
zone: opt.Zone,
bucket: bucket,
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
}).Fill(f)
if f.rootBucket != "" && f.rootDirectory != "" {
// Check to see if the object exists
bucketInit, err := svc.Bucket(f.rootBucket, opt.Zone)
if f.root != "" {
if !strings.HasSuffix(f.root, "/") {
f.root += "/"
}
//Check to see if the object exists
bucketInit, err := svc.Bucket(bucket, opt.Zone)
if err != nil {
return nil, err
}
_, err = bucketInit.HeadObject(f.rootDirectory, &qs.HeadObjectInput{})
_, err = bucketInit.HeadObject(key, &qs.HeadObjectInput{})
if err == nil {
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
f.root = path.Dir(key)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
f.setRoot(newRoot)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
@@ -374,18 +375,18 @@ func (f *Fs) Name() string {
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootBucket == "" {
return fmt.Sprintf("QingStor root")
if f.root == "" {
return fmt.Sprintf("QingStor bucket %s", f.bucket)
}
if f.rootDirectory == "" {
return fmt.Sprintf("QingStor bucket %s", f.rootBucket)
}
return fmt.Sprintf("QingStor bucket %s path %s", f.rootBucket, f.rootDirectory)
return fmt.Sprintf("QingStor bucket %s root %s", f.bucket, f.root)
}
// Precision of the remote
@@ -425,8 +426,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
err := f.makeBucket(ctx, dstBucket)
err := f.Mkdir(ctx, "")
if err != nil {
return nil, err
}
@@ -435,21 +435,22 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcBucket, srcPath := srcObj.split()
source := path.Join("/", srcBucket, srcPath)
srcFs := srcObj.fs
key := f.root + remote
source := path.Join("/"+srcFs.bucket, srcFs.root+srcObj.remote)
// fs.Debugf(f, "Copied, source key is: %s, and dst key is: %s", source, key)
fs.Debugf(f, "Copied, source key is: %s, and dst key is: %s", source, key)
req := qs.PutObjectInput{
XQSCopySource: &source,
}
bucketInit, err := f.svc.Bucket(dstBucket, f.zone)
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return nil, err
}
_, err = bucketInit.PutObject(dstPath, &req)
_, err = bucketInit.PutObject(key, &req)
if err != nil {
// fs.Debugf(f, "Copy Failed, API Error: %v", err)
fs.Debugf(f, "Copy Failed, API Error: %v", err)
return nil, err
}
return f.NewObject(ctx, remote)
@@ -510,27 +511,29 @@ type listFn func(remote string, object *qs.KeyType, isDirectory bool) error
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) error {
if prefix != "" {
prefix += "/"
}
if directory != "" {
directory += "/"
func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) error {
prefix := f.root
if dir != "" {
prefix += dir + "/"
}
delimiter := ""
if !recurse {
delimiter = "/"
}
maxLimit := int(listLimitSize)
var marker *string
for {
bucketInit, err := f.svc.Bucket(bucket, f.zone)
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return err
}
// FIXME need to implement ALL loop
req := qs.ListObjectsInput{
Delimiter: &delimiter,
Prefix: &directory,
Prefix: &prefix,
Limit: &maxLimit,
Marker: marker,
}
@@ -543,6 +546,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
return err
}
rootLength := len(f.root)
if !recurse {
for _, commonPrefix := range resp.CommonPrefixes {
if commonPrefix == nil {
@@ -550,17 +554,15 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
continue
}
remote := *commonPrefix
if !strings.HasPrefix(remote, prefix) {
if !strings.HasPrefix(remote, f.root) {
fs.Logf(f, "Odd name received %q", remote)
continue
}
remote = remote[len(prefix):]
if addBucket {
remote = path.Join(bucket, remote)
}
remote = remote[rootLength:]
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
err = fn(remote, &qs.KeyType{Key: &remote}, true)
if err != nil {
return err
@@ -570,25 +572,19 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
for _, object := range resp.Keys {
key := qs.StringValue(object.Key)
if !strings.HasPrefix(key, prefix) {
if !strings.HasPrefix(key, f.root) {
fs.Logf(f, "Odd name received %q", key)
continue
}
remote := key[len(prefix):]
if addBucket {
remote = path.Join(bucket, remote)
}
remote := key[rootLength:]
err = fn(remote, object, false)
if err != nil {
return err
}
}
if resp.HasMore != nil && !*resp.HasMore {
break
}
// Use NextMarker if set, otherwise use last Key
if resp.NextMarker == nil || *resp.NextMarker == "" {
fs.Errorf(f, "Expecting NextMarker but didn't find one")
//marker = resp.Keys[len(resp.Keys)-1].Key
break
} else {
marker = resp.NextMarker
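
The listing loop above pages marker-style: request a page, consume its keys, then advance Marker to NextMarker until HasMore reports false. A compact sketch of the same loop with an illustrative page type, not the qingstor SDK's ListObjectsOutput:

package main

import "fmt"

type page struct {
	Keys       []string
	NextMarker string
	HasMore    bool
}

// fetch stands in for bucketInit.ListObjects with a Marker parameter.
func fetch(marker string) page {
	pages := map[string]page{
		"":  {Keys: []string{"a", "b"}, NextMarker: "b", HasMore: true},
		"b": {Keys: []string{"c"}},
	}
	return pages[marker]
}

func main() {
	var marker string
	for {
		p := fetch(marker)
		for _, k := range p.Keys {
			fmt.Println("object:", k)
		}
		if !p.HasMore || p.NextMarker == "" {
			break // last page, or the server failed to hand us a marker
		}
		marker = p.NextMarker
	}
}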
@@ -614,10 +610,20 @@ func (f *Fs) itemToDirEntry(remote string, object *qs.KeyType, isDirectory bool)
return o, nil
}
// mark the bucket as being OK
func (f *Fs) markBucketOK() {
if f.bucket != "" {
f.bucketOKMu.Lock()
f.bucketOK = true
f.bucketDeleted = false
f.bucketOKMu.Unlock()
}
}
// listDir lists files and directories to out
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// List the objects and directories
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *qs.KeyType, isDirectory bool) error {
err = f.list(ctx, dir, false, func(remote string, object *qs.KeyType, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
@@ -631,12 +637,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
return nil, err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
f.markBucketOK()
return entries, nil
}
// listBuckets lists the buckets to out
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
req := qs.ListBucketsInput{
Location: &f.zone,
}
@@ -662,14 +672,10 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
if f.bucket == "" {
return f.listBuckets(dir)
}
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
return f.listDir(ctx, dir)
}
// ListR lists the objects and directories of the Fs starting
@@ -689,105 +695,106 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
}
if bucket == "" {
entries, err := f.listBuckets(ctx)
err = f.list(ctx, dir, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
bucket := entry.Remote()
err = listR(bucket, "", f.rootDirectory, true)
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
}
} else {
err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return list.Add(entry)
})
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return list.Flush()
}
// Check if the bucket exists
func (f *Fs) dirExists() (bool, error) {
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return false, err
}
_, err = bucketInit.Head()
if err == nil {
return true, nil
}
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusNotFound {
err = nil
}
}
return false, err
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
bucket, _ := f.split(dir)
return f.makeBucket(ctx, bucket)
}
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.bucketOK {
return nil
}
// makeBucket creates the bucket if it doesn't exist
func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
return f.cache.Create(bucket, func() error {
bucketInit, err := f.svc.Bucket(bucket, f.zone)
if err != nil {
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return err
}
/* When a bucket is deleted, qingstor needs about 60 seconds to sync its status;
so we must wait for that sync to finish before operating on a just-deleted bucket
*/
retries := 0
for retries <= 120 {
statistics, err := bucketInit.GetStatistics()
if statistics == nil || err != nil {
break
}
switch *statistics.Status {
case "deleted":
fs.Debugf(f, "Wait for qingstor sync bucket status, retries: %d", retries)
time.Sleep(time.Second * 1)
retries++
continue
default:
break
}
break
}
if !f.bucketDeleted {
exists, err := f.dirExists()
if err == nil {
f.bucketOK = exists
}
if err != nil || exists {
return err
}
/* When a bucket is deleted, qingstor needs about 60 seconds to sync its status;
so we must wait for that sync to finish before operating on a just-deleted bucket
*/
wasDeleted := false
retries := 0
for retries <= 120 {
statistics, err := bucketInit.GetStatistics()
if statistics == nil || err != nil {
break
}
switch *statistics.Status {
case "deleted":
fs.Debugf(f, "Wait for qingstor bucket to be deleted, retries: %d", retries)
time.Sleep(time.Second * 1)
retries++
wasDeleted = true
continue
default:
break
}
break
}
}
retries = 0
for retries <= 120 {
_, err = bucketInit.Put()
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusConflict {
if wasDeleted {
fs.Debugf(f, "Wait for qingstor bucket to be creatable, retries: %d", retries)
time.Sleep(time.Second * 1)
retries++
continue
}
err = nil
}
}
break
_, err = bucketInit.Put()
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusConflict {
err = nil
}
return err
}, nil)
}
if err == nil {
f.bucketOK = true
f.bucketDeleted = false
}
return err
}
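
makeBucket above copes with qingstor's eventual consistency: a deleted bucket can take about a minute to settle, so creation retries on conflict with a one-second sleep and a retry cap. A bounded-wait sketch of the same shape, where check stands in for bucketInit.Put and the sleep is shortened so the example runs instantly:

package main

import (
	"errors"
	"fmt"
	"time"
)

var errConflict = errors.New("conflict: bucket still deleting")

// createWithWait retries check while it reports a conflict, up to
// maxRetries times, then gives up and returns the last error.
func createWithWait(check func() error, maxRetries int) error {
	var err error
	for i := 0; i <= maxRetries; i++ {
		err = check()
		if err == nil || !errors.Is(err, errConflict) {
			return err
		}
		time.Sleep(time.Millisecond) // one second in the real code
	}
	return err
}

func main() {
	attempts := 0
	err := createWithWait(func() error {
		attempts++
		if attempts < 3 {
			return errConflict // still syncing the old deletion
		}
		return nil
	}, 120)
	fmt.Println("attempts:", attempts, "err:", err)
}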
// bucketIsEmpty checks if the bucket is empty
func (f *Fs) bucketIsEmpty(bucket string) (bool, error) {
bucketInit, err := f.svc.Bucket(bucket, f.zone)
// dirIsEmpty checks if the bucket is empty
func (f *Fs) dirIsEmpty() (bool, error) {
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return true, err
}
@@ -805,64 +812,71 @@ func (f *Fs) bucketIsEmpty(bucket string) (bool, error) {
// Rmdir delete a bucket
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
bucket, directory := f.split(dir)
if bucket == "" || directory != "" {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
return nil
}
isEmpty, err := f.bucketIsEmpty(bucket)
isEmpty, err := f.dirIsEmpty()
if err != nil {
return err
}
if !isEmpty {
// fs.Debugf(f, "The bucket %s you tried to delete not empty.", bucket)
fs.Debugf(f, "The bucket %s you tried to delete not empty.", f.bucket)
return errors.New("BucketNotEmpty: The bucket you tried to delete is not empty")
}
return f.cache.Remove(bucket, func() error {
// fs.Debugf(f, "Deleting the bucket %s", bucket)
bucketInit, err := f.svc.Bucket(bucket, f.zone)
if err != nil {
return err
}
retries := 0
for retries <= 10 {
_, delErr := bucketInit.Delete()
if delErr != nil {
if e, ok := delErr.(*qsErr.QingStorError); ok {
switch e.Code {
// The "lease" status takes a few seconds to reach "ready" after a bucket is created,
// so wait for the lease status to become ready
case "lease_not_ready":
fs.Debugf(f, "QingStor bucket lease not ready, retries: %d", retries)
retries++
time.Sleep(time.Second * 1)
continue
default:
err = e
break
}
}
} else {
err = delErr
}
break
}
fs.Debugf(f, "Tried to delete the bucket %s", f.bucket)
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return err
})
}
retries := 0
for retries <= 10 {
_, delErr := bucketInit.Delete()
if delErr != nil {
if e, ok := delErr.(*qsErr.QingStorError); ok {
switch e.Code {
// The "lease" status takes a few seconds to reach "ready" after a bucket is created,
// so wait for the lease status to become ready
case "lease_not_ready":
fs.Debugf(f, "QingStor bucket lease not ready, retries: %d", retries)
retries++
time.Sleep(time.Second * 1)
continue
default:
err = e
break
}
}
} else {
err = delErr
}
break
}
if err == nil {
f.bucketOK = false
f.bucketDeleted = true
}
return err
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
bucket, bucketPath := o.split()
bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
if err != nil {
return err
}
// fs.Debugf(o, "Read metadata of key: %s", key)
resp, err := bucketInit.HeadObject(bucketPath, &qs.HeadObjectInput{})
key := o.fs.root + o.remote
fs.Debugf(o, "Read metadata of key: %s", key)
resp, err := bucketInit.HeadObject(key, &qs.HeadObjectInput{})
if err != nil {
// fs.Debugf(o, "Read metadata failed, API Error: %v", err)
fs.Debugf(o, "Read metadata failed, API Error: %v", err)
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
@@ -924,10 +938,10 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return nil
}
// Copy the object to itself to update the metadata
bucket, bucketPath := o.split()
sourceKey := path.Join("/", bucket, bucketPath)
key := o.fs.root + o.remote
sourceKey := path.Join("/", o.fs.bucket, key)
bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
if err != nil {
return err
}
@@ -936,21 +950,20 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
XQSCopySource: &sourceKey,
ContentType: &mimeType,
}
_, err = bucketInit.PutObject(bucketPath, &req)
_, err = bucketInit.PutObject(key, &req)
return err
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
bucket, bucketPath := o.split()
bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
if err != nil {
return nil, err
}
key := o.fs.root + o.remote
req := qs.GetObjectInput{}
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch option.(type) {
case *fs.RangeOption, *fs.SeekOption:
@@ -962,7 +975,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
}
}
}
resp, err := bucketInit.GetObject(bucketPath, &req)
resp, err := bucketInit.GetObject(key, &req)
if err != nil {
return nil, err
}
@@ -972,21 +985,21 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
// Update in to the object
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
// The maximum size of upload object is multipartUploadSize * MaxMultipleParts
bucket, bucketPath := o.split()
err := o.fs.makeBucket(ctx, bucket)
err := o.fs.Mkdir(ctx, "")
if err != nil {
return err
}
key := o.fs.root + o.remote
// Guess the content type
mimeType := fs.MimeType(ctx, src)
req := uploadInput{
body: in,
qsSvc: o.fs.svc,
bucket: bucket,
bucket: o.fs.bucket,
zone: o.fs.zone,
key: bucketPath,
key: key,
mimeType: mimeType,
partSize: int64(o.fs.opt.ChunkSize),
concurrency: o.fs.opt.UploadConcurrency,
@@ -1010,12 +1023,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove this object
func (o *Object) Remove(ctx context.Context) error {
bucket, bucketPath := o.split()
bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
if err != nil {
return err
}
_, err = bucketInit.DeleteObject(bucketPath)
key := o.fs.root + o.remote
_, err = bucketInit.DeleteObject(key)
return err
}

backend/qingstor/upload.go

@@ -15,7 +15,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
qs "github.com/yunify/qingstor-sdk-go/v3/service"
qs "github.com/yunify/qingstor-sdk-go/service"
)
const (

backend/s3/s3.go

@@ -23,6 +23,7 @@ import (
"path"
"regexp"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
@@ -45,7 +46,6 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
@@ -798,9 +798,10 @@ type Fs struct {
features *fs.Features // optional features
c *s3.S3 // the connection to the s3 server
ses *session.Session // the s3 session
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache for bucket creation status
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
bucketDeleted bool // true if we have deleted the bucket
pacer *fs.Pacer // To pace the API calls
srv *http.Client // a plain http client
}
@@ -818,7 +819,6 @@ type Object struct {
lastModified time.Time // Last modified
meta map[string]*string // The object metadata if known - may be nil
mimeType string // MimeType of object - may be ""
storageClass string // eg GLACIER
}
// ------------------------------------------------------------
@@ -830,18 +830,18 @@ func (f *Fs) Name() string {
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootBucket == "" {
return fmt.Sprintf("S3 root")
if f.root == "" {
return fmt.Sprintf("S3 bucket %s", f.bucket)
}
if f.rootDirectory == "" {
return fmt.Sprintf("S3 bucket %s", f.rootBucket)
}
return fmt.Sprintf("S3 bucket %s path %s", f.rootBucket, f.rootDirectory)
return fmt.Sprintf("S3 bucket %s path %s", f.bucket, f.root)
}
// Features returns the optional features of this Fs
@@ -868,16 +868,14 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
}
// Failing that, if it's a RequestFailure it's probably got an http status code we can check
if reqErr, ok := err.(awserr.RequestFailure); ok {
// 301 if wrong region for bucket - can only update if running from a bucket
if f.rootBucket != "" {
if reqErr.StatusCode() == http.StatusMovedPermanently {
urfbErr := f.updateRegionForBucket(f.rootBucket)
if urfbErr != nil {
fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr)
return false, err
}
return true, err
// 301 if wrong region for bucket
if reqErr.StatusCode() == http.StatusMovedPermanently {
urfbErr := f.updateRegionForBucket()
if urfbErr != nil {
fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr)
return false, err
}
return true, err
}
for _, e := range retryErrorCodes {
if reqErr.StatusCode() == e {
@@ -890,23 +888,21 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
return fserrors.ShouldRetry(err), err
}
// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
// Pattern to match a s3 path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
// s3ParsePath parses an s3 'url'
func s3ParsePath(path string) (bucket, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("couldn't parse bucket out of s3 path %q", path)
} else {
bucket, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
return
}
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
return bucket.Split(path.Join(f.root, rootRelativePath))
}
// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}
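The split helpers above are the heart of the new rootBucket/rootDirectory scheme: every remote is joined onto f.root, and the first path segment is peeled off as the bucket. A sketch of that behaviour, under the assumption that lib/bucket.Split treats the first "/"-separated segment as the bucket (the split below is illustrative, not the library's code):

package main

import (
	"fmt"
	"path"
	"strings"
)

// split peels the first path segment off as the bucket and returns the
// rest as the in-bucket path.
func split(absPath string) (bucket, bucketPath string) {
	absPath = strings.Trim(absPath, "/")
	if i := strings.IndexRune(absPath, '/'); i >= 0 {
		return absPath[:i], absPath[i+1:]
	}
	return absPath, ""
}

func main() {
	root := "mybucket/photos" // what f.root would hold after parsePath
	fmt.Println(split(path.Join(root, "2019/cat.jpg")))
	// Output: mybucket photos/2019/cat.jpg
}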
// s3Connection makes a connection to s3
func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
// Make the auth
@@ -1043,12 +1039,6 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
return
}
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
@@ -1065,6 +1055,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "s3: upload cutoff")
}
bucket, directory, err := s3ParsePath(root)
if err != nil {
return nil, err
}
if opt.ACL == "" {
opt.ACL = "private"
}
@@ -1076,39 +1070,38 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
f := &Fs{
name: name,
opt: *opt,
c: c,
ses: ses,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
srv: fshttp.NewClient(fs.Config),
name: name,
root: directory,
opt: *opt,
c: c,
bucket: bucket,
ses: ses,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
srv: fshttp.NewClient(fs.Config),
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
SetTier: true,
GetTier: true,
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
}).Fill(f)
if f.rootBucket != "" && f.rootDirectory != "" {
if f.root != "" {
f.root += "/"
// Check to see if the object exists
req := s3.HeadObjectInput{
Bucket: &f.rootBucket,
Key: &f.rootDirectory,
Bucket: &f.bucket,
Key: &directory,
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.c.HeadObject(&req)
return f.shouldRetry(err)
})
if err == nil {
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
f.root = path.Dir(directory)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
f.setRoot(newRoot)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
@@ -1135,7 +1128,6 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Obje
}
o.etag = aws.StringValue(info.ETag)
o.bytes = aws.Int64Value(info.Size)
o.storageClass = aws.StringValue(info.StorageClass)
} else {
err := o.readMetaData(ctx) // reads info and meta, returning an error
if err != nil {
@@ -1152,9 +1144,9 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
}
// Gets the bucket location
func (f *Fs) getBucketLocation(bucket string) (string, error) {
func (f *Fs) getBucketLocation() (string, error) {
req := s3.GetBucketLocationInput{
Bucket: &bucket,
Bucket: &f.bucket,
}
var resp *s3.GetBucketLocationOutput
var err error
@@ -1170,8 +1162,8 @@ func (f *Fs) getBucketLocation(bucket string) (string, error) {
// Updates the region for the bucket by reading the region from the
// bucket then updating the session.
func (f *Fs) updateRegionForBucket(bucket string) error {
region, err := f.getBucketLocation(bucket)
func (f *Fs) updateRegionForBucket() error {
region, err := f.getBucketLocation()
if err != nil {
return errors.Wrap(err, "reading bucket location failed")
}
@@ -1199,18 +1191,15 @@ func (f *Fs) updateRegionForBucket(bucket string) error {
// listFn is called from list to handle an object.
type listFn func(remote string, object *s3.Object, isDirectory bool) error
// list lists the objects into the function supplied from
// the bucket and directory supplied. The remote has prefix
// removed from it and if addBucket is set then it adds the
// bucket to the start.
// list the objects into the function supplied
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) error {
if prefix != "" {
prefix += "/"
}
if directory != "" {
directory += "/"
func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) error {
root := f.root
if dir != "" {
root += dir + "/"
}
maxKeys := int64(listChunkSize)
delimiter := ""
@@ -1221,9 +1210,9 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
for {
// FIXME need to implement ALL loop
req := s3.ListObjectsInput{
Bucket: &bucket,
Bucket: &f.bucket,
Delimiter: &delimiter,
Prefix: &directory,
Prefix: &root,
MaxKeys: &maxKeys,
Marker: marker,
}
@@ -1239,19 +1228,9 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
err = fs.ErrorDirNotFound
}
}
if f.rootBucket == "" {
// if listing from the root ignore wrong region requests returning
// empty directory
if reqErr, ok := err.(awserr.RequestFailure); ok {
// 301 if wrong region for bucket
if reqErr.StatusCode() == http.StatusMovedPermanently {
fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucket)
return nil
}
}
}
return err
}
rootLength := len(f.root)
if !recurse {
for _, commonPrefix := range resp.CommonPrefixes {
if commonPrefix.Prefix == nil {
@@ -1259,14 +1238,11 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
continue
}
remote := *commonPrefix.Prefix
if !strings.HasPrefix(remote, prefix) {
if !strings.HasPrefix(remote, f.root) {
fs.Logf(f, "Odd name received %q", remote)
continue
}
remote = remote[len(prefix):]
if addBucket {
remote = path.Join(bucket, remote)
}
remote = remote[rootLength:]
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
@@ -1277,18 +1253,22 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
}
for _, object := range resp.Contents {
remote := aws.StringValue(object.Key)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
key := aws.StringValue(object.Key)
if !strings.HasPrefix(key, f.root) {
fs.Logf(f, "Odd name received %q", key)
continue
}
remote = remote[len(prefix):]
isDirectory := strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
remote := key[rootLength:]
// is this a directory marker?
if isDirectory && object.Size != nil && *object.Size == 0 {
if (strings.HasSuffix(remote, "/") || remote == "") && *object.Size == 0 {
if recurse && remote != "" {
// add a directory in if --fast-list since it will have no prefixes
remote = remote[:len(remote)-1]
err = fn(remote, &s3.Object{Key: &remote}, true)
if err != nil {
return err
}
}
continue // skip directory marker
}
err = fn(remote, object, false)
@@ -1329,10 +1309,20 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *s3.Objec
return o, nil
}
// mark the bucket as being OK
func (f *Fs) markBucketOK() {
if f.bucket != "" {
f.bucketOKMu.Lock()
f.bucketOK = true
f.bucketDeleted = false
f.bucketOKMu.Unlock()
}
}
// listDir lists files and directories to out
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// List the objects and directories
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *s3.Object, isDirectory bool) error {
err = f.list(ctx, dir, false, func(remote string, object *s3.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil {
return err
@@ -1346,12 +1336,15 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
return nil, err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
f.markBucketOK()
return entries, nil
}
// listBuckets lists the buckets to out
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
func (f *Fs) listBuckets(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
req := s3.ListBucketsInput{}
var resp *s3.ListBucketsOutput
err = f.pacer.Call(func() (bool, error) {
@@ -1362,9 +1355,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
return nil, err
}
for _, bucket := range resp.Buckets {
bucketName := aws.StringValue(bucket.Name)
f.cache.MarkOK(bucketName)
d := fs.NewDir(bucketName, aws.TimeValue(bucket.CreationDate))
d := fs.NewDir(aws.StringValue(bucket.Name), aws.TimeValue(bucket.CreationDate))
entries = append(entries, d)
}
return entries, nil
@@ -1380,14 +1371,10 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
if f.bucket == "" {
return f.listBuckets(ctx, dir)
}
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
return f.listDir(ctx, dir)
}
// ListR lists the objects and directories of the Fs starting
@@ -1405,45 +1392,24 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *s3.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
}
if bucket == "" {
entries, err := f.listBuckets(ctx)
err = f.list(ctx, dir, true, func(remote string, object *s3.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
bucket := entry.Remote()
err = listR(bucket, "", f.rootDirectory, true)
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
}
} else {
err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return list.Add(entry)
})
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return list.Flush()
}
@@ -1465,9 +1431,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// Check if the bucket exists
//
// NB this can return incorrect results if called immediately after bucket deletion
func (f *Fs) bucketExists(ctx context.Context, bucket string) (bool, error) {
func (f *Fs) dirExists(ctx context.Context) (bool, error) {
req := s3.HeadBucketInput{
Bucket: &bucket,
Bucket: &f.bucket,
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.HeadBucketWithContext(ctx, &req)
@@ -1486,61 +1452,68 @@ func (f *Fs) bucketExists(ctx context.Context, bucket string) (bool, error) {
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
bucket, _ := f.split(dir)
return f.makeBucket(ctx, bucket)
}
// makeBucket creates the bucket if it doesn't exist
func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
return f.cache.Create(bucket, func() error {
req := s3.CreateBucketInput{
Bucket: &bucket,
ACL: &f.opt.BucketACL,
}
if f.opt.LocationConstraint != "" {
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
LocationConstraint: &f.opt.LocationConstraint,
}
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.CreateBucketWithContext(ctx, &req)
return f.shouldRetry(err)
})
if err == nil {
fs.Infof(f, "Bucket %q created with ACL %q", bucket, f.opt.BucketACL)
}
if err, ok := err.(awserr.Error); ok {
if err.Code() == "BucketAlreadyOwnedByYou" {
err = nil
}
}
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.bucketOK {
return nil
}, func() (bool, error) {
return f.bucketExists(ctx, bucket)
}
if !f.bucketDeleted {
exists, err := f.dirExists(ctx)
if err == nil {
f.bucketOK = exists
}
if err != nil || exists {
return err
}
}
req := s3.CreateBucketInput{
Bucket: &f.bucket,
ACL: &f.opt.BucketACL,
}
if f.opt.LocationConstraint != "" {
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
LocationConstraint: &f.opt.LocationConstraint,
}
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.CreateBucketWithContext(ctx, &req)
return f.shouldRetry(err)
})
if err, ok := err.(awserr.Error); ok {
if err.Code() == "BucketAlreadyOwnedByYou" {
err = nil
}
}
if err == nil {
f.bucketOK = true
f.bucketDeleted = false
fs.Infof(f, "Bucket created with ACL %q", *req.ACL)
}
return err
}
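Old and new Mkdir differ mainly in where the create-once bookkeeping lives: the old code tracks bucketOK/bucketDeleted under a per-Fs mutex, while the new code hands f.cache.Create a create function plus an existence probe. A self-contained sketch of those apparent semantics (createCache is illustrative, not the lib/bucket API):

package main

import (
	"fmt"
	"sync"
)

// createCache memoizes successful creations per name: probe existence
// first, create if needed, and remember the outcome so later calls are
// no-ops.
type createCache struct {
	mu      sync.Mutex
	created map[string]bool
}

func (c *createCache) Create(name string, create func() error, exists func() (bool, error)) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.created == nil {
		c.created = make(map[string]bool)
	}
	if c.created[name] {
		return nil
	}
	if exists != nil {
		ok, err := exists()
		if err != nil {
			return err
		}
		if ok {
			c.created[name] = true
			return nil
		}
	}
	if err := create(); err != nil {
		return err
	}
	c.created[name] = true
	return nil
}

func main() {
	var cache createCache
	calls := 0
	mk := func() error { calls++; return nil }
	probe := func() (bool, error) { return false, nil }
	_ = cache.Create("mybucket", mk, probe)
	_ = cache.Create("mybucket", mk, probe) // second call is a no-op
	fmt.Println("create calls:", calls)     // 1
}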
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
bucket, directory := f.split(dir)
if bucket == "" || directory != "" {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
return nil
}
return f.cache.Remove(bucket, func() error {
req := s3.DeleteBucketInput{
Bucket: &bucket,
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.DeleteBucketWithContext(ctx, &req)
return f.shouldRetry(err)
})
if err == nil {
fs.Infof(f, "Bucket %q deleted", bucket)
}
return err
req := s3.DeleteBucketInput{
Bucket: &f.bucket,
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.DeleteBucketWithContext(ctx, &req)
return f.shouldRetry(err)
})
if err == nil {
f.bucketOK = false
f.bucketDeleted = true
fs.Infof(f, "Bucket deleted")
}
return err
}
// Precision of the remote
@@ -1554,31 +1527,6 @@ func pathEscape(s string) string {
return strings.Replace(rest.URLPathEscape(s), "+", "%2B", -1)
}
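pathEscape exists because CopySource is sent as a URL path, and a literal '+' there can be decoded as a space by S3-compatible servers, so it gets escaped to %2B on top of the normal path escaping. A sketch substituting net/url's PathEscape for rest.URLPathEscape (an assumption: PathEscape also escapes '/' where URLPathEscape preserves it, so the example sticks to a single path segment):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// pathEscapeSketch mirrors pathEscape above with stdlib escaping.
func pathEscapeSketch(s string) string {
	return strings.Replace(url.PathEscape(s), "+", "%2B", -1)
}

func main() {
	fmt.Println(pathEscapeSketch("a file+b.txt"))
	// Output: a%20file%2Bb.txt
}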
// copy does a server side copy
//
// It adds the boiler plate to the req passed in and calls the s3
// method
func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string) error {
req.Bucket = &dstBucket
req.ACL = &f.opt.ACL
req.Key = &dstPath
source := pathEscape(path.Join(srcBucket, srcPath))
req.CopySource = &source
if f.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &f.opt.ServerSideEncryption
}
if f.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &f.opt.SSEKMSKeyID
}
if req.StorageClass == nil && f.opt.StorageClass != "" {
req.StorageClass = &f.opt.StorageClass
}
return f.pacer.Call(func() (bool, error) {
_, err := f.c.CopyObjectWithContext(ctx, req)
return f.shouldRetry(err)
})
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
@@ -1589,8 +1537,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
err := f.makeBucket(ctx, dstBucket)
err := f.Mkdir(ctx, "")
if err != nil {
return nil, err
}
@@ -1599,11 +1546,29 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcBucket, srcPath := srcObj.split()
srcFs := srcObj.fs
key := f.root + remote
source := pathEscape(srcFs.bucket + "/" + srcFs.root + srcObj.remote)
req := s3.CopyObjectInput{
Bucket: &f.bucket,
ACL: &f.opt.ACL,
Key: &key,
CopySource: &source,
MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
}
err = f.copy(ctx, &req, dstBucket, dstPath, srcBucket, srcPath)
if f.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &f.opt.ServerSideEncryption
}
if f.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &f.opt.SSEKMSKeyID
}
if f.opt.StorageClass != "" {
req.StorageClass = &f.opt.StorageClass
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.c.CopyObjectWithContext(ctx, &req)
return f.shouldRetry(err)
})
if err != nil {
return nil, err
}
@@ -1675,10 +1640,10 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
if o.meta != nil {
return nil
}
bucket, bucketPath := o.split()
key := o.fs.root + o.remote
req := s3.HeadObjectInput{
Bucket: &bucket,
Key: &bucketPath,
Bucket: &o.fs.bucket,
Key: &key,
}
var resp *s3.HeadObjectOutput
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1703,7 +1668,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
o.etag = aws.StringValue(resp.ETag)
o.bytes = size
o.meta = resp.Metadata
o.storageClass = aws.StringValue(resp.StorageClass)
if resp.LastModified == nil {
fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
o.lastModified = time.Now()
@@ -1754,19 +1718,39 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return nil
}
// Can't update metadata here, so return this error to force a recopy
if o.storageClass == "GLACIER" || o.storageClass == "DEEP_ARCHIVE" {
return fs.ErrorCantSetModTime
}
// Guess the content type
mimeType := fs.MimeType(ctx, o)
// Copy the object to itself to update the metadata
bucket, bucketPath := o.split()
key := o.fs.root + o.remote
sourceKey := o.fs.bucket + "/" + key
directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
req := s3.CopyObjectInput{
ContentType: aws.String(fs.MimeType(ctx, o)), // Guess the content type
Bucket: &o.fs.bucket,
ACL: &o.fs.opt.ACL,
Key: &key,
ContentType: &mimeType,
CopySource: aws.String(pathEscape(sourceKey)),
Metadata: o.meta,
MetadataDirective: aws.String(s3.MetadataDirectiveReplace), // replace metadata with that passed in
MetadataDirective: &directive,
}
return o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath)
if o.fs.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
}
if o.fs.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
}
if o.fs.opt.StorageClass == "GLACIER" || o.fs.opt.StorageClass == "DEEP_ARCHIVE" {
return fs.ErrorCantSetModTime
}
if o.fs.opt.StorageClass != "" {
req.StorageClass = &o.fs.opt.StorageClass
}
err = o.fs.pacer.Call(func() (bool, error) {
_, err := o.fs.c.CopyObjectWithContext(ctx, &req)
return o.fs.shouldRetry(err)
})
return err
}
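Both SetModTime variants lean on the same S3 trick: object metadata cannot be edited in place, but copying an object onto itself with the REPLACE metadata directive rewrites it server side. A minimal aws-sdk-go sketch of that self-copy (bucket, key and the Mtime value are hypothetical):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := s3.New(sess)
	// Copy the object onto itself, replacing its metadata wholesale.
	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:            aws.String("mybucket"),               // hypothetical
		Key:               aws.String("path/file.txt"),          // hypothetical
		CopySource:        aws.String("mybucket/path/file.txt"), // the same object
		Metadata:          map[string]*string{"Mtime": aws.String("1565100000.0")},
		MetadataDirective: aws.String(s3.MetadataDirectiveReplace),
	})
	fmt.Println("self-copy error:", err)
}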
// Storable returns a boolean indicating if this object is storable
@@ -1776,12 +1760,11 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
bucket, bucketPath := o.split()
key := o.fs.root + o.remote
req := s3.GetObjectInput{
Bucket: &bucket,
Key: &bucketPath,
Bucket: &o.fs.bucket,
Key: &key,
}
fs.FixRangeOption(options, o.bytes)
for _, option := range options {
switch option.(type) {
case *fs.RangeOption, *fs.SeekOption:
@@ -1801,7 +1784,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
})
if err, ok := err.(awserr.RequestFailure); ok {
if err.Code() == "InvalidObjectState" {
return nil, errors.Errorf("Object in GLACIER, restore first: bucket=%q, key=%q", bucket, bucketPath)
return nil, errors.Errorf("Object in GLACIER, restore first: %v", key)
}
}
if err != nil {
@@ -1812,8 +1795,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the Object from in with modTime and size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
bucket, bucketPath := o.split()
err := o.fs.makeBucket(ctx, bucket)
err := o.fs.Mkdir(ctx, "")
if err != nil {
return err
}
@@ -1866,11 +1848,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Guess the content type
mimeType := fs.MimeType(ctx, src)
key := o.fs.root + o.remote
if multipart {
req := s3manager.UploadInput{
Bucket: &bucket,
Bucket: &o.fs.bucket,
ACL: &o.fs.opt.ACL,
Key: &bucketPath,
Key: &key,
Body: in,
ContentType: &mimeType,
Metadata: metadata,
@@ -1894,9 +1878,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
} else {
req := s3.PutObjectInput{
Bucket: &bucket,
Bucket: &o.fs.bucket,
ACL: &o.fs.opt.ACL,
Key: &bucketPath,
Key: &key,
ContentType: &mimeType,
Metadata: metadata,
}
@@ -1925,10 +1909,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errors.Wrap(err, "s3 upload: sign request")
}
if o.fs.opt.V2Auth && headers == nil {
headers = putObj.HTTPRequest.Header
}
// Set request to nil if empty so as not to use chunked encoding
if size == 0 {
in = nil
@@ -1939,7 +1919,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return errors.Wrap(err, "s3 upload: new request")
}
httpReq = httpReq.WithContext(ctx) // go1.13 can use NewRequestWithContext
httpReq = httpReq.WithContext(ctx)
// set the headers we signed and the length
httpReq.Header = headers
@@ -1973,10 +1953,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
bucket, bucketPath := o.split()
key := o.fs.root + o.remote
req := s3.DeleteObjectInput{
Bucket: &bucket,
Key: &bucketPath,
Bucket: &o.fs.bucket,
Key: &key,
}
err := o.fs.pacer.Call(func() (bool, error) {
_, err := o.fs.c.DeleteObjectWithContext(ctx, &req)
@@ -1995,31 +1975,6 @@ func (o *Object) MimeType(ctx context.Context) string {
return o.mimeType
}
// SetTier performs changing storage class
func (o *Object) SetTier(tier string) (err error) {
ctx := context.TODO()
tier = strings.ToUpper(tier)
bucket, bucketPath := o.split()
req := s3.CopyObjectInput{
MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
StorageClass: aws.String(tier),
}
err = o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath)
if err != nil {
return err
}
o.storageClass = tier
return err
}
// GetTier returns storage class as string
func (o *Object) GetTier() string {
if o.storageClass == "" {
return "STANDARD"
}
return o.storageClass
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
@@ -2028,6 +1983,4 @@ var (
_ fs.ListRer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.GetTierer = &Object{}
_ fs.SetTierer = &Object{}
)
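The closing var block is Go's standard compile-time assertion that *Fs and *Object still satisfy the optional interfaces; dropping fs.GetTierer and fs.SetTierer here is what formally removes tier support from the backend. The pattern in isolation (Sizer and Object below are illustrative stand-ins):

package main

import "fmt"

// Sizer stands in for one of the optional fs interfaces.
type Sizer interface{ Size() int64 }

type Object struct{ bytes int64 }

func (o *Object) Size() int64 { return o.bytes }

// Compile-time check: the build breaks if *Object ever stops implementing
// Sizer, exactly what the var block above does for fs.Object and friends.
var _ Sizer = (*Object)(nil)

func main() {
	o := &Object{bytes: 42}
	fmt.Println(o.Size())
}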

backend/s3/s3_test.go

@@ -11,9 +11,8 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestS3:",
NilObject: (*Object)(nil),
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
RemoteName: "TestS3:",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
},

backend/sftp/sftp.go

@@ -36,8 +36,7 @@ import (
)
const (
connectionsPerSecond = 10 // don't make more than this many ssh connections/s
hashCommandNotSupported = "none"
connectionsPerSecond = 10 // don't make more than this many ssh connections/s
)
var (
@@ -103,14 +102,9 @@ when the ssh-agent contains many keys.`,
Default: false,
Help: "Disable the execution of SSH commands to determine if remote file hashing is available.\nLeave blank or set to false to enable hashing (recommended), set to true to disable hashing.",
}, {
Name: "ask_password",
Default: false,
Help: `Allow asking for SFTP password when needed.
If this is set and no password is supplied then rclone will:
- ask for a password
- not contact the ssh agent
`,
Name: "ask_password",
Default: false,
Help: "Allow asking for SFTP password when needed.",
Advanced: true,
}, {
Name: "path_override",
@@ -133,16 +127,6 @@ Home directory can be found in a shared folder called "home"
Default: true,
Help: "Set the modified time on the remote if set.",
Advanced: true,
}, {
Name: "md5sum_command",
Default: "",
Help: "The command used to read md5 hashes. Leave blank for autodetect.",
Advanced: true,
}, {
Name: "sha1sum_command",
Default: "",
Help: "The command used to read sha1 hashes. Leave blank for autodetect.",
Advanced: true,
}},
}
fs.Register(fsi)
@@ -162,17 +146,14 @@ type Options struct {
AskPassword bool `config:"ask_password"`
PathOverride string `config:"path_override"`
SetModTime bool `config:"set_modtime"`
Md5sumCommand string `config:"md5sum_command"`
Sha1sumCommand string `config:"sha1sum_command"`
}
// Fs stores the interface to the remote SFTP files
type Fs struct {
name string
root string
opt Options // parsed options
m configmap.Mapper // config
features *fs.Features // optional features
opt Options // parsed options
features *fs.Features // optional features
config *ssh.ClientConfig
url string
mkdirLock *stringLock
@@ -369,7 +350,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
keyFile := env.ShellExpand(opt.KeyFile)
// Add ssh agent-auth if no password or file specified
if (opt.Pass == "" && keyFile == "" && !opt.AskPassword) || opt.KeyUseAgent {
if (opt.Pass == "" && keyFile == "") || opt.KeyUseAgent {
sshAgentClient, _, err := sshagent.New()
if err != nil {
return nil, errors.Wrap(err, "couldn't connect to ssh-agent")
@@ -440,17 +421,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
}
return NewFsWithConnection(ctx, name, root, m, opt, sshConfig)
return NewFsWithConnection(ctx, name, root, opt, sshConfig)
}
// NewFsWithConnection creates a new Fs object from the name and root and a ssh.ClientConfig. It connects to
// the host specified in the ssh.ClientConfig
func NewFsWithConnection(ctx context.Context, name string, root string, m configmap.Mapper, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
func NewFsWithConnection(ctx context.Context, name string, root string, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
f := &Fs{
name: name,
root: root,
opt: *opt,
m: m,
config: sshConfig,
url: "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root,
mkdirLock: newStringLock(),
@@ -776,79 +756,45 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return nil
}
// run runs cmd on the remote end, returning its standard output
func (f *Fs) run(cmd string) ([]byte, error) {
c, err := f.getSftpConnection()
if err != nil {
return nil, errors.Wrap(err, "run: get SFTP connection")
}
defer f.putSftpConnection(&c, err)
session, err := c.sshClient.NewSession()
if err != nil {
return nil, errors.Wrap(err, "run: get SFTP session")
}
defer func() {
_ = session.Close()
}()
var stdout, stderr bytes.Buffer
session.Stdout = &stdout
session.Stderr = &stderr
err = session.Run(cmd)
if err != nil {
return nil, errors.Wrapf(err, "failed to run %q: %s", cmd, stderr.Bytes())
}
return stdout.Bytes(), nil
}
// Hashes returns the supported hash types of the filesystem
func (f *Fs) Hashes() hash.Set {
if f.opt.DisableHashCheck {
return hash.Set(hash.None)
}
if f.cachedHashes != nil {
return *f.cachedHashes
}
// look for a hash command which works
checkHash := func(commands []string, expected string, hashCommand *string, changed *bool) bool {
if *hashCommand == hashCommandNotSupported {
return false
}
if *hashCommand != "" {
return true
}
*changed = true
for _, command := range commands {
output, err := f.run(command)
if err != nil {
continue
}
output = bytes.TrimSpace(output)
fs.Debugf(f, "checking %q command: %q", command, output)
if parseHash(output) == expected {
*hashCommand = command
return true
}
}
*hashCommand = hashCommandNotSupported
return false
if f.opt.DisableHashCheck {
return hash.Set(hash.None)
}
changed := false
md5Works := checkHash([]string{"md5sum", "md5 -r"}, "d41d8cd98f00b204e9800998ecf8427e", &f.opt.Md5sumCommand, &changed)
sha1Works := checkHash([]string{"sha1sum", "sha1 -r"}, "da39a3ee5e6b4b0d3255bfef95601890afd80709", &f.opt.Sha1sumCommand, &changed)
if changed {
f.m.Set("md5sum_command", f.opt.Md5sumCommand)
f.m.Set("sha1sum_command", f.opt.Sha1sumCommand)
c, err := f.getSftpConnection()
if err != nil {
fs.Errorf(f, "Couldn't get SSH connection to figure out Hashes: %v", err)
return hash.Set(hash.None)
}
defer f.putSftpConnection(&c, err)
session, err := c.sshClient.NewSession()
if err != nil {
return hash.Set(hash.None)
}
sha1Output, _ := session.Output("echo 'abc' | sha1sum")
expectedSha1 := "03cfd743661f07975fa2f1220c5194cbaff48451"
_ = session.Close()
session, err = c.sshClient.NewSession()
if err != nil {
return hash.Set(hash.None)
}
md5Output, _ := session.Output("echo 'abc' | md5sum")
expectedMd5 := "0bee89b07a248e27c83fc3d5951213c1"
_ = session.Close()
sha1Works := parseHash(sha1Output) == expectedSha1
md5Works := parseHash(md5Output) == expectedMd5
set := hash.NewHashSet()
if !sha1Works && !md5Works {
set.Add(hash.None)
}
if sha1Works {
set.Add(hash.SHA1)
}
@@ -856,12 +802,26 @@ func (f *Fs) Hashes() hash.Set {
set.Add(hash.MD5)
}
_ = session.Close()
f.cachedHashes = &set
return set
}
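Both detection strategies compare a command's output against the digest of a known input: the rewritten code hashes empty input (any working md5sum must print d41d8cd9... for it), while the older code pipes 'abc' plus a newline through the command. The constants used above check out locally:

package main

import (
	"crypto/md5"
	"crypto/sha1"
	"fmt"
)

func main() {
	// Digests the detection code compares against:
	fmt.Printf("%x\n", md5.Sum(nil))              // d41d8cd98f00b204e9800998ecf8427e
	fmt.Printf("%x\n", sha1.Sum(nil))             // da39a3ee5e6b4b0d3255bfef95601890afd80709
	fmt.Printf("%x\n", md5.Sum([]byte("abc\n")))  // 0bee89b07a248e27c83fc3d5951213c1
	fmt.Printf("%x\n", sha1.Sum([]byte("abc\n"))) // 03cfd743661f07975fa2f1220c5194cbaff48451
}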
// About gets usage stats
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
c, err := f.getSftpConnection()
if err != nil {
return nil, errors.Wrap(err, "About get SFTP connection")
}
session, err := c.sshClient.NewSession()
f.putSftpConnection(&c, err)
if err != nil {
return nil, errors.Wrap(err, "About put SFTP connection")
}
var stdout, stderr bytes.Buffer
session.Stdout = &stdout
session.Stderr = &stderr
escapedPath := shellEscape(f.root)
if f.opt.PathOverride != "" {
escapedPath = shellEscape(path.Join(f.opt.PathOverride, f.root))
@@ -869,12 +829,14 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if len(escapedPath) == 0 {
escapedPath = "/"
}
stdout, err := f.run("df -k " + escapedPath)
err = session.Run("df -k " + escapedPath)
if err != nil {
return nil, errors.Wrap(err, "your remote may not support About")
_ = session.Close()
return nil, errors.Wrap(err, "About invocation of df failed. Your remote may not support about.")
}
_ = session.Close()
usageTotal, usageUsed, usageAvail := parseUsage(stdout)
usageTotal, usageUsed, usageAvail := parseUsage(stdout.Bytes())
usage := &fs.Usage{}
if usageTotal >= 0 {
usage.Total = fs.NewUsageValue(usageTotal)
@@ -909,27 +871,23 @@ func (o *Object) Remote() string {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
if o.fs.opt.DisableHashCheck {
return "", nil
}
_ = o.fs.Hashes()
var hashCmd string
if r == hash.MD5 {
if o.md5sum != nil {
return *o.md5sum, nil
}
hashCmd = o.fs.opt.Md5sumCommand
hashCmd = "md5sum"
} else if r == hash.SHA1 {
if o.sha1sum != nil {
return *o.sha1sum, nil
}
hashCmd = o.fs.opt.Sha1sumCommand
hashCmd = "sha1sum"
} else {
return "", hash.ErrUnsupported
}
if hashCmd == "" || hashCmd == hashCommandNotSupported {
return "", hash.ErrUnsupported
if o.fs.opt.DisableHashCheck {
return "", nil
}
c, err := o.fs.getSftpConnection()

backend/swift/swift.go

@@ -8,8 +8,10 @@ import (
"fmt"
"io"
"path"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/ncw/swift"
@@ -22,9 +24,7 @@ import (
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
)
// Constants
@@ -207,16 +207,17 @@ type Options struct {
// Fs represents a remote swift server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
features *fs.Features // optional features
opt Options // options for this backend
c *swift.Connection // the connection to the swift server
rootContainer string // container part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache of container status
noCheckContainer bool // don't check the container before creating it
pacer *fs.Pacer // To pace the API calls
name string // name of this remote
root string // the path we are working on if any
features *fs.Features // optional features
opt Options // options for this backend
c *swift.Connection // the connection to the swift server
container string // the container we are working on
containerOKMu sync.Mutex // mutex to protect container OK
containerOK bool // true if we have created the container
segmentsContainer string // container to store the segments (if any) in
noCheckContainer bool // don't check the container before creating it
pacer *fs.Pacer // To pace the API calls
}
// Object describes a swift object
@@ -241,18 +242,18 @@ func (f *Fs) Name() string {
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
if f.root == "" {
return f.container
}
return f.container + "/" + f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootContainer == "" {
return fmt.Sprintf("Swift root")
if f.root == "" {
return fmt.Sprintf("Swift container %s", f.container)
}
if f.rootDirectory == "" {
return fmt.Sprintf("Swift container %s", f.rootContainer)
}
return fmt.Sprintf("Swift container %s path %s", f.rootContainer, f.rootDirectory)
return fmt.Sprintf("Swift container %s path %s", f.container, f.root)
}
// Features returns the optional features of this Fs
@@ -311,23 +312,21 @@ func shouldRetryHeaders(headers swift.Headers, err error) (bool, error) {
return shouldRetry(err)
}
// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
// Pattern to match a swift path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
// parsePath parses a swift 'url'
func parsePath(path string) (container, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("couldn't find container in swift path %q", path)
} else {
container, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
return
}
// split returns container and containerPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (container, containerPath string) {
return bucket.Split(path.Join(f.root, rootRelativePath))
}
// split returns container and containerPath from the object
func (o *Object) split() (container, containerPath string) {
return o.fs.split(o.remote)
}
// swiftConnection makes a connection to swift
func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
c := &swift.Connection{
@@ -410,48 +409,47 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
return
}
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootContainer, f.rootDirectory = bucket.Split(f.root)
}
// NewFsWithConnection constructs an Fs from the path, container:path
// and authenticated connection.
//
// if noCheckContainer is set then the Fs won't check the container
// exists before creating it.
func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
f := &Fs{
name: name,
opt: *opt,
c: c,
noCheckContainer: noCheckContainer,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
container, directory, err := parsePath(root)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
opt: *opt,
c: c,
container: container,
segmentsContainer: container + "_segments",
root: directory,
noCheckContainer: noCheckContainer,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
}).Fill(f)
if f.rootContainer != "" && f.rootDirectory != "" {
if f.root != "" {
f.root += "/"
// Check to see if the object exists - ignoring directory markers
var info swift.Object
var err error
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
info, rxHeaders, err = f.c.Object(f.rootContainer, f.rootDirectory)
info, rxHeaders, err = f.c.Object(container, directory)
return shouldRetryHeaders(rxHeaders, err)
})
if err == nil && info.ContentType != directoryMarkerContentType {
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
f.root = path.Dir(directory)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
f.setRoot(newRoot)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
@@ -519,26 +517,23 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
type listFn func(remote string, object *swift.Object, isDirectory bool) error
// listContainerRoot lists the objects into the function supplied from
// the container and directory supplied. The remote has prefix
// removed from it and if addContainer is set then it adds the
// container to the start.
// the container and root supplied
//
// Set recurse to read sub directories
func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer bool, recurse bool, fn listFn) error {
if prefix != "" {
prefix += "/"
}
if directory != "" {
directory += "/"
func (f *Fs) listContainerRoot(container, root string, dir string, recurse bool, fn listFn) error {
prefix := root
if dir != "" {
prefix += dir + "/"
}
// Options for ObjectsWalk
opts := swift.ObjectsOpts{
Prefix: directory,
Prefix: prefix,
Limit: listChunks,
}
if !recurse {
opts.Delimiter = '/'
}
rootLength := len(root)
return f.c.ObjectsWalk(container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
var objects []swift.Object
var err error
@@ -563,10 +558,7 @@ func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer
// duplicate directories. Ignore them here.
continue
}
remote := object.Name[len(prefix):]
if addContainer {
remote = path.Join(container, remote)
}
remote := object.Name[rootLength:]
err = fn(remote, object, isDirectory)
if err != nil {
break
@@ -580,8 +572,8 @@ func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer
type addEntryFn func(fs.DirEntry) error
// list the objects into the function supplied
func (f *Fs) list(container, directory, prefix string, addContainer bool, recurse bool, fn addEntryFn) error {
err := f.listContainerRoot(container, directory, prefix, addContainer, recurse, func(remote string, object *swift.Object, isDirectory bool) (err error) {
func (f *Fs) list(dir string, recurse bool, fn addEntryFn) error {
err := f.listContainerRoot(f.container, f.root, dir, recurse, func(remote string, object *swift.Object, isDirectory bool) (err error) {
if isDirectory {
remote = strings.TrimRight(remote, "/")
d := fs.NewDir(remote, time.Time{}).SetSize(object.Bytes)
@@ -605,13 +597,22 @@ func (f *Fs) list(container, directory, prefix string, addContainer bool, recurs
return err
}
// mark the container as being OK
func (f *Fs) markContainerOK() {
if f.container != "" {
f.containerOKMu.Lock()
f.containerOK = true
f.containerOKMu.Unlock()
}
}
// listDir lists a single directory
func (f *Fs) listDir(container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
if container == "" {
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
if f.container == "" {
return nil, fs.ErrorListBucketRequired
}
// List the objects
err = f.list(container, directory, prefix, addContainer, false, func(entry fs.DirEntry) error {
err = f.list(dir, false, func(entry fs.DirEntry) error {
entries = append(entries, entry)
return nil
})
@@ -619,12 +620,15 @@ func (f *Fs) listDir(container, directory, prefix string, addContainer bool) (en
return nil, err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
f.markContainerOK()
return entries, nil
}
// listContainers lists the containers
func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) {
func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
var containers []swift.Container
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(nil)
@@ -634,7 +638,6 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
return nil, errors.Wrap(err, "container listing failed")
}
for _, container := range containers {
f.cache.MarkOK(container.Name)
d := fs.NewDir(container.Name, time.Time{}).SetSize(container.Bytes).SetItems(container.Count)
entries = append(entries, d)
}
@@ -651,14 +654,10 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
container, directory := f.split(dir)
if container == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listContainers(ctx)
if f.container == "" {
return f.listContainers(dir)
}
return f.listDir(container, directory, f.rootDirectory, f.rootContainer == "")
return f.listDir(dir)
}
// ListR lists the objects and directories of the Fs starting
@@ -676,41 +675,20 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
container, directory := f.split(dir)
if f.container == "" {
return errors.New("container needed for recursive list")
}
list := walk.NewListRHelper(callback)
listR := func(container, directory, prefix string, addContainer bool) error {
return f.list(container, directory, prefix, addContainer, true, func(entry fs.DirEntry) error {
return list.Add(entry)
})
}
if container == "" {
entries, err := f.listContainers(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
container := entry.Remote()
err = listR(container, "", f.rootDirectory, true)
if err != nil {
return err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
}
} else {
err = listR(container, directory, f.rootDirectory, f.rootContainer == "")
if err != nil {
return err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
err = f.list(dir, true, func(entry fs.DirEntry) error {
return list.Add(entry)
})
if err != nil {
return err
}
// container must be present if listing succeeded
f.markContainerOK()
return list.Flush()
}
@@ -759,57 +737,57 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
container, _ := f.split(dir)
return f.makeContainer(ctx, container)
}
// makeContainer creates the container if it doesn't exist
func (f *Fs) makeContainer(ctx context.Context, container string) error {
return f.cache.Create(container, func() error {
// Check to see if container exists first
var err error = swift.ContainerNotFound
if !f.noCheckContainer {
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
_, rxHeaders, err = f.c.Container(container)
return shouldRetryHeaders(rxHeaders, err)
})
f.containerOKMu.Lock()
defer f.containerOKMu.Unlock()
if f.containerOK {
return nil
}
// if we are at the root, then it is OK
if f.container == "" {
return nil
}
// Check to see if container exists first
var err error = swift.ContainerNotFound
if !f.noCheckContainer {
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
_, rxHeaders, err = f.c.Container(f.container)
return shouldRetryHeaders(rxHeaders, err)
})
}
if err == swift.ContainerNotFound {
headers := swift.Headers{}
if f.opt.StoragePolicy != "" {
headers["X-Storage-Policy"] = f.opt.StoragePolicy
}
if err == swift.ContainerNotFound {
headers := swift.Headers{}
if f.opt.StoragePolicy != "" {
headers["X-Storage-Policy"] = f.opt.StoragePolicy
}
err = f.pacer.Call(func() (bool, error) {
err = f.c.ContainerCreate(container, headers)
return shouldRetry(err)
})
if err == nil {
fs.Infof(f, "Container %q created", container)
}
}
return err
}, nil)
err = f.pacer.Call(func() (bool, error) {
err = f.c.ContainerCreate(f.container, headers)
return shouldRetry(err)
})
}
if err == nil {
f.containerOK = true
}
return err
}
// Rmdir deletes the container if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
container, directory := f.split(dir)
if container == "" || directory != "" {
f.containerOKMu.Lock()
defer f.containerOKMu.Unlock()
if f.root != "" || dir != "" {
return nil
}
err := f.cache.Remove(container, func() error {
err := f.pacer.Call(func() (bool, error) {
err := f.c.ContainerDelete(container)
return shouldRetry(err)
})
if err == nil {
fs.Infof(f, "Container %q removed", container)
}
return err
var err error
err = f.pacer.Call(func() (bool, error) {
err = f.c.ContainerDelete(f.container)
return shouldRetry(err)
})
if err == nil {
f.containerOK = false
}
return err
}
@@ -828,7 +806,7 @@ func (f *Fs) Purge(ctx context.Context) error {
go func() {
delErr <- operations.DeleteFiles(ctx, toBeDeleted)
}()
err := f.list(f.rootContainer, f.rootDirectory, f.rootDirectory, f.rootContainer == "", true, func(entry fs.DirEntry) error {
err := f.list("", true, func(entry fs.DirEntry) error {
if o, ok := entry.(*Object); ok {
toBeDeleted <- o
}
@@ -855,8 +833,7 @@ func (f *Fs) Purge(ctx context.Context) error {
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstContainer, dstPath := f.split(remote)
err := f.makeContainer(ctx, dstContainer)
err := f.Mkdir(ctx, "")
if err != nil {
return nil, err
}
@@ -865,10 +842,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcContainer, srcPath := srcObj.split()
srcFs := srcObj.fs
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = f.c.ObjectCopy(srcContainer, srcPath, dstContainer, dstPath, nil)
rxHeaders, err = f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
return shouldRetryHeaders(rxHeaders, err)
})
if err != nil {
@@ -977,9 +954,8 @@ func (o *Object) readMetaData() (err error) {
}
var info swift.Object
var h swift.Headers
container, containerPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
info, h, err = o.fs.c.Object(container, containerPath)
info, h, err = o.fs.c.Object(o.fs.container, o.fs.root+o.remote)
return shouldRetryHeaders(h, err)
})
if err != nil {
@@ -1036,9 +1012,8 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
newHeaders[k] = v
}
}
container, containerPath := o.split()
return o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectUpdate(container, containerPath, newHeaders)
err = o.fs.c.ObjectUpdate(o.fs.container, o.fs.root+o.remote, newHeaders)
return shouldRetry(err)
})
}
@@ -1056,10 +1031,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
fs.FixRangeOption(options, o.size)
headers := fs.OpenOptionHeaders(options)
_, isRanging := headers["Range"]
container, containerPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
in, rxHeaders, err = o.fs.c.ObjectOpen(container, containerPath, !isRanging, headers)
in, rxHeaders, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, !isRanging, headers)
return shouldRetryHeaders(rxHeaders, err)
})
return
@@ -1077,20 +1051,20 @@ func min(x, y int64) int64 {
//
// if except is passed in then segments with that prefix won't be deleted
func (o *Object) removeSegments(except string) error {
container, containerPath := o.split()
segmentsContainer := container + "_segments"
err := o.fs.listContainerRoot(segmentsContainer, containerPath, "", false, true, func(remote string, object *swift.Object, isDirectory bool) error {
segmentsRoot := o.fs.root + o.remote + "/"
err := o.fs.listContainerRoot(o.fs.segmentsContainer, segmentsRoot, "", true, func(remote string, object *swift.Object, isDirectory bool) error {
if isDirectory {
return nil
}
if except != "" && strings.HasPrefix(remote, except) {
// fs.Debugf(o, "Ignoring current segment file %q in container %q", segmentsRoot+remote, segmentsContainer)
// fs.Debugf(o, "Ignoring current segment file %q in container %q", segmentsRoot+remote, o.fs.segmentsContainer)
return nil
}
fs.Debugf(o, "Removing segment file %q in container %q", remote, segmentsContainer)
segmentPath := segmentsRoot + remote
fs.Debugf(o, "Removing segment file %q in container %q", segmentPath, o.fs.segmentsContainer)
var err error
return o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectDelete(segmentsContainer, remote)
err = o.fs.c.ObjectDelete(o.fs.segmentsContainer, segmentPath)
return shouldRetry(err)
})
})
@@ -1099,11 +1073,11 @@ func (o *Object) removeSegments(except string) error {
}
// remove the segments container if empty, ignore errors
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ContainerDelete(segmentsContainer)
err = o.fs.c.ContainerDelete(o.fs.segmentsContainer)
return shouldRetry(err)
})
if err == nil {
fs.Debugf(o, "Removed empty container %q", segmentsContainer)
fs.Debugf(o, "Removed empty container %q", o.fs.segmentsContainer)
}
return nil
}
@@ -1128,13 +1102,11 @@ func urlEncode(str string) string {
// updateChunks updates the existing object using chunks to a separate
// container. It returns a string which prefixes current segments.
func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
container, containerPath := o.split()
segmentsContainer := container + "_segments"
// Create the segmentsContainer if it doesn't exist
var err error
err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
_, rxHeaders, err = o.fs.c.Container(segmentsContainer)
_, rxHeaders, err = o.fs.c.Container(o.fs.segmentsContainer)
return shouldRetryHeaders(rxHeaders, err)
})
if err == swift.ContainerNotFound {
@@ -1143,7 +1115,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
headers["X-Storage-Policy"] = o.fs.opt.StoragePolicy
}
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ContainerCreate(segmentsContainer, headers)
err = o.fs.c.ContainerCreate(o.fs.segmentsContainer, headers)
return shouldRetry(err)
})
}
@@ -1154,7 +1126,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
left := size
i := 0
uniquePrefix := fmt.Sprintf("%s/%d", swift.TimeToFloatString(time.Now()), size)
segmentsPath := path.Join(containerPath, uniquePrefix)
segmentsPath := fmt.Sprintf("%s%s/%s", o.fs.root, o.remote, uniquePrefix)
in := bufio.NewReader(in0)
segmentInfos := make([]string, 0, ((size / int64(o.fs.opt.ChunkSize)) + 1))
for {
@@ -1163,7 +1135,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
if left > 0 {
return "", err // read less than expected
}
fs.Debugf(o, "Uploading segments into %q seems done (%v)", segmentsContainer, err)
fs.Debugf(o, "Uploading segments into %q seems done (%v)", o.fs.segmentsContainer, err)
break
}
n := int64(o.fs.opt.ChunkSize)
@@ -1174,45 +1146,46 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
}
segmentReader := io.LimitReader(in, n)
segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, segmentsContainer)
fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, o.fs.segmentsContainer)
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = o.fs.c.ObjectPut(segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
rxHeaders, err = o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
if err == nil {
segmentInfos = append(segmentInfos, segmentPath)
}
return shouldRetryHeaders(rxHeaders, err)
})
if err != nil {
deleteChunks(o, segmentsContainer, segmentInfos)
deleteChunks(o, segmentInfos)
segmentInfos = nil
return "", err
}
i++
}
// Upload the manifest
headers["X-Object-Manifest"] = urlEncode(fmt.Sprintf("%s/%s", segmentsContainer, segmentsPath))
headers["X-Object-Manifest"] = urlEncode(fmt.Sprintf("%s/%s", o.fs.segmentsContainer, segmentsPath))
headers["Content-Length"] = "0" // set Content-Length as we know it
emptyReader := bytes.NewReader(nil)
manifestName := o.fs.root + o.remote
err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = o.fs.c.ObjectPut(container, containerPath, emptyReader, true, "", contentType, headers)
rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", contentType, headers)
return shouldRetryHeaders(rxHeaders, err)
})
if err != nil {
deleteChunks(o, segmentsContainer, segmentInfos)
deleteChunks(o, segmentInfos)
segmentInfos = nil
}
return uniquePrefix + "/", err
}
func deleteChunks(o *Object, segmentsContainer string, segmentInfos []string) {
func deleteChunks(o *Object, segmentInfos []string) {
if segmentInfos != nil && len(segmentInfos) > 0 {
for _, v := range segmentInfos {
fs.Debugf(o, "Delete segment file %q on %q", v, segmentsContainer)
e := o.fs.c.ObjectDelete(segmentsContainer, v)
fs.Debugf(o, "Delete segment file %q on %q", v, o.fs.segmentsContainer)
e := o.fs.c.ObjectDelete(o.fs.segmentsContainer, v)
if e != nil {
fs.Errorf(o, "Error occured in delete segment file %q on %q , error: %q", v, segmentsContainer, e)
fs.Errorf(o, "Error occured in delete segment file %q on %q , error: %q", v, o.fs.segmentsContainer, e)
}
}
}
@@ -1222,11 +1195,10 @@ func deleteChunks(o *Object, segmentsContainer string, segmentInfos []string) {
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
container, containerPath := o.split()
if container == "" {
return fserrors.FatalError(errors.New("can't upload files to the root"))
if o.fs.container == "" {
return fserrors.FatalError(errors.New("container name needed in remote"))
}
err := o.fs.makeContainer(ctx, container)
err := o.fs.Mkdir(ctx, "")
if err != nil {
return err
}
@@ -1252,17 +1224,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
o.headers = nil // wipe old metadata
} else {
var inCount *readers.CountingReader
if size >= 0 {
headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length if we know it
} else {
// otherwise count the size for later
inCount = readers.NewCountingReader(in)
in = inCount
}
var rxHeaders swift.Headers
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
rxHeaders, err = o.fs.c.ObjectPut(container, containerPath, in, true, "", contentType, headers)
rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
return shouldRetryHeaders(rxHeaders, err)
})
if err != nil {
@@ -1275,10 +1242,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
o.md5 = rxHeaders["ETag"]
o.contentType = contentType
o.headers = headers
if inCount != nil {
// update the size if streaming from the reader
o.size = int64(inCount.BytesRead())
}
}
// If file was a dynamic large object then remove old/all segments
@@ -1295,14 +1258,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
container, containerPath := o.split()
isDynamicLargeObject, err := o.isDynamicLargeObject()
if err != nil {
return err
}
// Remove file/manifest first
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectDelete(container, containerPath)
err = o.fs.c.ObjectDelete(o.fs.container, o.fs.root+o.remote)
return shouldRetry(err)
})
if err != nil {

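The updateChunks code above follows Swift's dynamic large object (DLO) convention: each chunk is uploaded to a <container>_segments container under a unique prefix, and a zero byte manifest object carrying an X-Object-Manifest header is then written in place of the file. Below is a minimal sketch of that flow with the ncw/swift client, assuming an already authenticated *swift.Connection; the prefix value and the chunk loop are illustrative rather than rclone's exact code.

package swiftupload

import (
	"bytes"
	"fmt"
	"io"

	"github.com/ncw/swift"
)

// UploadDLO uploads r in chunkSize pieces to <container>_segments under a
// unique prefix, then writes a zero byte manifest into container so that
// Swift serves the concatenated segments as one object.
func UploadDLO(c *swift.Connection, container, object string, r io.Reader, chunkSize int) error {
	segContainer := container + "_segments"
	if err := c.ContainerCreate(segContainer, nil); err != nil {
		return err
	}
	// rclone derives the prefix from a timestamp and the size so that
	// each upload generation is distinguishable; this value is made up.
	prefix := object + "/1565100000.000/0"
	buf := make([]byte, chunkSize)
	for i := 0; ; i++ {
		n, readErr := io.ReadFull(r, buf)
		if n > 0 {
			segPath := fmt.Sprintf("%s/%08d", prefix, i)
			if _, err := c.ObjectPut(segContainer, segPath, bytes.NewReader(buf[:n]), true, "", "", nil); err != nil {
				return err
			}
		}
		if readErr == io.EOF || readErr == io.ErrUnexpectedEOF {
			break // input exhausted
		}
		if readErr != nil {
			return readErr
		}
	}
	// The manifest is an empty object whose header names the segment
	// prefix; rclone additionally url encodes this value.
	headers := swift.Headers{"X-Object-Manifest": segContainer + "/" + prefix}
	_, err := c.ObjectPut(container, object, bytes.NewReader(nil), true, "", "", headers)
	return err
}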

@@ -2,19 +2,10 @@
package swift
import (
"bytes"
"context"
"io"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestIntegration runs integration tests against the remote
@@ -30,50 +21,3 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
// Check that PutStream works with NoChunk as it is the major code
// deviation
func (f *Fs) testNoChunk(t *testing.T) {
ctx := context.Background()
f.opt.NoChunk = true
defer func() {
f.opt.NoChunk = false
}()
file := fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
Path: "piped data no chunk.txt",
Size: -1, // use unknown size during upload
}
const contentSize = 100
contents := random.String(contentSize)
buf := bytes.NewBufferString(contents)
uploadHash := hash.NewMultiHasher()
in := io.TeeReader(buf, uploadHash)
file.Size = -1
obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
obj, err := f.Features().PutStream(ctx, in, obji)
require.NoError(t, err)
file.Hashes = uploadHash.Sums()
file.Size = int64(contentSize) // use correct size when checking
file.Check(t, obj, f.Precision())
// Re-read the object and check again
obj, err = f.NewObject(ctx, file.Path)
require.NoError(t, err)
file.Check(t, obj, f.Precision())
// Delete the object
assert.NoError(t, obj.Remove(ctx))
}
// Additional tests that aren't in the framework
func (f *Fs) InternalTest(t *testing.T) {
t.Run("NoChunk", f.testNoChunk)
}
var _ fstests.InternalTester = (*Fs)(nil)


@@ -21,7 +21,7 @@ import (
func init() {
fsi := &fs.RegInfo{
Name: "union",
Description: "Union merges the contents of several remotes",
Description: "A stackable unification remote, which can appear to merge the contents of several remotes",
NewFs: NewFs,
Options: []fs.Option{{
Name: "remotes",


@@ -3,7 +3,6 @@ package odrvcookie
import (
"bytes"
"context"
"encoding/xml"
"html/template"
"net/http"
@@ -92,8 +91,8 @@ func New(pUser, pPass, pEndpoint string) CookieAuth {
// Cookies creates a CookieResponse. It fetches the auth token and then
// retrieves the Cookies
func (ca *CookieAuth) Cookies(ctx context.Context) (*CookieResponse, error) {
tokenResp, err := ca.getSPToken(ctx)
func (ca *CookieAuth) Cookies() (*CookieResponse, error) {
tokenResp, err := ca.getSPToken()
if err != nil {
return nil, err
}
@@ -141,7 +140,7 @@ func (ca *CookieAuth) getSPCookie(conf *SuccessResponse) (*CookieResponse, error
return &cookieResponse, nil
}
func (ca *CookieAuth) getSPToken(ctx context.Context) (conf *SuccessResponse, err error) {
func (ca *CookieAuth) getSPToken() (conf *SuccessResponse, err error) {
reqData := map[string]interface{}{
"Username": ca.user,
"Password": ca.pass,
@@ -161,7 +160,6 @@ func (ca *CookieAuth) getSPToken(ctx context.Context) (conf *SuccessResponse, er
if err != nil {
return nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
client := fshttp.NewClient(fs.Config)
resp, err := client.Do(req)

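The getSPToken hunk above shows the pre go1.13 idiom for cancellable requests: build the request first, then attach the context with WithContext before handing it to the client. A standalone sketch of the same idiom:

package main

import (
	"context"
	"io/ioutil"
	"net/http"
	"time"
)

// fetch issues a GET that is cancelled when ctx expires. On go1.13 and
// later, http.NewRequestWithContext replaces the WithContext step.
func fetch(ctx context.Context, url string) ([]byte, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	req = req.WithContext(ctx)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return ioutil.ReadAll(resp.Body)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	_, _ = fetch(ctx, "https://example.com/")
}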

@@ -205,7 +205,7 @@ func itemIsDir(item *api.Response) bool {
}
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string) (info *api.Prop, err error) {
func (f *Fs) readMetaDataForPath(path string, depth string) (info *api.Prop, err error) {
// FIXME how do we read back additional properties?
opts := rest.Opts{
Method: "PROPFIND",
@@ -221,7 +221,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
var result api.Multistatus
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
resp, err = f.srv.CallXML(&opts, nil, &result)
return f.shouldRetry(resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
@@ -229,7 +229,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
switch apiErr.StatusCode {
case http.StatusNotFound:
if f.retryWithZeroDepth && depth != "0" {
return f.readMetaDataForPath(ctx, path, "0")
return f.readMetaDataForPath(path, "0")
}
return nil, fs.ErrorObjectNotFound
case http.StatusMovedPermanently, http.StatusFound, http.StatusSeeOther:
@@ -353,7 +353,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
}
f.srv.SetErrorHandler(errorHandler)
err = f.setQuirks(ctx, opt.Vendor)
err = f.setQuirks(opt.Vendor)
if err != nil {
return nil, err
}
@@ -424,7 +424,7 @@ func (f *Fs) fetchAndSetBearerToken() error {
}
// setQuirks adjusts the Fs for the vendor passed in
func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
func (f *Fs) setQuirks(vendor string) error {
switch vendor {
case "owncloud":
f.canStream = true
@@ -440,13 +440,13 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
// They have to be set instead of BasicAuth
f.srv.RemoveHeader("Authorization") // We don't need this Header if using cookies
spCk := odrvcookie.New(f.opt.User, f.opt.Pass, f.endpointURL)
spCookies, err := spCk.Cookies(ctx)
spCookies, err := spCk.Cookies()
if err != nil {
return err
}
odrvcookie.NewRenew(12*time.Hour, func() {
spCookies, err := spCk.Cookies(ctx)
spCookies, err := spCk.Cookies()
if err != nil {
fs.Errorf("could not renew cookies: %s", err.Error())
return
@@ -477,7 +477,7 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Prop) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *api.Prop) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -487,7 +487,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Pro
// Set info
err = o.setMetaData(info)
} else {
err = o.readMetaData(ctx) // reads info and meta, returning an error
err = o.readMetaData() // reads info and meta, returning an error
}
if err != nil {
return nil, err
@@ -498,7 +498,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Pro
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
return f.newObjectWithInfo(remote, nil)
}
// Read the normal props, plus the checksums
@@ -528,7 +528,7 @@ type listAllFn func(string, bool, *api.Prop) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, filesOnly bool, depth string, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(dir string, directoriesOnly bool, filesOnly bool, depth string, fn listAllFn) (found bool, err error) {
opts := rest.Opts{
Method: "PROPFIND",
Path: f.dirPath(dir), // FIXME Should not start with /
@@ -542,7 +542,7 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
var result api.Multistatus
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
resp, err = f.srv.CallXML(&opts, nil, &result)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -550,7 +550,7 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
// does not exist
if apiErr.StatusCode == http.StatusNotFound {
if f.retryWithZeroDepth && depth != "0" {
return f.listAll(ctx, dir, directoriesOnly, filesOnly, "0", fn)
return f.listAll(dir, directoriesOnly, filesOnly, "0", fn)
}
return found, fs.ErrorDirNotFound
}
@@ -625,14 +625,14 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
var iErr error
_, err = f.listAll(ctx, dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool {
_, err = f.listAll(dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool {
if isDir {
d := fs.NewDir(remote, time.Time(info.Modified))
// .SetID(info.ID)
// FIXME more info from dir? can set size, items?
entries = append(entries, d)
} else {
o, err := f.newObjectWithInfo(ctx, remote, info)
o, err := f.newObjectWithInfo(remote, info)
if err != nil {
iErr = err
return true
@@ -696,7 +696,7 @@ func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {
}
// low level mkdir, only makes the directory, doesn't attempt to create parents
func (f *Fs) _mkdir(ctx context.Context, dirPath string) error {
func (f *Fs) _mkdir(dirPath string) error {
// We assume the root is already created
if dirPath == "" {
return nil
@@ -711,7 +711,7 @@ func (f *Fs) _mkdir(ctx context.Context, dirPath string) error {
NoResponse: true,
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(ctx, &opts)
resp, err := f.srv.Call(&opts)
return f.shouldRetry(resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
@@ -727,13 +727,13 @@ func (f *Fs) _mkdir(ctx context.Context, dirPath string) error {
// mkdir makes the directory and parents using native paths
func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
// defer log.Trace(dirPath, "")("")
err := f._mkdir(ctx, dirPath)
err := f._mkdir(dirPath)
if apiErr, ok := err.(*api.Error); ok {
// parent does not exist so create it first then try again
if apiErr.StatusCode == http.StatusConflict {
err = f.mkParentDir(ctx, dirPath)
if err == nil {
err = f._mkdir(ctx, dirPath)
err = f._mkdir(dirPath)
}
}
}
@@ -749,17 +749,17 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// dirNotEmpty returns true if the directory exists and is not Empty
//
// if the directory does not exist then err will be ErrorDirNotFound
func (f *Fs) dirNotEmpty(ctx context.Context, dir string) (found bool, err error) {
return f.listAll(ctx, dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool {
func (f *Fs) dirNotEmpty(dir string) (found bool, err error) {
return f.listAll(dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool {
return true
})
}
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
func (f *Fs) purgeCheck(dir string, check bool) error {
if check {
notEmpty, err := f.dirNotEmpty(ctx, dir)
notEmpty, err := f.dirNotEmpty(dir)
if err != nil {
return err
}
@@ -775,7 +775,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, nil)
resp, err = f.srv.CallXML(&opts, nil, nil)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -789,7 +789,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
return f.purgeCheck(dir, true)
}
// Precision return the precision of this Fs
@@ -835,10 +835,10 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
},
}
if f.useOCMtime {
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1e9)
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1E9)
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -870,7 +870,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
return f.purgeCheck("", false)
}
// Move src to this remote using server side move operations.
@@ -904,7 +904,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
dstPath := f.filePath(dstRemote)
// Check if destination exists
_, err := f.dirNotEmpty(ctx, dstRemote)
_, err := f.dirNotEmpty(dstRemote)
if err == nil {
return fs.ErrorDirExists
}
@@ -934,7 +934,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
},
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -975,7 +975,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &q)
resp, err = f.srv.CallXML(&opts, nil, &q)
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -1028,8 +1028,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
ctx := context.TODO()
err := o.readMetaData(ctx)
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return 0
@@ -1053,11 +1052,11 @@ func (o *Object) setMetaData(info *api.Prop) (err error) {
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
func (o *Object) readMetaData() (err error) {
if o.hasMetaData {
return nil
}
info, err := o.fs.readMetaDataForPath(ctx, o.remote, defaultDepth)
info, err := o.fs.readMetaDataForPath(o.remote, defaultDepth)
if err != nil {
return err
}
@@ -1069,7 +1068,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx)
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -1096,7 +1095,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -1129,7 +1128,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if o.fs.useOCMtime || o.fs.hasChecksums {
opts.ExtraHeaders = map[string]string{}
if o.fs.useOCMtime {
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1e9)
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1E9)
}
if o.fs.hasChecksums {
// Set an upload checksum - prefer SHA1
@@ -1144,7 +1143,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -1160,7 +1159,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// read metadata from remote
o.hasMetaData = false
return o.readMetaData(ctx)
return o.readMetaData()
}
// Remove an object
@@ -1171,7 +1170,7 @@ func (o *Object) Remove(ctx context.Context) error {
NoResponse: true,
}
return o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.Call(ctx, &opts)
resp, err := o.fs.srv.Call(&opts)
return o.fs.shouldRetry(resp, err)
})
}

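Nearly every hunk in this WebDAV section funnels its HTTP call through f.pacer.Call with a closure returning (retry, err). The sketch below captures that retry contract in isolation, assuming a fixed interval and try count; rclone's real pacer also rate limits callers and backs off adaptively.

package main

import (
	"errors"
	"fmt"
	"time"
)

// call retries fn while it reports its error as retryable, sleeping a
// fixed interval between attempts.
func call(tries int, fn func() (retry bool, err error)) error {
	var err error
	for i := 0; i < tries; i++ {
		var retry bool
		retry, err = fn()
		if err == nil || !retry {
			return err
		}
		time.Sleep(100 * time.Millisecond)
	}
	return err
}

func main() {
	attempts := 0
	err := call(5, func() (bool, error) {
		attempts++
		if attempts < 3 {
			return true, errors.New("temporary failure") // ask for a retry
		}
		return false, nil
	})
	fmt.Println(attempts, err) // prints: 3 <nil>
}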

@@ -200,7 +200,7 @@ func (f *Fs) dirPath(file string) string {
return path.Join(f.diskRoot, file) + "/"
}
func (f *Fs) readMetaDataForPath(ctx context.Context, path string, options *api.ResourceInfoRequestOptions) (*api.ResourceInfoResponse, error) {
func (f *Fs) readMetaDataForPath(path string, options *api.ResourceInfoRequestOptions) (*api.ResourceInfoResponse, error) {
opts := rest.Opts{
Method: "GET",
Path: "/resources",
@@ -226,7 +226,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, options *api.
var info api.ResourceInfoResponse
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
resp, err = f.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
})
@@ -239,7 +239,6 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, options *api.
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -285,7 +284,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
//request object meta info
// Check to see if the object exists and is a file
//request object meta info
if info, err := f.readMetaDataForPath(ctx, f.diskRoot, &api.ResourceInfoRequestOptions{}); err != nil {
if info, err := f.readMetaDataForPath(f.diskRoot, &api.ResourceInfoRequestOptions{}); err != nil {
} else {
if info.ResourceType == "file" {
@@ -302,7 +301,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.ResourceInfoResponse) (fs.DirEntry, error) {
func (f *Fs) itemToDirEntry(remote string, object *api.ResourceInfoResponse) (fs.DirEntry, error) {
switch object.ResourceType {
case "dir":
t, err := time.Parse(time.RFC3339Nano, object.Modified)
@@ -312,7 +311,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.Reso
d := fs.NewDir(remote, t).SetSize(object.Size)
return d, nil
case "file":
o, err := f.newObjectWithInfo(ctx, remote, object)
o, err := f.newObjectWithInfo(remote, object)
if err != nil {
return nil, err
}
@@ -344,7 +343,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
Limit: limit,
Offset: offset,
}
info, err := f.readMetaDataForPath(ctx, root, opts)
info, err := f.readMetaDataForPath(root, opts)
if err != nil {
if apiErr, ok := err.(*api.ErrorResponse); ok {
@@ -361,7 +360,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
//list all subdirs
for _, element := range info.Embedded.Items {
remote := path.Join(dir, element.Name)
entry, err := f.itemToDirEntry(ctx, remote, &element)
entry, err := f.itemToDirEntry(remote, &element)
if err != nil {
return nil, err
}
@@ -387,7 +386,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.ResourceInfoResponse) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(remote string, info *api.ResourceInfoResponse) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -396,7 +395,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Res
if info != nil {
err = o.setMetaData(info)
} else {
err = o.readMetaData(ctx)
err = o.readMetaData()
if apiErr, ok := err.(*api.ErrorResponse); ok {
// does not exist
if apiErr.ErrorName == "DiskNotFoundError" {
@@ -413,7 +412,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Res
// NewObject finds the Object at remote. If it can't be found it
// returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
return f.newObjectWithInfo(remote, nil)
}
// Creates a half finished Object from the parameters passed in
@@ -447,7 +446,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
}
// CreateDir makes a directory
func (f *Fs) CreateDir(ctx context.Context, path string) (err error) {
func (f *Fs) CreateDir(path string) (err error) {
//fmt.Printf("CreateDir: %s\n", path)
var resp *http.Response
@@ -461,7 +460,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (err error) {
opts.Parameters.Set("path", path)
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -475,7 +474,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (err error) {
// This really needs improvement and especially proper error checking
// but Yandex does not publish a list of possible errors or when they're
// expected to occur.
func (f *Fs) mkDirs(ctx context.Context, path string) (err error) {
func (f *Fs) mkDirs(path string) (err error) {
//trim filename from path
//dirString := strings.TrimSuffix(path, filepath.Base(path))
//trim "disk:" from path
@@ -484,7 +483,7 @@ func (f *Fs) mkDirs(ctx context.Context, path string) (err error) {
return nil
}
if err = f.CreateDir(ctx, dirString); err != nil {
if err = f.CreateDir(dirString); err != nil {
if apiErr, ok := err.(*api.ErrorResponse); ok {
// already exists
if apiErr.ErrorName != "DiskPathPointsToExistentDirectoryError" {
@@ -494,7 +493,7 @@ func (f *Fs) mkDirs(ctx context.Context, path string) (err error) {
for _, element := range dirs {
if element != "" {
mkdirpath += element + "/" //path separator /
if err = f.CreateDir(ctx, mkdirpath); err != nil {
if err = f.CreateDir(mkdirpath); err != nil {
// ignore errors while creating dirs
}
}
@@ -506,7 +505,7 @@ func (f *Fs) mkDirs(ctx context.Context, path string) (err error) {
return err
}
func (f *Fs) mkParentDirs(ctx context.Context, resPath string) error {
func (f *Fs) mkParentDirs(resPath string) error {
// defer log.Trace(dirPath, "")("")
// chop off trailing / if it exists
if strings.HasSuffix(resPath, "/") {
@@ -516,17 +515,17 @@ func (f *Fs) mkParentDirs(ctx context.Context, resPath string) error {
if parent == "." {
parent = ""
}
return f.mkDirs(ctx, parent)
return f.mkDirs(parent)
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
path := f.filePath(dir)
return f.mkDirs(ctx, path)
return f.mkDirs(path)
}
// waitForJob waits for the job with status in url to complete
func (f *Fs) waitForJob(ctx context.Context, location string) (err error) {
func (f *Fs) waitForJob(location string) (err error) {
opts := rest.Opts{
RootURL: location,
Method: "GET",
@@ -536,7 +535,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string) (err error) {
var resp *http.Response
var body []byte
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
if err != nil {
return fserrors.ShouldRetry(err), err
}
@@ -565,7 +564,7 @@ func (f *Fs) waitForJob(ctx context.Context, location string) (err error) {
return errors.Errorf("async operation didn't complete after %v", fs.Config.Timeout)
}
func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) (err error) {
func (f *Fs) delete(path string, hardDelete bool) (err error) {
opts := rest.Opts{
Method: "DELETE",
Path: "/resources",
@@ -578,7 +577,7 @@ func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) (err erro
var resp *http.Response
var body []byte
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
if err != nil {
return fserrors.ShouldRetry(err), err
}
@@ -596,19 +595,19 @@ func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) (err erro
if err != nil {
return errors.Wrapf(err, "async info result not JSON: %q", body)
}
return f.waitForJob(ctx, info.HRef)
return f.waitForJob(info.HRef)
}
return nil
}
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
func (f *Fs) purgeCheck(dir string, check bool) error {
root := f.filePath(dir)
if check {
//to comply with rclone logic we check if the directory is empty before delete.
//send request to get list of objects in this directory.
info, err := f.readMetaDataForPath(ctx, root, &api.ResourceInfoRequestOptions{})
info, err := f.readMetaDataForPath(root, &api.ResourceInfoRequestOptions{})
if err != nil {
return errors.Wrap(err, "rmdir failed")
}
@@ -617,14 +616,14 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
}
}
//delete directory
return f.delete(ctx, root, false)
return f.delete(root, false)
}
// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
return f.purgeCheck(dir, true)
}
// Purge deletes all the files and the container
@@ -633,11 +632,11 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
return f.purgeCheck("", false)
}
// copyOrMove copies or moves directories or files depending on the method parameter
func (f *Fs) copyOrMove(ctx context.Context, method, src, dst string, overwrite bool) (err error) {
func (f *Fs) copyOrMove(method, src, dst string, overwrite bool) (err error) {
opts := rest.Opts{
Method: "POST",
Path: "/resources/" + method,
@@ -651,7 +650,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dst string, overwrite
var resp *http.Response
var body []byte
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
if err != nil {
return fserrors.ShouldRetry(err), err
}
@@ -669,7 +668,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dst string, overwrite
if err != nil {
return errors.Wrapf(err, "async info result not JSON: %q", body)
}
return f.waitForJob(ctx, info.HRef)
return f.waitForJob(info.HRef)
}
return nil
}
@@ -691,11 +690,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
dstPath := f.filePath(remote)
err := f.mkParentDirs(ctx, dstPath)
err := f.mkParentDirs(dstPath)
if err != nil {
return nil, err
}
err = f.copyOrMove(ctx, "copy", srcObj.filePath(), dstPath, false)
err = f.copyOrMove("copy", srcObj.filePath(), dstPath, false)
if err != nil {
return nil, errors.Wrap(err, "couldn't copy file")
@@ -721,11 +720,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
dstPath := f.filePath(remote)
err := f.mkParentDirs(ctx, dstPath)
err := f.mkParentDirs(dstPath)
if err != nil {
return nil, err
}
err = f.copyOrMove(ctx, "move", srcObj.filePath(), dstPath, false)
err = f.copyOrMove("move", srcObj.filePath(), dstPath, false)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
@@ -759,12 +758,12 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return errors.New("can't move root directory")
}
err := f.mkParentDirs(ctx, dstPath)
err := f.mkParentDirs(dstPath)
if err != nil {
return err
}
_, err = f.readMetaDataForPath(ctx, dstPath, &api.ResourceInfoRequestOptions{})
_, err = f.readMetaDataForPath(dstPath, &api.ResourceInfoRequestOptions{})
if apiErr, ok := err.(*api.ErrorResponse); ok {
// does not exist
if apiErr.ErrorName == "DiskNotFoundError" {
@@ -776,7 +775,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return fs.ErrorDirExists
}
err = f.copyOrMove(ctx, "move", srcPath, dstPath, false)
err = f.copyOrMove("move", srcPath, dstPath, false)
if err != nil {
return errors.Wrap(err, "couldn't move directory")
@@ -803,7 +802,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
return shouldRetry(resp, err)
})
@@ -820,7 +819,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
return "", errors.Wrap(err, "couldn't create public link")
}
info, err := f.readMetaDataForPath(ctx, f.filePath(remote), &api.ResourceInfoRequestOptions{})
info, err := f.readMetaDataForPath(f.filePath(remote), &api.ResourceInfoRequestOptions{})
if err != nil {
return "", err
}
@@ -841,7 +840,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
resp, err = f.srv.Call(&opts)
return shouldRetry(resp, err)
})
return err
@@ -858,7 +857,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var info api.DiskInfo
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
resp, err = f.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
})
@@ -925,11 +924,11 @@ func (o *Object) setMetaData(info *api.ResourceInfoResponse) (err error) {
}
// readMetaData reads and sets the new metadata for a storage.Object
func (o *Object) readMetaData(ctx context.Context) (err error) {
func (o *Object) readMetaData() (err error) {
if o.hasMetaData {
return nil
}
info, err := o.fs.readMetaDataForPath(ctx, o.filePath(), &api.ResourceInfoRequestOptions{})
info, err := o.fs.readMetaDataForPath(o.filePath(), &api.ResourceInfoRequestOptions{})
if err != nil {
return err
}
@@ -944,7 +943,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx)
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -954,8 +953,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
ctx := context.TODO()
err := o.readMetaData(ctx)
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return 0
@@ -976,7 +974,7 @@ func (o *Object) Storable() bool {
return true
}
func (o *Object) setCustomProperty(ctx context.Context, property string, value string) (err error) {
func (o *Object) setCustomProperty(property string, value string) (err error) {
var resp *http.Response
opts := rest.Opts{
Method: "PATCH",
@@ -992,7 +990,7 @@ func (o *Object) setCustomProperty(ctx context.Context, property string, value s
cpr := api.CustomPropertyResponse{CustomProperties: rcm}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &cpr, nil)
resp, err = o.fs.srv.CallJSON(&opts, &cpr, nil)
return shouldRetry(resp, err)
})
return err
@@ -1003,7 +1001,7 @@ func (o *Object) setCustomProperty(ctx context.Context, property string, value s
// Commits the datastore
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
// set custom_property 'rclone_modified' of object to modTime
err := o.setCustomProperty(ctx, "rclone_modified", modTime.Format(time.RFC3339Nano))
err := o.setCustomProperty("rclone_modified", modTime.Format(time.RFC3339Nano))
if err != nil {
return err
}
@@ -1025,7 +1023,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
opts.Parameters.Set("path", o.filePath())
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &dl)
resp, err = o.fs.srv.CallJSON(&opts, nil, &dl)
return shouldRetry(resp, err)
})
@@ -1040,7 +1038,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1049,7 +1047,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return resp.Body, err
}
func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeType string) (err error) {
func (o *Object) upload(in io.Reader, overwrite bool, mimeType string) (err error) {
// prepare upload
var resp *http.Response
var ur api.AsyncInfo
@@ -1063,7 +1061,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeT
opts.Parameters.Set("overwrite", strconv.FormatBool(overwrite))
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &ur)
resp, err = o.fs.srv.CallJSON(&opts, nil, &ur)
return shouldRetry(resp, err)
})
@@ -1081,7 +1079,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeT
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
@@ -1099,13 +1097,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
remote := o.filePath()
//create full path to file before upload.
err := o.fs.mkParentDirs(ctx, remote)
err := o.fs.mkParentDirs(remote)
if err != nil {
return err
}
//upload file
err = o.upload(ctx, in1, true, fs.MimeType(ctx, src))
err = o.upload(in1, true, fs.MimeType(ctx, src))
if err != nil {
return err
}
@@ -1122,7 +1120,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.delete(ctx, o.filePath(), false)
return o.fs.delete(o.filePath(), false)
}
// MimeType of an Object if known, "" otherwise

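The delete and copyOrMove hunks above return early when Yandex answers with an async job, and waitForJob then polls the returned href until the job resolves. A hand-rolled sketch of such a polling loop follows; the JSON shape, status values, poll interval and deadline are assumptions for illustration, not the backend's exact contract.

package asyncjob

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"time"
)

type jobStatus struct {
	Status string `json:"status"`
}

// pollJob polls an async operation's status URL until it succeeds, fails
// or the deadline passes.
func pollJob(client *http.Client, statusURL string, deadline time.Duration) error {
	stop := time.Now().Add(deadline)
	for time.Now().Before(stop) {
		resp, err := client.Get(statusURL)
		if err != nil {
			return err
		}
		var js jobStatus
		err = json.NewDecoder(resp.Body).Decode(&js)
		resp.Body.Close()
		if err != nil {
			return err
		}
		switch js.Status {
		case "success":
			return nil
		case "failed":
			return errors.New("async operation failed")
		}
		time.Sleep(time.Second) // rclone routes this wait through the pacer
	}
	return fmt.Errorf("async operation didn't complete after %v", deadline)
}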

@@ -1,4 +1,4 @@
#!/usr/bin/env python3
#!/usr/bin/env python
"""
This is a tool to decrypt file names in rclone logs.
@@ -43,13 +43,13 @@ def map_log_file(crypt_map, log_file):
"""
with open(log_file) as fd:
for line in fd:
for cipher, plain in crypt_map.items():
for cipher, plain in crypt_map.iteritems():
line = line.replace(cipher, plain)
sys.stdout.write(line)
def main():
if len(sys.argv) < 3:
print("Syntax: %s <crypt-mapping-file> <log-file>" % sys.argv[0])
print "Syntax: %s <crypt-mapping-file> <log-file>" % sys.argv[0]
raise SystemExit(1)
mapping_file, log_file = sys.argv[1:]
crypt_map = read_crypt_map(mapping_file)


@@ -1,4 +1,4 @@
#!/usr/bin/env python3
#!/usr/bin/env python2
"""
Make backend documentation
"""
@@ -52,9 +52,9 @@ if __name__ == "__main__":
for backend in find_backends():
try:
alter_doc(backend)
except Exception as e:
print("Failed adding docs for %s backend: %s" % (backend, e))
except Exception, e:
print "Failed adding docs for %s backend: %s" % (backend, e)
failed += 1
else:
success += 1
print("Added docs for %d backends with %d failures" % (success, failed))
print "Added docs for %d backends with %d failures" % (success, failed)


@@ -1,4 +1,4 @@
#!/usr/bin/python3
#!/usr/bin/python
"""
Generate a markdown changelog for the rclone project
"""
@@ -99,11 +99,10 @@ def process_log(log):
def main():
if len(sys.argv) != 3:
print("Syntax: %s vX.XX vX.XY" % sys.argv[0], file=sys.stderr)
print >>sys.stderr, "Syntax: %s vX.XX vX.XY" % sys.argv[0]
sys.exit(1)
version, next_version = sys.argv[1], sys.argv[2]
log = subprocess.check_output(["git", "log", '''--pretty=format:%H|%an|%aI|%s'''] + [version+".."+next_version])
log = log.decode("utf-8")
by_category = process_log(log)
# Output backends first so remaining in by_category are core items
@@ -113,7 +112,7 @@ def main():
out("local", title="Local")
out("cache", title="Cache")
out("crypt", title="Crypt")
backend_names = sorted(x for x in list(by_category.keys()) if x in backends)
backend_names = sorted(x for x in by_category.keys() if x in backends)
for backend_name in backend_names:
if backend_name in backend_titles:
backend_title = backend_titles[backend_name]
@@ -124,7 +123,7 @@ def main():
# Split remaining in by_category into new features and fixes
new_features = defaultdict(list)
bugfixes = defaultdict(list)
for name, messages in by_category.items():
for name, messages in by_category.iteritems():
for message in messages:
if IS_FIX_RE.search(message):
bugfixes[name].append(message)


@@ -1,4 +1,4 @@
#!/usr/bin/env python3
#!/usr/bin/env python2
"""
Make single page versions of the documentation for release and
conversion into man pages etc.
@@ -18,7 +18,6 @@ docs = [
"docs.md",
"remote_setup.md",
"filtering.md",
"gui.md",
"rc.md",
"overview.md",
"flags.md",
@@ -41,7 +40,6 @@ docs = [
"hubic.md",
"jottacloud.md",
"koofr.md",
"mailru.md",
"mega.md",
"azureblob.md",
"onedrive.md",
@@ -49,8 +47,6 @@ docs = [
"qingstor.md",
"swift.md",
"pcloud.md",
"premiumizeme.md",
"putio.md",
"sftp.md",
"union.md",
"webdav.md",
@@ -119,8 +115,8 @@ def check_docs(docpath):
docs_set = set(docs)
if files == docs_set:
return
print("Files on disk but not in docs variable: %s" % ", ".join(files - docs_set))
print("Files in docs variable but not on disk: %s" % ", ".join(docs_set - files))
print "Files on disk but not in docs variable: %s" % ", ".join(files - docs_set)
print "Files in docs variable but not on disk: %s" % ", ".join(docs_set - files)
raise ValueError("Missing files")
def read_command(command):
@@ -143,7 +139,7 @@ def read_commands(docpath):
def main():
check_docs(docpath)
command_docs = read_commands(docpath).replace("\\", "\\\\") # escape \ so we can use command_docs in re.sub
command_docs = read_commands(docpath)
with open(outfile, "w") as out:
out.write("""\
%% rclone(1) User Manual
@@ -157,7 +153,7 @@ def main():
if doc == "docs.md":
contents = re.sub(r"The main rclone commands.*?for the full list.", command_docs, contents, 0, re.S)
out.write(contents)
print("Written '%s'" % outfile)
print "Written '%s'" % outfile
if __name__ == "__main__":
main()


@@ -1,4 +1,4 @@
#!/usr/bin/env python3
#!/usr/bin/env python
"""
Update the authors.md file with the authors from the git log
"""
@@ -23,14 +23,13 @@ def add_email(name, email):
"""
adds the email passed in to the end of authors.md
"""
print("Adding %s <%s>" % (name, email))
print "Adding %s <%s>" % (name, email)
with open(AUTHORS, "a+") as fd:
print(" * %s <%s>" % (name, email), file=fd)
print >>fd, " * %s <%s>" % (name, email)
subprocess.check_call(["git", "commit", "-m", "Add %s to contributors" % name, AUTHORS])
def main():
out = subprocess.check_output(["git", "log", '--reverse', '--format=%an|%ae', "master"])
out = out.decode("utf-8")
previous = load()
for line in out.split("\n"):


@@ -174,11 +174,7 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
// If file exists then srcFileName != "", however if the file
// doesn't exist then we assume it is a directory...
if srcFileName != "" {
var err error
dstRemote, dstFileName, err = fspath.Split(dstRemote)
if err != nil {
log.Fatalf("Parsing %q failed: %v", args[1], err)
}
dstRemote, dstFileName = fspath.Split(dstRemote)
if dstRemote == "" {
dstRemote = "."
}
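testNoChunk above exercises the streaming path where the size is only known after the upload, which is why the Update hunk earlier wraps the input in readers.NewCountingReader. A tiny standalone version of such a counting reader, for illustration only:

package countio

import "io"

// CountingReader counts the bytes that pass through Read so the caller
// can learn the true size of a stream of unknown length afterwards.
type CountingReader struct {
	r io.Reader
	n uint64
}

// New wraps r in a CountingReader.
func New(r io.Reader) *CountingReader { return &CountingReader{r: r} }

func (c *CountingReader) Read(p []byte) (int, error) {
	n, err := c.r.Read(p)
	c.n += uint64(n)
	return n, err
}

// BytesRead returns how many bytes have been read so far.
func (c *CountingReader) BytesRead() uint64 { return c.n }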
@@ -201,10 +197,7 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
// NewFsDstFile creates a new dst fs with a destination file name from the arguments
func NewFsDstFile(args []string) (fdst fs.Fs, dstFileName string) {
dstRemote, dstFileName, err := fspath.Split(args[0])
if err != nil {
log.Fatalf("Parsing %q failed: %v", args[0], err)
}
dstRemote, dstFileName := fspath.Split(args[0])
if dstRemote == "" {
dstRemote = "."
}

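Both hunks above track the signature change in fspath.Split, which now reports malformed remote strings instead of silently splitting them. A small sketch of the new calling convention; the parent/leaf values in the final comment are an assumption about how this particular input splits.

package main

import (
	"fmt"
	"log"

	"github.com/rclone/rclone/fs/fspath"
)

func main() {
	// Split can now fail on a malformed remote string, so the error
	// must be checked rather than ignored.
	parent, leaf, err := fspath.Split("remote:path/to/file.txt")
	if err != nil {
		log.Fatalf("Parsing failed: %v", err)
	}
	fmt.Println(parent, leaf) // expected: "remote:path/to/" "file.txt"
}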

@@ -267,8 +267,8 @@ func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
stat.Blocks = fsBlocks // Total data blocks in file system.
stat.Bfree = fsBlocks // Free blocks in file system.
stat.Bavail = fsBlocks // Free blocks in file system if you're not root.
stat.Files = 1e9 // Total files in file system.
stat.Ffree = 1e9 // Free files in file system.
stat.Files = 1E9 // Total files in file system.
stat.Ffree = 1E9 // Free files in file system.
stat.Bsize = blockSize // Block size
stat.Namemax = 255 // Maximum file name length?
stat.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
@@ -299,9 +299,6 @@ func (fsys *FS) Open(path string, flags int) (errc int, fh uint64) {
return translateError(err), fhUnset
}
// FIXME add support for unknown length files setting direct_io
// See: https://github.com/billziss-gh/cgofuse/issues/38
return 0, fsys.openHandle(handle)
}


@@ -1,15 +1,9 @@
package config
import (
"context"
"encoding/json"
"fmt"
"os"
"sort"
"errors"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/rc"
"github.com/spf13/cobra"
@@ -26,9 +20,6 @@ func init() {
configCommand.AddCommand(configUpdateCommand)
configCommand.AddCommand(configDeleteCommand)
configCommand.AddCommand(configPasswordCommand)
configCommand.AddCommand(configReconnectCommand)
configCommand.AddCommand(configDisconnectCommand)
configCommand.AddCommand(configUserInfoCommand)
}
var configCommand = &cobra.Command{
@@ -216,99 +207,3 @@ func argsToMap(args []string) (out rc.Params, err error) {
}
return out, nil
}
var configReconnectCommand = &cobra.Command{
Use: "reconnect remote:",
Short: `Re-authenticates user with remote.`,
Long: `
This reconnects the remote: passed in to the cloud storage system.
To disconnect the remote use "rclone config disconnect".
This normally means going through the interactive oauth flow again.
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 1, command, args)
fsInfo, configName, _, config, err := fs.ConfigFs(args[0])
if err != nil {
return err
}
if fsInfo.Config == nil {
return errors.Errorf("%s: doesn't support Reconnect", configName)
}
fsInfo.Config(configName, config)
return nil
},
}
var configDisconnectCommand = &cobra.Command{
Use: "disconnect remote:",
Short: `Disconnects user from remote`,
Long: `
This disconnects the remote: passed in to the cloud storage system.
This normally means revoking the oauth token.
To reconnect use "rclone config reconnect".
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 1, command, args)
f := cmd.NewFsSrc(args)
doDisconnect := f.Features().Disconnect
if doDisconnect == nil {
return errors.Errorf("%v doesn't support Disconnect", f)
}
err := doDisconnect(context.Background())
if err != nil {
return errors.Wrap(err, "Disconnect call failed")
}
return nil
},
}
var (
jsonOutput bool
)
func init() {
configUserInfoCommand.Flags().BoolVar(&jsonOutput, "json", false, "Format output as JSON")
}
var configUserInfoCommand = &cobra.Command{
Use: "userinfo remote:",
Short: `Prints info about logged in user of remote.`,
Long: `
This prints the details of the person logged in to the cloud storage
system.
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 1, command, args)
f := cmd.NewFsSrc(args)
doUserInfo := f.Features().UserInfo
if doUserInfo == nil {
return errors.Errorf("%v doesn't support UserInfo", f)
}
u, err := doUserInfo(context.Background())
if err != nil {
return errors.Wrap(err, "UserInfo call failed")
}
if jsonOutput {
out := json.NewEncoder(os.Stdout)
out.SetIndent("", "\t")
return out.Encode(u)
}
var keys []string
var maxKeyLen int
for key := range u {
keys = append(keys, key)
if len(key) > maxKeyLen {
maxKeyLen = len(key)
}
}
sort.Strings(keys)
for _, key := range keys {
fmt.Printf("%*s: %s\n", maxKeyLen, key, u[key])
}
return nil
},
}


@@ -4,18 +4,12 @@ import (
"context"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
var (
autoFilename = false
)
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&autoFilename, "auto-filename", "a", autoFilename, "Get the file name from the url and use it for destination file path")
}
var commandDefintion = &cobra.Command{
@@ -24,22 +18,13 @@ var commandDefintion = &cobra.Command{
Long: `
Download a url's content and copy it to the destination without saving
it in temporary storage.
Setting --auto-filename will cause the file name to be retrieved from the url and used in the destination path.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)
var dstFileName string
var fsdst fs.Fs
if autoFilename {
fsdst = cmd.NewFsDir(args[1:])
} else {
fsdst, dstFileName = cmd.NewFsDstFile(args[1:])
}
fsdst, dstFileName := cmd.NewFsDstFile(args[1:])
cmd.Run(true, true, command, func() error {
_, err := operations.CopyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename)
_, err := operations.CopyURL(context.Background(), fsdst, dstFileName, args[0])
return err
})
},


@@ -47,14 +47,10 @@ __rclone_custom_func() {
__rclone_init_completion -n : || return
fi
if [[ $cur != *:* ]]; then
local ifs=$IFS
IFS=$'\n'
local remotes=($(command rclone listremotes))
IFS=$ifs
local remote
for remote in "${remotes[@]}"; do
while IFS= read -r remote; do
[[ $remote != $cur* ]] || COMPREPLY+=("$remote")
done
done < <(command rclone listremotes)
if [[ ${COMPREPLY[@]} ]]; then
local paths=("$cur"*)
[[ ! -f ${paths[0]} ]] || COMPREPLY+=("${paths[@]}")
@@ -66,18 +62,14 @@ __rclone_custom_func() {
else
local prefix=
fi
local ifs=$IFS
IFS=$'\n'
local lines=($(rclone lsf "${cur%%:*}:$prefix" 2>/dev/null))
IFS=$ifs
local line
for line in "${lines[@]}"; do
while IFS= read -r line; do
local reply=${prefix:+$prefix/}$line
[[ $reply != $path* ]] || COMPREPLY+=("$reply")
done
[[ ! ${COMPREPLY[@]} || $(type -t compopt) != builtin ]] || compopt -o filenames
done < <(rclone lsf "${cur%%:*}:$prefix" 2>/dev/null)
[[ ! ${COMPREPLY[@]} ]] || compopt -o filenames
fi
[[ ! ${COMPREPLY[@]} || $(type -t compopt) != builtin ]] || compopt -o nospace
[[ ! ${COMPREPLY[@]} ]] || compopt -o nospace
fi
}
`
@@ -308,7 +300,6 @@ func showBackend(name string) {
optionsType := "standard"
for _, opts := range []fs.Options{standardOptions, advancedOptions} {
if len(opts) == 0 {
optionsType = "advanced"
continue
}
fmt.Printf("### %s Options\n\n", strings.Title(optionsType))
@@ -316,11 +307,7 @@ func showBackend(name string) {
optionsType = "advanced"
for _, opt := range opts {
done[opt.Name] = struct{}{}
shortOpt := ""
if opt.ShortOpt != "" {
shortOpt = fmt.Sprintf(" / -%s", opt.ShortOpt)
}
fmt.Printf("#### --%s%s\n\n", opt.FlagName(backend.Prefix), shortOpt)
fmt.Printf("#### --%s\n\n", opt.FlagName(backend.Prefix))
fmt.Printf("%s\n\n", opt.Help)
fmt.Printf("- Config: %s\n", opt.Name)
fmt.Printf("- Env Var: %s\n", opt.EnvVarName(backend.Prefix))

View File

@@ -58,7 +58,7 @@ a bit of go code for each one.
`,
Hidden: true,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1e6, command, args)
cmd.CheckArgs(1, 1E6, command, args)
for i := range args {
f := cmd.NewFsDir(args[i : i+1])
cmd.Run(false, false, command, func() error {


@@ -4,6 +4,7 @@ package mount
import (
"context"
"io"
"time"
"bazil.org/fuse"
@@ -73,9 +74,9 @@ func (f *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenR
return nil, translateError(err)
}
// If size unknown then use direct io to read
if handle.Node().DirEntry().Size() < 0 {
resp.Flags |= fuse.OpenDirectIO
// See if seeking is supported and set FUSE hint accordingly
if _, err = handle.Seek(0, io.SeekCurrent); err != nil {
resp.Flags |= fuse.OpenNonSeekable
}
return &FileHandle{handle}, nil


@@ -58,8 +58,8 @@ func (f *FS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.Sta
resp.Blocks = fsBlocks // Total data blocks in file system.
resp.Bfree = fsBlocks // Free blocks in file system.
resp.Bavail = fsBlocks // Free blocks in file system if you're not root.
resp.Files = 1e9 // Total files in file system.
resp.Ffree = 1e9 // Free files in file system.
resp.Files = 1E9 // Total files in file system.
resp.Ffree = 1E9 // Free files in file system.
resp.Bsize = blockSize // Block size
resp.Namelen = 255 // Maximum file name length?
resp.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.


@@ -70,7 +70,7 @@ func mountOptions(device string) (options []fuse.MountOption) {
if len(mountlib.ExtraOptions) > 0 {
fs.Errorf(nil, "-o/--option not supported with this FUSE backend")
}
if len(mountlib.ExtraFlags) > 0 {
if len(mountlib.ExtraOptions) > 0 {
fs.Errorf(nil, "--fuse-flag not supported with this FUSE backend")
}
return options


@@ -3,15 +3,11 @@
package mount
import (
"runtime"
"testing"
"github.com/rclone/rclone/cmd/mountlib/mounttest"
)
func TestMount(t *testing.T) {
if runtime.NumCPU() <= 2 {
t.Skip("FIXME skipping mount tests as they lock up on <= 2 CPUs - See: https://github.com/rclone/rclone/issues/3154")
}
mounttest.RunTests(t, mount)
}


@@ -16,7 +16,7 @@ import (
var (
// Flags
iterations = flag.Int("n", 1e6, "Iterations to try")
iterations = flag.Int("n", 1E6, "Iterations to try")
maxBlockSize = flag.Int("b", 1024*1024, "Max block size to read")
)


@@ -17,7 +17,7 @@ import (
var (
// Flags
iterations = flag.Int("n", 1e6, "Iterations to try")
iterations = flag.Int("n", 1E6, "Iterations to try")
maxBlockSize = flag.Int("b", 1024*1024, "Max block size to read")
simultaneous = flag.Int("transfers", 16, "Number of simultaneous files to open")
seeksPerFile = flag.Int("seeks", 8, "Seeks per file")


@@ -161,7 +161,10 @@ applications won't work with their files on an rclone mount without
Caching](#file-caching) section for more info.
The bucket based remotes (eg Swift, S3, Google Compute Storage, B2,
Hubic) do not support the concept of empty directories, so empty
Hubic) won't work from the root - you will need to specify a bucket,
or a path within the bucket. So ` + "`swift:`" + ` won't work whereas
` + "`swift:bucket`" + ` will as will ` + "`swift:bucket/path`" + `.
None of these support the concept of directories, so empty
directories will have a tendency to disappear once they fall out of
the directory cache.


@@ -115,7 +115,7 @@ func newRun() *Run {
fstest.Initialise()
var err error
r.fremote, r.fremoteName, r.cleanRemote, err = fstest.RandomRemote()
r.fremote, r.fremoteName, r.cleanRemote, err = fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir)
if err != nil {
log.Fatalf("Failed to open remote %q: %v", *fstest.RemoteName, err)
}


@@ -23,29 +23,10 @@ const (
logTimeFormat = "2006-01-02 15:04:05"
)
var (
initTerminal func() error
writeToTerminal func([]byte)
)
// Initialise the VT100 terminal
func initTerminalVT100() error {
return nil
}
// Write to the VT100 terminal
func writeToTerminalVT100(b []byte) {
_, _ = os.Stdout.Write(b)
}
// startProgress starts the progress bar printing
//
// It returns a func which should be called to stop the stats.
func startProgress() func() {
if os.Getenv("TERM") != "" {
initTerminal = initTerminalVT100
writeToTerminal = writeToTerminalVT100
}
err := initTerminal()
if err != nil {
fs.Errorf(nil, "Failed to start progress: %v", err)


@@ -2,8 +2,12 @@
package cmd
func init() {
// Default terminal is VT100 for non Windows
initTerminal = initTerminalVT100
writeToTerminal = writeToTerminalVT100
import "os"
func initTerminal() error {
return nil
}
func writeToTerminal(b []byte) {
_, _ = os.Stdout.Write(b)
}


@@ -16,13 +16,7 @@ var (
ansiParser *ansiterm.AnsiParser
)
func init() {
// Default terminal is Windows console for Windows
initTerminal = initTerminalWindows
writeToTerminal = writeToTerminalWindows
}
func initTerminalWindows() error {
func initTerminal() error {
winEventHandler := winterm.CreateWinEventHandler(os.Stdout.Fd(), os.Stdout)
if winEventHandler == nil {
err := syscall.GetLastError()
@@ -35,7 +29,7 @@ func initTerminalWindows() error {
return nil
}
func writeToTerminalWindows(b []byte) {
func writeToTerminal(b []byte) {
// Remove all non-ASCII characters until this is fixed
// https://github.com/Azure/go-ansiterm/issues/26
r := []rune(string(b))


@@ -69,14 +69,13 @@ rclone rc server, eg:
Use "rclone rc" to see a list of all possible commands.`,
Run: func(command *cobra.Command, args []string) {
- cmd.CheckArgs(0, 1e9, command, args)
+ cmd.CheckArgs(0, 1E9, command, args)
cmd.Run(false, false, command, func() error {
- ctx := context.Background()
parseFlags()
if len(args) == 0 {
- return list(ctx)
+ return list()
}
- return run(ctx, args)
+ return run(args)
})
},
}
@@ -111,23 +110,14 @@ func setAlternateFlag(flagName string, output *string) {
// do a call from (path, in) to (out, err).
//
// if err is set, out may be a valid error return or it may be nil
- func doCall(ctx context.Context, path string, in rc.Params) (out rc.Params, err error) {
+ func doCall(path string, in rc.Params) (out rc.Params, err error) {
// If loopback set, short circuit HTTP request
if loopback {
call := rc.Calls.Get(path)
if call == nil {
return nil, errors.Errorf("method %q not found", path)
}
- out, err = call.Fn(context.Background(), in)
- if err != nil {
- return nil, errors.Wrap(err, "loopback call failed")
- }
- // Reshape (serialize then deserialize) the data so it is in the form expected
- err = rc.Reshape(&out, out)
- if err != nil {
- return nil, errors.Wrap(err, "loopback reshape failed")
- }
- return out, nil
+ return call.Fn(context.Background(), in)
}
// Do HTTP request
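
The removed loopback branch runs the rc call in-process and then reshapes the result through JSON so it has the same shape an HTTP round trip would produce. A standalone illustration of that serialize-then-deserialize idea (illustrative only, not rclone's rc.Reshape itself):

package main

import (
	"encoding/json"
	"fmt"
)

// reshape serializes in to JSON and deserializes it into out, so the
// result looks exactly as it would after travelling over HTTP.
func reshape(out interface{}, in interface{}) error {
	b, err := json.Marshal(in)
	if err != nil {
		return err
	}
	return json.Unmarshal(b, out)
}

func main() {
	in := map[string]interface{}{"count": 3} // holds an int before reshaping
	var out map[string]interface{}
	if err := reshape(&out, in); err != nil {
		fmt.Println("reshape failed:", err)
		return
	}
	fmt.Printf("%T\n", out["count"]) // float64, as JSON decoding would give
}
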
@@ -142,7 +132,6 @@ func doCall(ctx context.Context, path string, in rc.Params) (out rc.Params, err
if err != nil {
return nil, errors.Wrap(err, "failed to make request")
}
- req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("Content-Type", "application/json")
if authUser != "" || authPass != "" {
@@ -184,7 +173,7 @@ func doCall(ctx context.Context, path string, in rc.Params) (out rc.Params, err
}
// Run the remote control command passed in
- func run(ctx context.Context, args []string) (err error) {
+ func run(args []string) (err error) {
path := strings.Trim(args[0], "/")
// parse input
@@ -210,7 +199,7 @@ func run(ctx context.Context, args []string) (err error) {
}
// Do the call
- out, callErr := doCall(ctx, path, in)
+ out, callErr := doCall(path, in)
// Write the JSON blob to stdout if required
if out != nil && !noOutput {
@@ -224,8 +213,8 @@ func run(ctx context.Context, args []string) (err error) {
}
// List the available commands to stdout
- func list(ctx context.Context) error {
- list, err := doCall(ctx, "rc/list", nil)
+ func list() error {
+ list, err := doCall("rc/list", nil)
if err != nil {
return errors.Wrap(err, "failed to list")
}
@@ -238,7 +227,7 @@ func list(ctx context.Context) error {
if !ok {
return errors.New("bad JSON")
}
fmt.Printf("### %s: %s {#%s}\n\n", info["Path"], info["Title"], info["Path"])
fmt.Printf("### %s: %s\n\n", info["Path"], info["Title"])
fmt.Printf("%s\n\n", info["Help"])
if authRequired := info["AuthRequired"]; authRequired != nil {
if authRequired.(bool) {


@@ -17,7 +17,6 @@ import (
"github.com/rclone/rclone/fs/rc/rcflags"
"github.com/rclone/rclone/fs/rc/rcserver"
"github.com/rclone/rclone/lib/errors"
"github.com/rclone/rclone/lib/random"
"github.com/spf13/cobra"
)
@@ -55,23 +54,6 @@ See the [rc documentation](/rc/) for more info on the rc flags.
if err := checkRelease(rcflags.Opt.WebGUIUpdate); err != nil {
log.Fatalf("Error while fetching the latest release of rclone-webui-react %v", err)
}
- if rcflags.Opt.NoAuth {
- rcflags.Opt.NoAuth = false
- fs.Infof(nil, "Cannot run web-gui without authentication, using default auth")
- }
- if rcflags.Opt.HTTPOptions.BasicUser == "" {
- rcflags.Opt.HTTPOptions.BasicUser = "gui"
- fs.Infof(nil, "Using default username: %s \n", rcflags.Opt.HTTPOptions.BasicUser)
- }
- if rcflags.Opt.HTTPOptions.BasicPass == "" {
- randomPass, err := random.Password(128)
- if err != nil {
- log.Fatalf("Failed to make password: %v", err)
- }
- rcflags.Opt.HTTPOptions.BasicPass = randomPass
- fs.Infof(nil, "No password specified. Using random password: %s \n", randomPass)
- }
rcflags.Opt.Serve = true
}
s, err := rcserver.Start(&rcflags.Opt)
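
random.Password in the removed block generates a password with the requested bits of entropy. A rough sketch of what such a helper can look like, built on crypto/rand as an assumption rather than rclone's actual lib/random implementation:

package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"log"
)

// password returns a base64-encoded string carrying at least the
// requested bits of cryptographically secure randomness.
func password(bits int) (string, error) {
	n := bits / 8
	if bits%8 != 0 {
		n++
	}
	buf := make([]byte, n)
	if _, err := rand.Read(buf); err != nil {
		return "", err
	}
	return base64.RawURLEncoding.EncodeToString(buf), nil
}

func main() {
	pw, err := password(128)
	if err != nil {
		log.Fatalf("Failed to make password: %v", err)
	}
	fmt.Println(pw)
}
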
@@ -88,34 +70,29 @@ See the [rc documentation](/rc/) for more info on the rc flags.
// checkRelease is a helper function to download and set up the latest release of rclone-webui-react
func checkRelease(shouldUpdate bool) (err error) {
// Get the latest release details
WebUIURL, tag, size, err := getLatestReleaseURL()
if err != nil {
return err
}
zipName := tag + ".zip"
cachePath := filepath.Join(config.CacheDir, "webgui")
zipPath := filepath.Join(cachePath, zipName)
extractPath := filepath.Join(cachePath, "current")
oldUpdateExists := exists(extractPath)
// if the old update does not exist or a forced update was requested.
// TODO: Add hashing to check integrity of the previous update.
if !oldUpdateExists || shouldUpdate {
// Get the latest release details
WebUIURL, tag, size, err := getLatestReleaseURL()
if err != nil {
return err
if !exists(cachePath) {
if err := os.MkdirAll(cachePath, 0755); err != nil {
fs.Logf(nil, "Error creating cache directory: %s", cachePath)
}
zipName := tag + ".zip"
zipPath := filepath.Join(cachePath, zipName)
if !exists(cachePath) {
if err := os.MkdirAll(cachePath, 0755); err != nil {
fs.Logf(nil, "Error creating cache directory: %s", cachePath)
return err
}
}
}
// Load the file
exists := exists(zipPath)
// if the zip file does not exist or a forced update was requested.
if !exists || shouldUpdate {
fs.Logf(nil, "A new release for gui is present at "+WebUIURL)
fs.Logf(nil, "Downloading webgui binary. Please wait. [Size: %s, Path : %s]\n", strconv.Itoa(size), zipPath)
// download the zip from latest url
- err = downloadFile(zipPath, WebUIURL)
+ err := downloadFile(zipPath, WebUIURL)
if err != nil {
return err
}
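
downloadFile is called here but defined outside the hunk; a minimal sketch of such a helper, assuming it simply fetches the URL and writes the body to the given path:

package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

// downloadFile fetches url and writes the response body to path.
func downloadFile(path, url string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer func() { _ = resp.Body.Close() }()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %s fetching %s", resp.Status, url)
	}
	out, err := os.Create(path)
	if err != nil {
		return err
	}
	defer func() { _ = out.Close() }()
	_, err = io.Copy(out, resp.Body)
	return err
}

func main() {
	// Path and URL are placeholders for the example.
	if err := downloadFile("/tmp/webgui.zip", "https://example.com/webgui.zip"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
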


@@ -1,11 +1,9 @@
package dlna
import (
"context"
"encoding/xml"
"fmt"
"log"
"mime"
"net/http"
"net/url"
"os"
@@ -13,7 +11,6 @@ import (
"path/filepath"
"regexp"
"sort"
"strings"
"github.com/anacrolix/dms/dlna"
"github.com/anacrolix/dms/upnp"
@@ -23,39 +20,6 @@ import (
"github.com/rclone/rclone/vfs"
)
- // Add a minimal number of mime types to augment go's built in types
- // for environments which don't have access to a mime.types file (eg
- // Termux on android)
- func init() {
- for _, t := range []struct {
- mimeType string
- extensions string
- }{
- {"audio/flac", ".flac"},
- {"audio/mpeg", ".mpga,.mpega,.mp2,.mp3,.m4a"},
- {"audio/ogg", ".oga,.ogg,.opus,.spx"},
- {"audio/x-wav", ".wav"},
- {"image/tiff", ".tiff,.tif"},
- {"video/dv", ".dif,.dv"},
- {"video/fli", ".fli"},
- {"video/mpeg", ".mpeg,.mpg,.mpe"},
- {"video/MP2T", ".ts"},
- {"video/mp4", ".mp4"},
- {"video/quicktime", ".qt,.mov"},
- {"video/ogg", ".ogv"},
- {"video/webm", ".webm"},
- {"video/x-msvideo", ".avi"},
- {"video/x-matroska", ".mpv,.mkv"},
- } {
- for _, ext := range strings.Split(t.extensions, ",") {
- err := mime.AddExtensionType(ext, t.mimeType)
- if err != nil {
- panic(err)
- }
- }
- }
- }
type contentDirectoryService struct {
*server
upnp.Eventing
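
The removed init block above seeds Go's mime registry for systems without a mime.types file. A small runnable illustration of the same mechanism:

package main

import (
	"fmt"
	"log"
	"mime"
)

func main() {
	// Register one extension, then look it up, as the init block does in bulk.
	if err := mime.AddExtensionType(".mkv", "video/x-matroska"); err != nil {
		log.Fatal(err)
	}
	fmt.Println(mime.TypeByExtension(".mkv")) // video/x-matroska
}
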
@@ -69,7 +33,7 @@ var mediaMimeTypeRegexp = regexp.MustCompile("^(video|audio|image)/")
// Turns the given entry and DMS host into a UPnP object. A nil object is
// returned if the entry is not of interest.
- func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fileInfo vfs.Node, host string) (ret interface{}, err error) {
+ func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fileInfo os.FileInfo, host string) (ret interface{}, err error) {
obj := upnpav.Object{
ID: cdsObject.ID(),
Restricted: 1,
@@ -77,32 +41,17 @@ func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fi
}
if fileInfo.IsDir() {
- children, err := cds.readContainer(cdsObject, host)
- if err != nil {
- return nil, err
- }
- obj.Class = "object.container.storageFolder"
- obj.Title = fileInfo.Name()
- return upnpav.Container{
- Object: obj,
- ChildCount: len(children),
- }, nil
+ ret = upnpav.Container{Object: obj}
+ return
}
if !fileInfo.Mode().IsRegular() {
return
}
- // Read the mime type from the fs.Object if possible,
- // otherwise fall back to working out what it is from the file path.
- var mimeType string
- if o, ok := fileInfo.DirEntry().(fs.Object); ok {
- mimeType = fs.MimeType(context.TODO(), o)
- } else {
- mimeType = fs.MimeTypeFromName(fileInfo.Name())
- }
+ mimeType := fs.MimeTypeFromName(fileInfo.Name())
mediaType := mediaMimeTypeRegexp.FindStringSubmatch(mimeType)
if mediaType == nil {
return
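
The removed lines prefer the MIME type reported by the backend, via a type assertion on the entry, and only fall back to guessing from the file name. A generic sketch of that assert-then-fallback shape (the interface and types here are invented for the example):

package main

import (
	"fmt"
	"mime"
	"path/filepath"
)

// mimeTyped stands in for richer entries (like fs.Object) that know
// their own MIME type.
type mimeTyped interface {
	MimeType() string
}

type taggedEntry struct{ mimeType string }

func (t taggedEntry) MimeType() string { return t.mimeType }

type plainEntry struct{}

func mimeTypeOf(entry interface{}, name string) string {
	if o, ok := entry.(mimeTyped); ok {
		return o.MimeType() // authoritative answer from the backend
	}
	return mime.TypeByExtension(filepath.Ext(name)) // fall back to the name
}

func main() {
	fmt.Println(mimeTypeOf(taggedEntry{"video/mp4"}, "clip.bin"))
	fmt.Println(mimeTypeOf(plainEntry{}, "page.html"))
}
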
@@ -252,15 +201,7 @@ func (cds *contentDirectoryService) Handle(action string, argsXML []byte, r *htt
"UpdateID": cds.updateIDString(),
}, nil
case "BrowseMetadata":
- node, err := cds.vfs.Stat(obj.Path)
- if err != nil {
- return nil, err
- }
- upnpObject, err := cds.cdsObjectToUpnpavObject(obj, node, host)
- if err != nil {
- return nil, err
- }
- result, err := xml.Marshal(upnpObject)
+ result, err := xml.Marshal(obj)
if err != nil {
return nil, err
}

File diff suppressed because one or more lines are too long


@@ -2,35 +2,3 @@
// The "go:generate" directive compiles static assets by running assets_generate.go
package data
- import (
- "io/ioutil"
- "text/template"
- "github.com/pkg/errors"
- "github.com/rclone/rclone/fs"
- )
- // GetTemplate returns the rootDesc XML template
- func GetTemplate() (tpl *template.Template, err error) {
- templateFile, err := Assets.Open("rootDesc.xml.tmpl")
- if err != nil {
- return nil, errors.Wrap(err, "get template open")
- }
- defer fs.CheckClose(templateFile, &err)
- templateBytes, err := ioutil.ReadAll(templateFile)
- if err != nil {
- return nil, errors.Wrap(err, "get template read")
- }
- var templateString = string(templateBytes)
- tpl, err = template.New("rootDesc").Parse(templateString)
- if err != nil {
- return nil, errors.Wrap(err, "get template parse")
- }
- return
- }
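
The removed GetTemplate simply parses an embedded XML template. The text/template flow it wraps, shown with a made-up template body:

package main

import (
	"log"
	"os"
	"text/template"
)

func main() {
	// The template body here is invented for the example; the real one
	// lives in the embedded rootDesc.xml.tmpl asset.
	const rootDesc = "<friendlyName>{{.FriendlyName}}</friendlyName>\n"
	tpl, err := template.New("rootDesc").Parse(rootDesc)
	if err != nil {
		log.Fatalf("get template parse: %v", err)
	}
	data := struct{ FriendlyName string }{"rclone (dlna)"}
	if err := tpl.Execute(os.Stdout, data); err != nil {
		log.Fatal(err)
	}
}
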

Some files were not shown because too many files have changed in this diff.