mirror of https://github.com/rclone/rclone.git synced 2025-12-20 18:23:31 +00:00

Compare commits


1 commit

1319 changed files with 50134 additions and 150696 deletions

appveyor.yml

@@ -2,7 +2,7 @@ version: "{build}"
 os: Windows Server 2012 R2
-clone_folder: c:\gopath\src\github.com\rclone\rclone
+clone_folder: c:\gopath\src\github.com\ncw\rclone
 cache:
 - '%LocalAppData%\go-build'
@@ -16,7 +16,7 @@ environment:
 PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
 PATH: '%PATHCC64%'
 RCLONE_CONFIG_PASS:
-secure: sq9CPBbwaeKJv+yd24U44neORYPQVy6jsjnQptC+5yk=
+secure: HbzxSy9zQ8NYWN9NNPf6ALQO9Q0mwRNqwehsLcOEHy0=
 install:
 - choco install winfsp -y
@@ -46,4 +46,4 @@ artifacts:
 - path: build/*-v*.zip
 deploy_script:
-- IF "%APPVEYOR_REPO_NAME%" == "rclone/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
+- IF "%APPVEYOR_REPO_NAME%" == "ncw/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload

circle.yml

@@ -6,7 +6,7 @@ jobs:
 build:
 machine: true
-working_directory: ~/.go_workspace/src/github.com/rclone/rclone
+working_directory: ~/.go_workspace/src/github.com/ncw/rclone
 steps:
 - checkout

.gitattributes (vendored, 3 changes)

@@ -1,3 +0,0 @@
-# Ignore generated files in GitHub language statistics and diffs
-/MANUAL.* linguist-generated=true
-/rclone.1 linguist-generated=true

.github/ISSUE_TEMPLATE.md

@@ -10,7 +10,7 @@ instead of filing an issue for a quick response.
 If you are reporting a bug or asking for a new feature then please use one of the templates here:
-https://github.com/rclone/rclone/issues/new
+https://github.com/ncw/rclone/issues/new
 otherwise fill in the form below.

.github/PULL_REQUEST_TEMPLATE.md

@@ -22,8 +22,8 @@ Link issues and relevant forum posts here.
 #### Checklist
-- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
+- [ ] I have read the [contribution guidelines](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
 - [ ] I have added tests for all changes in this PR if appropriate.
 - [ ] I have added documentation for the changes if appropriate.
-- [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).
+- [ ] All commit messages are in [house style](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#commit-messages).
 - [ ] I'm done, this Pull Request is ready for review :-)

.gitignore (vendored, 3 changes)

@@ -5,6 +5,3 @@ build
 docs/public
 rclone.iml
 .idea
-.history
-*.test
-*.log

.golangci.yml

@@ -1,5 +1,9 @@
 # golangci-lint configuration options
+run:
+build-tags:
+- cmount
 linters:
 enable:
 - deadcode
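
The run.build-tags setting matters because Go files guarded by build constraints are invisible to golangci-lint unless the tag is supplied, so cmount-only code would otherwise go unlinted (the same reason the Makefile side of this compare passes LINTTAGS=--build-tags "$(GOTAGS)"). A minimal illustration of such a tag-guarded file (hypothetical example, not part of this diff):

    // +build cmount

    // Package example compiles only when the cmount build tag is set,
    // e.g. `go build -tags cmount` or `golangci-lint run --build-tags cmount`.
    package example

    // MountSupported reports whether this build includes cmount support.
    func MountSupported() bool { return true }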

.travis.yml

@@ -1,10 +1,10 @@
 ---
 language: go
 sudo: required
-dist: xenial
+dist: trusty
 os:
 - linux
-go_import_path: github.com/rclone/rclone
+go_import_path: github.com/ncw/rclone
 before_install:
 - git fetch --unshallow --tags
 - |
@@ -31,10 +31,9 @@ install:
 env:
 global:
 - GOTAGS=cmount
-- GOMAXPROCS=8 # workaround for cmd/mount tests locking up - see #3154
 - GO111MODULE=off
+- GITHUB_USER=ncw
 - secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
-- secure: AMjrMAksDy3QwqGqnvtUg8FL/GNVgNqTqhntLF9HSU0njHhX6YurGGnfKdD9vNHlajPQOewvmBjwNLcDWGn2WObdvmh9Ohep0EmOjZ63kliaRaSSQueSd8y0idfqMQAxep0SObOYbEDVmQh0RCAE9wOVKRaPgw98XvgqWGDq5Tw=
 - secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
 addons:
 apt:
@@ -50,6 +49,9 @@ matrix:
 allow_failures:
 - go: tip
 include:
+- go: 1.8.x
+script:
+- make quicktest
 - go: 1.9.x
 script:
 - make quicktest
@@ -60,38 +62,18 @@
 script:
 - make quicktest
 - go: 1.12.x
-name: Linux
 env:
 - GOTAGS=cmount
-- BUILD_FLAGS='-include "^linux/"'
-- DEPLOY=true
 script:
 - make build_dep
 - make check
 - make quicktest
-- go: 1.12.x
-name: Go Modules / Race
-env:
-- GO111MODULE=on
-- GOPROXY=https://proxy.golang.org
-script:
-- make quicktest
 - make racequicktest
-- go: 1.12.x
-name: Other OS
-env:
-- DEPLOY=true
-- BUILD_FLAGS='-exclude "^(windows|darwin|linux)/"'
-script:
-- make
 - make compile_all
-- go: 1.12.x
-name: macOS
-os: osx
+- os: osx
+go: 1.12.x
 env:
 - GOTAGS= # cmount doesn't work on osx travis for some reason
-- BUILD_FLAGS='-include "^darwin/" -cgo'
-- DEPLOY=true
 cache:
 directories:
 - $HOME/Library/Caches/go-build
@@ -100,12 +82,10 @@ matrix:
 - make quicktest
 - make racequicktest
 # - os: windows
-# name: Windows
 # go: 1.12.x
 # env:
 # - GOTAGS=cmount
 # - CPATH='C:\Program Files (x86)\WinFsp\inc\fuse'
-# - BUILD_FLAGS='-include "^windows/amd64" -cgo' # 386 doesn't build yet
 # #filter_secrets: false # works around a problem with secrets under windows
 # cache:
 # directories:
@@ -123,6 +103,7 @@ deploy:
 script: make travis_beta
 skip_cleanup: true
 on:
-repo: rclone/rclone
+repo: ncw/rclone
 all_branches: true
-condition: $TRAVIS_PULL_REQUEST == false && $DEPLOY == true
+go: 1.12.x
+condition: $TRAVIS_PULL_REQUEST == false && $TRAVIS_OS_NAME != "windows"

CONTRIBUTING.md

@@ -29,12 +29,12 @@ You'll need a Go environment set up with GOPATH set. See [the Go
 getting started docs](https://golang.org/doc/install) for more info.
 First in your web browser press the fork button on [rclone's GitHub
-page](https://github.com/rclone/rclone).
+page](https://github.com/ncw/rclone).
 Now in your terminal
-go get -u github.com/rclone/rclone
-cd $GOPATH/src/github.com/rclone/rclone
+go get -u github.com/ncw/rclone
+cd $GOPATH/src/github.com/ncw/rclone
 git remote rename origin upstream
 git remote add origin git@github.com:YOURUSER/rclone.git
@@ -127,7 +127,7 @@ If you want to use the integration test framework to run these tests
 all together with an HTML report and test retries then from the
 project root:
-go install github.com/rclone/rclone/fstest/test_all
+go install github.com/ncw/rclone/fstest/test_all
 test_all -backend drive
@@ -135,7 +135,7 @@ then change into the project root and run
 make test
-This command is run daily on the integration test server. You can
+This command is run daily on the the integration test server. You can
 find the results at https://pub.rclone.org/integration-tests/
 ## Code Organisation ##

MAINTAINERS.md

@@ -51,7 +51,7 @@ The milestones have these meanings:
 * Help wanted - blue sky stuff that might get moved up, or someone could help with
 * Known bugs - bugs waiting on external factors or we aren't going to fix for the moment
-Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
+Tickets [with no milestone](https://github.com/ncw/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
 ## Closing Tickets ##

File diff suppressed because it is too large

MANUAL.md (2005 changes): file diff suppressed because it is too large

MANUAL.txt (4953 changes): file diff suppressed because it is too large

Makefile

@@ -1,5 +1,5 @@
 SHELL = bash
-BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(shell git rev-parse --abbrev-ref HEAD))
+BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD))
 LAST_TAG := $(shell git describe --tags --abbrev=0)
 ifeq ($(BRANCH),$(LAST_TAG))
 BRANCH := master
@@ -24,14 +24,13 @@ BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
 # Pass in GOTAGS=xyz on the make command line to set build tags
 ifdef GOTAGS
 BUILDTAGS=-tags "$(GOTAGS)"
-LINTTAGS=--build-tags "$(GOTAGS)"
 endif
 .PHONY: rclone vars version
 rclone:
 touch fs/version.go
-go install -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
+go install -v --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
 cp -av `go env GOPATH`/bin/rclone .
 vars:
@@ -48,7 +47,7 @@ version:
 # Full suite of integration tests
 test: rclone
-go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
+go install --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/ncw/rclone/fstest/test_all
 -test_all 2>&1 | tee test_all.log
 @echo "Written logs in test_all.log"
@@ -61,8 +60,11 @@ racequicktest:
 # Do source code quality checks
 check: rclone
+@# we still run go vet for -printfuncs which golangci-lint doesn't do yet
+@# see: https://github.com/golangci/golangci-lint/issues/204
 @echo "-- START CODE QUALITY REPORT -------------------------------"
-@golangci-lint run $(LINTTAGS) ./...
+@go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
+@golangci-lint run ./...
 @echo "-- END CODE QUALITY REPORT ---------------------------------"
@@ -95,10 +97,10 @@ MANUAL.txt: MANUAL.md
 pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
 commanddocs: rclone
-XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
+XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/commands/
 backenddocs: rclone bin/make_backend_docs.py
-XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
+./bin/make_backend_docs.py
 rcdocs: rclone
 bin/make_rc_docs.sh
@@ -151,7 +153,7 @@ log_since_last_release:
 git log $(LAST_TAG)..
 compile_all:
-go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
+go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)
 appveyor_upload:
 rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
@@ -167,12 +169,22 @@ ifndef BRANCH_PATH
 endif
 @echo Beta release ready at $(BETA_URL)/testbuilds
+BUILD_FLAGS := -exclude "^(windows|darwin)/"
+ifeq ($(TRAVIS_OS_NAME),osx)
+BUILD_FLAGS := -include "^darwin/" -cgo
+endif
+ifeq ($(TRAVIS_OS_NAME),windows)
+# BUILD_FLAGS := -include "^windows/" -cgo
+# 386 doesn't build yet
+BUILD_FLAGS := -include "^windows/amd64" -cgo
+endif
 travis_beta:
 ifeq ($(TRAVIS_OS_NAME),linux)
 go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
 endif
 git log $(LAST_TAG).. > /tmp/git-log.txt
-go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
+go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)
 rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
 ifndef BRANCH_PATH
 rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
@@ -181,7 +193,7 @@ endif
 # Fetch the binary builds from travis and appveyor
 fetch_binaries:
-rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
+rclone -P sync $(BETA_UPLOAD) build/
 serve: website
 cd docs && hugo server -v -w

README.md

@@ -7,12 +7,12 @@
 [Changelog](https://rclone.org/changelog/) |
 [Installation](https://rclone.org/install/) |
 [Forum](https://forum.rclone.org/) |
-[![Build Status](https://travis-ci.org/rclone/rclone.svg?branch=master)](https://travis-ci.org/rclone/rclone)
-[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/rclone/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/rclone/rclone)
-[![CircleCI](https://circleci.com/gh/rclone/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/rclone/rclone/tree/master)
-[![Go Report Card](https://goreportcard.com/badge/github.com/rclone/rclone)](https://goreportcard.com/report/github.com/rclone/rclone)
-[![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone)
+[G+](https://google.com/+RcloneOrg)
+[![Build Status](https://travis-ci.org/ncw/rclone.svg?branch=master)](https://travis-ci.org/ncw/rclone)
+[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/ncw/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/ncw/rclone)
+[![CircleCI](https://circleci.com/gh/ncw/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/ncw/rclone/tree/master)
+[![GoDoc](https://godoc.org/github.com/ncw/rclone?status.svg)](https://godoc.org/github.com/ncw/rclone)
 # Rclone
@@ -20,7 +20,6 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 ## Storage providers
-* 1Fichier [:page_facing_up:](https://rclone.org/ficher/)
 * Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
 * Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
 * Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
@@ -33,7 +32,6 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 * FTP [:page_facing_up:](https://rclone.org/ftp/)
 * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
 * Google Drive [:page_facing_up:](https://rclone.org/drive/)
-* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
 * HTTP [:page_facing_up:](https://rclone.org/http/)
 * Hubic [:page_facing_up:](https://rclone.org/hubic/)
 * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
@@ -75,8 +73,6 @@ Please see [the full list of all storage providers and their features](https://r
 * Optional encryption ([Crypt](https://rclone.org/crypt/))
 * Optional cache ([Cache](https://rclone.org/cache/))
 * Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
-* Multi-threaded downloads to local disk
-* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
 ## Installation & documentation

RELEASE.md

@@ -11,7 +11,7 @@ Making a release
 * edit docs/content/changelog.md
 * make doc
 * git status - to check for new man pages - git add them
-* git commit -a -v -m "Version v1.XX.0"
+* git commit -a -v -m "Version v1.XX"
 * make retag
 * git push --tags origin master
 * # Wait for the appveyor and travis builds to complete then...
@@ -27,7 +27,6 @@ Making a release
 Early in the next release cycle update the vendored dependencies
 * Review any pinned packages in go.mod and remove if possible
-* GO111MODULE=on go get -u github.com/spf13/cobra@master
 * make update
 * git status
 * git add new files

azure-pipelines.yml

@@ -1,189 +0,0 @@
----
-# Azure pipelines build for rclone
-# Parts stolen shamelessly from all round the Internet, especially Caddy
-trigger:
-tags:
-include:
-- '*'
-strategy:
-matrix:
-go1.9:
-imageName: ubuntu-16.04
-gorootDir: /usr/local
-GO_VERSION: go1.9.7
-MAKE_QUICKTEST: true
-go1.10:
-imageName: ubuntu-16.04
-gorootDir: /usr/local
-GO_VERSION: go1.10.8
-MAKE_QUICKTEST: true
-go1.11:
-imageName: ubuntu-16.04
-gorootDir: /usr/local
-GO_VERSION: go1.11.8
-MAKE_QUICKTEST: true
-linux:
-imageName: ubuntu-16.04
-gorootDir: /usr/local
-GO_VERSION: latest
-GOTAGS: cmount
-BUILD_FLAGS: '-include "^linux/"'
-MAKE_CHECK: true
-MAKE_QUICKTEST: true
-DEPLOY: true
-other_os:
-imageName: ubuntu-16.04
-gorootDir: /usr/local
-GO_VERSION: latest
-BUILD_FLAGS: '-exclude "^(windows|darwin|linux)/"'
-MAKE_COMPILE_ALL: true
-DEPLOY: true
-modules_race:
-imageName: ubuntu-16.04
-gorootDir: /usr/local
-GO_VERSION: latest
-GO111MODULE: on
-GOPROXY: https://proxy.golang.org
-MAKE_QUICKTEST: true
-RACEMAKE_QUICKTEST: true
-mac:
-imageName: macos-10.13
-gorootDir: /usr/local
-GO_VERSION: latest
-GOTAGS: "" # cmount doesn't work on osx travis for some reason
-BUILD_FLAGS: '-include "^darwin/" -cgo'
-MAKE_QUICKTEST: true
-RACEMAKE_QUICKTEST: true
-DEPLOY: true
-windows:
-imageName: windows-2019
-gorootDir: C:\
-GO_VERSION: latest
-BUILD_FLAGS: '-include "^windows/amd64" -cgo' # 386 doesn't build yet
-MAKE_QUICKTEST: true
-DEPLOY: true
-pool:
-vmImage: $(imageName)
-variables:
-GOROOT: $(gorootDir)/go
-GOPATH: $(system.defaultWorkingDirectory)/gopath
-GOBIN: $(GOPATH)/bin
-modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)'
-GO111MODULE: 'off'
-GOTAGS: cmount
-GO_LATEST: false
-CPATH: ''
-steps:
-- bash: |
-latestGo=$(curl "https://golang.org/VERSION?m=text")
-echo "##vso[task.setvariable variable=GO_VERSION]$latestGo"
-echo "##vso[task.setvariable variable=GO_LATEST]true"
-echo "Latest Go version: $latestGo"
-condition: eq( variables['GO_VERSION'], 'latest' )
-displayName: "Get latest Go version"
-- bash: |
-sudo rm -f $(which go)
-echo '##vso[task.prependpath]$(GOBIN)'
-echo '##vso[task.prependpath]$(GOROOT)/bin'
-mkdir -p '$(modulePath)'
-shopt -s extglob
-shopt -s dotglob
-mv !(gopath) '$(modulePath)'
-displayName: Remove old Go, set GOBIN/GOROOT, and move project into GOPATH
-# Install Libraries (varies by platform)
-- bash: |
-sudo modprobe fuse
-sudo chmod 666 /dev/fuse
-sudo chown root:$USER /etc/fuse.conf
-sudo apt-get install fuse libfuse-dev rpm pkg-config
-condition: eq( variables['Agent.OS'], 'Linux' )
-displayName: Install Libraries on Linux
-- bash: |
-brew update
-brew tap caskroom/cask
-brew cask install osxfuse
-condition: eq( variables['Agent.OS'], 'Darwin' )
-displayName: Install Libraries on macOS
-- powershell: |
-choco install -y winfsp zip make
-Write-Host "##vso[task.setvariable variable=CPATH]C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
-condition: eq( variables['Agent.OS'], 'Windows_NT' )
-displayName: Install Libraries on Windows
-# Install Go (this varies by platform)
-- bash: |
-wget "https://dl.google.com/go/$(GO_VERSION).linux-amd64.tar.gz"
-sudo mkdir $(gorootDir)
-sudo chown ${USER}:${USER} $(gorootDir)
-tar -C $(gorootDir) -xzf "$(GO_VERSION).linux-amd64.tar.gz"
-condition: eq( variables['Agent.OS'], 'Linux' )
-displayName: Install Go on Linux
-- bash: |
-wget "https://dl.google.com/go/$(GO_VERSION).darwin-amd64.tar.gz"
-sudo tar -C $(gorootDir) -xzf "$(GO_VERSION).darwin-amd64.tar.gz"
-condition: eq( variables['Agent.OS'], 'Darwin' )
-displayName: Install Go on macOS
-- powershell: |
-Write-Host "Downloading Go... (please be patient, I am very slow)"
-(New-Object System.Net.WebClient).DownloadFile("https://dl.google.com/go/$(GO_VERSION).windows-amd64.zip", "$(GO_VERSION).windows-amd64.zip")
-Write-Host "Extracting Go... (I'm slow too)"
-Expand-Archive "$(GO_VERSION).windows-amd64.zip" -DestinationPath "$(gorootDir)"
-condition: eq( variables['Agent.OS'], 'Windows_NT' )
-displayName: Install Go on Windows
-- bash: |
-printf "Using go at: $(which go)\n"
-printf "Go version: $(go version)\n"
-printf "\n\nGo environment:\n\n"
-go env
-printf "\n\nSystem environment:\n\n"
-env
-printf "\n\nRclone environment:\n\n"
-make vars
-workingDirectory: '$(modulePath)'
-displayName: Print Go version and environment
-- script: |
-make
-make quicktest
-workingDirectory: '$(modulePath)'
-displayName: Run tests
-condition: eq( variables['MAKE_QUICKTEST'], 'true' )
-- bash: |
-make racequicktest
-workingDirectory: '$(modulePath)'
-displayName: Race test
-condition: eq( variables['RACEMAKE_QUICKTEST'], 'true' )
-- bash: |
-make build_dep
-make check
-workingDirectory: '$(modulePath)'
-displayName: Code quality test
-condition: eq( variables['MAKE_CHECK'], 'true' )
-- bash: |
-make compile_all
-workingDirectory: '$(modulePath)'
-displayName: Compile all architectures test
-condition: eq( variables['MAKE_COMPILE_ALL'], 'true' )
-- bash: |
-make vars # FIXME travis_beta
-workingDirectory: '$(modulePath)'
-displayName: Deploy built binaries
-condition: and( eq( variables['DEPLOY'], 'true' ), ne( variables['Build.Reason'], 'PullRequest' ) )

backend/alias/alias.go

@@ -4,17 +4,17 @@ import (
 "errors"
 "strings"
-"github.com/rclone/rclone/fs"
-"github.com/rclone/rclone/fs/config/configmap"
-"github.com/rclone/rclone/fs/config/configstruct"
-"github.com/rclone/rclone/fs/fspath"
+"github.com/ncw/rclone/fs"
+"github.com/ncw/rclone/fs/config/configmap"
+"github.com/ncw/rclone/fs/config/configstruct"
+"github.com/ncw/rclone/fs/fspath"
 )
 // Register with Fs
 func init() {
 fsi := &fs.RegInfo{
 Name: "alias",
-Description: "Alias for an existing remote",
+Description: "Alias for a existing remote",
 NewFs: NewFs,
 Options: []fs.Option{{
 Name: "remote",

backend/alias/alias_internal_test.go

@@ -1,16 +1,15 @@
 package alias
 import (
-"context"
 "fmt"
 "path"
 "path/filepath"
 "sort"
 "testing"
-_ "github.com/rclone/rclone/backend/local" // pull in test backend
-"github.com/rclone/rclone/fs"
-"github.com/rclone/rclone/fs/config"
+_ "github.com/ncw/rclone/backend/local" // pull in test backend
+"github.com/ncw/rclone/fs"
+"github.com/ncw/rclone/fs/config"
 "github.com/stretchr/testify/require"
 )
@@ -70,7 +69,7 @@ func TestNewFS(t *testing.T) {
 prepare(t, remoteRoot)
 f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
 require.NoError(t, err, what)
-gotEntries, err := f.List(context.Background(), test.fsList)
+gotEntries, err := f.List(test.fsList)
 require.NoError(t, err, what)
 sort.Sort(gotEntries)
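
The bulk of the backend changes in this compare is the same mechanical edit: the rclone/rclone side threads a context.Context through every fs method (List, NewObject, Put, Mkdir, ...), while the ncw/rclone side has the context-free signatures. A minimal sketch of the pattern, using a hypothetical cut-down interface rather than rclone's real fs.Fs:

    package main

    import (
    	"context"
    	"fmt"
    	"time"
    )

    // Lister is a stand-in for an fs backend method. Taking ctx as the
    // first argument lets callers cancel or time out remote operations.
    type Lister interface {
    	List(ctx context.Context, dir string) ([]string, error)
    }

    type memFs map[string][]string

    func (m memFs) List(ctx context.Context, dir string) ([]string, error) {
    	// Honour cancellation before doing any (potentially slow) work.
    	if err := ctx.Err(); err != nil {
    		return nil, err
    	}
    	return m[dir], nil
    }

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    	defer cancel()
    	entries, err := memFs{"": {"file.txt"}}.List(ctx, "")
    	fmt.Println(entries, err)
    }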

backend/all/all.go

@@ -2,33 +2,31 @@ package all
 import (
 // Active file systems
-_ "github.com/rclone/rclone/backend/alias"
-_ "github.com/rclone/rclone/backend/amazonclouddrive"
-_ "github.com/rclone/rclone/backend/azureblob"
-_ "github.com/rclone/rclone/backend/b2"
-_ "github.com/rclone/rclone/backend/box"
-_ "github.com/rclone/rclone/backend/cache"
-_ "github.com/rclone/rclone/backend/crypt"
-_ "github.com/rclone/rclone/backend/drive"
-_ "github.com/rclone/rclone/backend/dropbox"
-_ "github.com/rclone/rclone/backend/fichier"
-_ "github.com/rclone/rclone/backend/ftp"
-_ "github.com/rclone/rclone/backend/googlecloudstorage"
-_ "github.com/rclone/rclone/backend/googlephotos"
-_ "github.com/rclone/rclone/backend/http"
-_ "github.com/rclone/rclone/backend/hubic"
-_ "github.com/rclone/rclone/backend/jottacloud"
-_ "github.com/rclone/rclone/backend/koofr"
-_ "github.com/rclone/rclone/backend/local"
-_ "github.com/rclone/rclone/backend/mega"
-_ "github.com/rclone/rclone/backend/onedrive"
-_ "github.com/rclone/rclone/backend/opendrive"
-_ "github.com/rclone/rclone/backend/pcloud"
-_ "github.com/rclone/rclone/backend/qingstor"
-_ "github.com/rclone/rclone/backend/s3"
-_ "github.com/rclone/rclone/backend/sftp"
-_ "github.com/rclone/rclone/backend/swift"
-_ "github.com/rclone/rclone/backend/union"
-_ "github.com/rclone/rclone/backend/webdav"
-_ "github.com/rclone/rclone/backend/yandex"
+_ "github.com/ncw/rclone/backend/alias"
+_ "github.com/ncw/rclone/backend/amazonclouddrive"
+_ "github.com/ncw/rclone/backend/azureblob"
+_ "github.com/ncw/rclone/backend/b2"
+_ "github.com/ncw/rclone/backend/box"
+_ "github.com/ncw/rclone/backend/cache"
+_ "github.com/ncw/rclone/backend/crypt"
+_ "github.com/ncw/rclone/backend/drive"
+_ "github.com/ncw/rclone/backend/dropbox"
+_ "github.com/ncw/rclone/backend/ftp"
+_ "github.com/ncw/rclone/backend/googlecloudstorage"
+_ "github.com/ncw/rclone/backend/http"
+_ "github.com/ncw/rclone/backend/hubic"
+_ "github.com/ncw/rclone/backend/jottacloud"
+_ "github.com/ncw/rclone/backend/koofr"
+_ "github.com/ncw/rclone/backend/local"
+_ "github.com/ncw/rclone/backend/mega"
+_ "github.com/ncw/rclone/backend/onedrive"
+_ "github.com/ncw/rclone/backend/opendrive"
+_ "github.com/ncw/rclone/backend/pcloud"
+_ "github.com/ncw/rclone/backend/qingstor"
+_ "github.com/ncw/rclone/backend/s3"
+_ "github.com/ncw/rclone/backend/sftp"
+_ "github.com/ncw/rclone/backend/swift"
+_ "github.com/ncw/rclone/backend/union"
+_ "github.com/ncw/rclone/backend/webdav"
+_ "github.com/ncw/rclone/backend/yandex"
 )

backend/amazonclouddrive/amazonclouddrive.go

@@ -12,7 +12,6 @@ we ignore assets completely!
 */
 import (
-"context"
 "encoding/json"
 "fmt"
 "io"
@@ -23,17 +22,18 @@ import (
 "time"
 acd "github.com/ncw/go-acd"
+"github.com/ncw/rclone/fs"
+"github.com/ncw/rclone/fs/config"
+"github.com/ncw/rclone/fs/config/configmap"
+"github.com/ncw/rclone/fs/config/configstruct"
+"github.com/ncw/rclone/fs/fserrors"
+"github.com/ncw/rclone/fs/fshttp"
+"github.com/ncw/rclone/fs/hash"
+"github.com/ncw/rclone/lib/dircache"
+"github.com/ncw/rclone/lib/oauthutil"
+"github.com/ncw/rclone/lib/pacer"
+"github.com/ncw/rclone/lib/rest"
 "github.com/pkg/errors"
-"github.com/rclone/rclone/fs"
-"github.com/rclone/rclone/fs/config"
-"github.com/rclone/rclone/fs/config/configmap"
-"github.com/rclone/rclone/fs/config/configstruct"
-"github.com/rclone/rclone/fs/fserrors"
-"github.com/rclone/rclone/fs/fshttp"
-"github.com/rclone/rclone/fs/hash"
-"github.com/rclone/rclone/lib/dircache"
-"github.com/rclone/rclone/lib/oauthutil"
-"github.com/rclone/rclone/lib/pacer"
 "golang.org/x/oauth2"
 )
@@ -247,7 +247,6 @@ func filterRequest(req *http.Request) {
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-ctx := context.Background()
 // Parse config into Options struct
 opt := new(Options)
 err := configstruct.Set(m, opt)
@@ -309,7 +308,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 f.dirCache = dircache.New(root, f.trueRootID, f)
 // Find the current root
-err = f.dirCache.FindRoot(ctx, false)
+err = f.dirCache.FindRoot(false)
 if err != nil {
 // Assume it is a file
 newRoot, remote := dircache.SplitPath(root)
@@ -317,12 +316,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF)
 tempF.root = newRoot
 // Make new Fs which is the parent
-err = tempF.dirCache.FindRoot(ctx, false)
+err = tempF.dirCache.FindRoot(false)
 if err != nil {
 // No root so return old f
 return f, nil
 }
-_, err := tempF.newObjectWithInfo(ctx, remote, nil)
+_, err := tempF.newObjectWithInfo(remote, nil)
 if err != nil {
 if err == fs.ErrorObjectNotFound {
 // File doesn't exist so return old f
@@ -332,7 +331,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 }
 // XXX: update the old f here instead of returning tempF, since
 // `features` were already filled with functions having *f as a receiver.
-// See https://github.com/rclone/rclone/issues/2182
+// See https://github.com/ncw/rclone/issues/2182
 f.dirCache = tempF.dirCache
 f.root = tempF.root
 // return an error with an fs which points to the parent
@@ -354,7 +353,7 @@ func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) {
 // Return an Object from a path
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Node) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error) {
 o := &Object{
 fs: f,
 remote: remote,
@@ -363,7 +362,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Nod
 // Set info but not meta
 o.info = info
 } else {
-err := o.readMetaData(ctx) // reads info and meta, returning an error
+err := o.readMetaData() // reads info and meta, returning an error
 if err != nil {
 return nil, err
 }
@@ -373,12 +372,12 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Nod
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-return f.newObjectWithInfo(ctx, remote, nil)
+func (f *Fs) NewObject(remote string) (fs.Object, error) {
+return f.newObjectWithInfo(remote, nil)
 }
 // FindLeaf finds a directory of name leaf in the folder with ID pathID
-func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
+func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
 //fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
 folder := acd.FolderFromId(pathID, f.c.Nodes)
 var resp *http.Response
@@ -405,7 +404,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
 }
 // CreateDir makes a directory with pathID as parent and name leaf
-func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
+func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
 //fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
 folder := acd.FolderFromId(pathID, f.c.Nodes)
 var resp *http.Response
@@ -503,12 +502,12 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-err = f.dirCache.FindRoot(ctx, false)
+func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+err = f.dirCache.FindRoot(false)
 if err != nil {
 return nil, err
 }
-directoryID, err := f.dirCache.FindDir(ctx, dir, false)
+directoryID, err := f.dirCache.FindDir(dir, false)
 if err != nil {
 return nil, err
 }
@@ -526,7 +525,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 d := fs.NewDir(remote, when).SetID(*node.Id)
 entries = append(entries, d)
 case fileKind:
-o, err := f.newObjectWithInfo(ctx, remote, node)
+o, err := f.newObjectWithInfo(remote, node)
 if err != nil {
 iErr = err
 return true
@@ -570,7 +569,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // At the end of large uploads. The speculation is that the timeout
 // is waiting for the sha1 hashing to complete and the file may well
 // be properly uploaded.
-func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
+func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
 // Return if no error - all is well
 if inErr == nil {
 return false, inInfo, inErr
@@ -610,7 +609,7 @@ func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader,
 fs.Debugf(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
 remote := src.Remote()
 for i := 1; i <= retries; i++ {
-o, err := f.NewObject(ctx, remote)
+o, err := f.NewObject(remote)
 if err == fs.ErrorObjectNotFound {
 fs.Debugf(src, "Object not found - waiting (%d/%d)", i, retries)
 } else if err != nil {
@@ -636,7 +635,7 @@ func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader,
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 remote := src.Remote()
 size := src.Size()
 // Temporary Object under construction
@@ -645,17 +644,17 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 remote: remote,
 }
 // Check if object already exists
-err := o.readMetaData(ctx)
+err := o.readMetaData()
 switch err {
 case nil:
-return o, o.Update(ctx, in, src, options...)
+return o, o.Update(in, src, options...)
 case fs.ErrorObjectNotFound:
 // Not found so create it
 default:
 return nil, err
 }
 // If not create it
-leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
+leaf, directoryID, err := f.dirCache.FindRootAndPath(remote, true)
 if err != nil {
 return nil, err
 }
@@ -671,7 +670,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 info, resp, err = folder.Put(in, leaf)
 f.tokenRenewer.Stop()
 var ok bool
-ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
+ok, info, err = f.checkUpload(resp, in, src, info, err, time.Since(start))
 if ok {
 return false, nil
 }
@@ -685,13 +684,13 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 }
 // Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-err := f.dirCache.FindRoot(ctx, true)
+func (f *Fs) Mkdir(dir string) error {
+err := f.dirCache.FindRoot(true)
 if err != nil {
 return err
 }
 if dir != "" {
-_, err = f.dirCache.FindDir(ctx, dir, true)
+_, err = f.dirCache.FindDir(dir, true)
 }
 return err
 }
@@ -705,7 +704,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 // go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsMove)$'
 srcObj, ok := src.(*Object)
 if !ok {
@@ -714,15 +713,15 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 // create the destination directory if necessary
-err := f.dirCache.FindRoot(ctx, true)
+err := f.dirCache.FindRoot(true)
 if err != nil {
 return nil, err
 }
-srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
+srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(srcObj.remote, false)
 if err != nil {
 return nil, err
 }
-dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, remote, true)
+dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(remote, true)
 if err != nil {
 return nil, err
 }
@@ -738,12 +737,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 srcErr, dstErr error
 )
 for i := 1; i <= fs.Config.LowLevelRetries; i++ {
-_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
+_, srcErr = srcObj.fs.NewObject(srcObj.remote) // try reading the object
 if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
 // exit if error on source
 return nil, srcErr
 }
-dstObj, dstErr = f.NewObject(ctx, remote)
+dstObj, dstErr = f.NewObject(remote)
 if dstErr != nil && dstErr != fs.ErrorObjectNotFound {
 // exit if error on dst
 return nil, dstErr
@@ -772,7 +771,7 @@ func (f *Fs) DirCacheFlush() {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
+func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
 srcFs, ok := src.(*Fs)
 if !ok {
 fs.Debugf(src, "DirMove error: not same remote type")
@@ -788,14 +787,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 }
 // find the root src directory
-err = srcFs.dirCache.FindRoot(ctx, false)
+err = srcFs.dirCache.FindRoot(false)
 if err != nil {
 return err
 }
 // find the root dst directory
 if dstRemote != "" {
-err = f.dirCache.FindRoot(ctx, true)
+err = f.dirCache.FindRoot(true)
 if err != nil {
 return err
 }
@@ -810,14 +809,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 if dstRemote == "" {
 findPath = f.root
 }
-dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, findPath, true)
+dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(findPath, true)
 if err != nil {
 return err
 }
 // Check destination does not exist
 if dstRemote != "" {
-_, err = f.dirCache.FindDir(ctx, dstRemote, false)
+_, err = f.dirCache.FindDir(dstRemote, false)
 if err == fs.ErrorDirNotFound {
 // OK
 } else if err != nil {
@@ -833,7 +832,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 if srcRemote == "" {
 srcDirectoryID, err = srcFs.dirCache.RootParentID()
 } else {
-_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false)
+_, srcDirectoryID, err = srcFs.dirCache.FindPath(findPath, false)
 }
 if err != nil {
 return err
@@ -841,7 +840,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 srcLeaf, _ := dircache.SplitPath(srcPath)
 // Find ID of src
-srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
+srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
 if err != nil {
 return err
 }
@@ -874,17 +873,17 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 // purgeCheck remotes the root directory, if check is set then it
 // refuses to do so if it has anything in
-func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
+func (f *Fs) purgeCheck(dir string, check bool) error {
 root := path.Join(f.root, dir)
 if root == "" {
 return errors.New("can't purge root directory")
 }
 dc := f.dirCache
-err := dc.FindRoot(ctx, false)
+err := dc.FindRoot(false)
 if err != nil {
 return err
 }
-rootID, err := dc.FindDir(ctx, dir, false)
+rootID, err := dc.FindDir(dir, false)
 if err != nil {
 return err
 }
@@ -933,8 +932,8 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 // Rmdir deletes the root folder
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-return f.purgeCheck(ctx, dir, true)
+func (f *Fs) Rmdir(dir string) error {
+return f.purgeCheck(dir, true)
 }
 // Precision return the precision of this Fs
@@ -956,7 +955,7 @@ func (f *Fs) Hashes() hash.Set {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-//func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+//func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 // srcObj, ok := src.(*Object)
 // if !ok {
 // fs.Debugf(src, "Can't copy - not same remote type")
@@ -967,7 +966,7 @@ func (f *Fs) Hashes() hash.Set {
 // if err != nil {
 // return nil, err
 // }
-// return f.NewObject(ctx, remote), nil
+// return f.NewObject(remote), nil
 //}
 // Purge deletes all the files and the container
@@ -975,8 +974,8 @@ func (f *Fs) Hashes() hash.Set {
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge(ctx context.Context) error {
-return f.purgeCheck(ctx, "", false)
+func (f *Fs) Purge() error {
+return f.purgeCheck("", false)
 }
 // ------------------------------------------------------------
@@ -1000,7 +999,7 @@ func (o *Object) Remote() string {
 }
 // Hash returns the Md5sum of an object returning a lowercase hex string
-func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
+func (o *Object) Hash(t hash.Type) (string, error) {
 if t != hash.MD5 {
 return "", hash.ErrUnsupported
 }
@@ -1023,11 +1022,11 @@ func (o *Object) Size() int64 {
 // it also sets the info
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (o *Object) readMetaData(ctx context.Context) (err error) {
+func (o *Object) readMetaData() (err error) {
 if o.info != nil {
 return nil
 }
-leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
+leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(o.remote, false)
 if err != nil {
 if err == fs.ErrorDirNotFound {
 return fs.ErrorObjectNotFound
@@ -1056,8 +1055,8 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime(ctx context.Context) time.Time {
-err := o.readMetaData(ctx)
+func (o *Object) ModTime() time.Time {
+err := o.readMetaData()
 if err != nil {
 fs.Debugf(o, "Failed to read metadata: %v", err)
 return time.Now()
@@ -1071,7 +1070,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
 }
 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
+func (o *Object) SetModTime(modTime time.Time) error {
 // FIXME not implemented
 return fs.ErrorCantSetModTime
 }
@@ -1082,7 +1081,7 @@ func (o *Object) Storable() bool {
 }
 // Open an object for read
-func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
 if bigObject {
 fs.Debugf(o, "Downloading large object via tempLink")
@@ -1094,7 +1093,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 if !bigObject {
 in, resp, err = file.OpenHeaders(headers)
 } else {
-in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers)
+in, resp, err = file.OpenTempURLHeaders(rest.ClientWithHeaderReset(o.fs.noAuthClient, headers), headers)
 }
 return o.fs.shouldRetry(resp, err)
 })
@@ -1104,7 +1103,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 // Update the object with the contents of the io.Reader, modTime and size
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 file := acd.File{Node: o.info}
 var info *acd.File
 var resp *http.Response
@@ -1115,7 +1114,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 info, resp, err = file.Overwrite(in)
 o.fs.tokenRenewer.Stop()
 var ok bool
-ok, info, err = o.fs.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
+ok, info, err = o.fs.checkUpload(resp, in, src, info, err, time.Since(start))
 if ok {
 return false, nil
 }
@@ -1140,7 +1139,7 @@ func (f *Fs) removeNode(info *acd.Node) error {
 }
 // Remove an object
-func (o *Object) Remove(ctx context.Context) error {
+func (o *Object) Remove() error {
 return o.fs.removeNode(o.info)
 }
@@ -1262,7 +1261,7 @@ OnConflict:
 }
 // MimeType of an Object if known, "" otherwise
-func (o *Object) MimeType(ctx context.Context) string {
+func (o *Object) MimeType() string {
 if o.info.ContentProperties != nil && o.info.ContentProperties.ContentType != nil {
 return *o.info.ContentProperties.ContentType
 }
@@ -1275,7 +1274,7 @@ func (o *Object) MimeType(ctx context.Context) string {
 // Automatically restarts itself in case of unexpected behaviour of the remote.
 //
 // Close the returned channel to stop being notified.
-func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
+func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
 checkpoint := f.opt.Checkpoint
 go func() {
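
One piece of logic worth calling out in the hunks above is checkUpload: Amazon Drive sometimes returns a timeout for an upload that actually completed (the comment speculates the server is still sha1-hashing), so after an apparent failure the code polls NewObject for the uploaded file before reporting an error. A rough sketch of that retry shape, with a hypothetical lookup callback standing in for NewObject:

    package main

    import (
    	"errors"
    	"fmt"
    	"time"
    )

    var errNotFound = errors.New("object not found")

    // verifyUpload polls lookup until the object appears or retries run out.
    // An upload that returned an error may still have landed on the remote,
    // so we look for the object before trusting the original error.
    func verifyUpload(lookup func() error, retries int, wait time.Duration) error {
    	for i := 1; i <= retries; i++ {
    		switch err := lookup(); err {
    		case nil:
    			return nil // object exists: treat the upload as a success
    		case errNotFound:
    			time.Sleep(wait) // not visible yet: wait and poll again
    		default:
    			return err // a real error: give up immediately
    		}
    	}
    	return errNotFound
    }

    func main() {
    	calls := 0
    	lookup := func() error {
    		calls++
    		if calls < 3 {
    			return errNotFound
    		}
    		return nil
    	}
    	fmt.Println(verifyUpload(lookup, 5, 10*time.Millisecond), "after", calls, "lookups")
    }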

backend/amazonclouddrive/amazonclouddrive_test.go

@@ -7,9 +7,9 @@ package amazonclouddrive_test
 import (
 "testing"
-"github.com/rclone/rclone/backend/amazonclouddrive"
-"github.com/rclone/rclone/fs"
-"github.com/rclone/rclone/fstest/fstests"
+"github.com/ncw/rclone/backend/amazonclouddrive"
+"github.com/ncw/rclone/fs"
+"github.com/ncw/rclone/fstest/fstests"
 )
 // TestIntegration runs integration tests against the remote

backend/azureblob/azureblob.go

@@ -1,6 +1,6 @@
// Package azureblob provides an interface to the Microsoft Azure blob object storage system // Package azureblob provides an interface to the Microsoft Azure blob object storage system
// +build !plan9,!solaris // +build !plan9,!solaris,go1.8
package azureblob package azureblob
@@ -24,16 +24,16 @@ import (
"github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-pipeline-go/pipeline"
"github.com/Azure/azure-storage-blob-go/azblob" "github.com/Azure/azure-storage-blob-go/azblob"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/pacer"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/pacer"
) )
const ( const (
@@ -53,11 +53,6 @@ const (
maxUploadCutoff = 256 * fs.MebiByte maxUploadCutoff = 256 * fs.MebiByte
defaultAccessTier = azblob.AccessTierNone defaultAccessTier = azblob.AccessTierNone
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing) maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
// Default storage account, key and blob endpoint for emulator support,
// though it is a base64 key checked in here, it is publicly available secret.
emulatorAccount = "devstoreaccount1"
emulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
) )
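
The emulator constants above are all that is needed to talk to Azurite or the legacy storage emulator. A minimal sketch wiring them into a container client with the same azure-storage-blob-go calls NewFs relies on; azblob.NewPipeline stands in for the backend's custom f.newPipeline, and the container name is made up.

package main

import (
    "log"
    "net/url"

    "github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
    // The well-known public emulator credentials and endpoint from the
    // constants above (Azurite ships the same defaults).
    credential, err := azblob.NewSharedKeyCredential(
        "devstoreaccount1",
        "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==",
    )
    if err != nil {
        log.Fatal(err)
    }
    u, err := url.Parse("http://127.0.0.1:10000/devstoreaccount1")
    if err != nil {
        log.Fatal(err)
    }
    serviceURL := azblob.NewServiceURL(*u, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
    containerURL := serviceURL.NewContainerURL("test") // container name is illustrative
    _ = containerURL                                   // ready for Create, ListBlobs, etc.
}
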
// Register with Fs // Register with Fs
@@ -68,17 +63,13 @@ func init() {
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "account", Name: "account",
Help: "Storage Account Name (leave blank to use SAS URL or Emulator)", Help: "Storage Account Name (leave blank to use connection string or SAS URL)",
}, { }, {
Name: "key", Name: "key",
Help: "Storage Account Key (leave blank to use SAS URL or Emulator)", Help: "Storage Account Key (leave blank to use connection string or SAS URL)",
}, { }, {
Name: "sas_url", Name: "sas_url",
Help: "SAS URL for container level access only\n(leave blank if using account/key or Emulator)", Help: "SAS URL for container level access only\n(leave blank if using account/key or connection string)",
}, {
Name: "use_emulator",
Help: "Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)",
Default: false,
}, { }, {
Name: "endpoint", Name: "endpoint",
Help: "Endpoint for the service\nLeave blank normally.", Help: "Endpoint for the service\nLeave blank normally.",
@@ -138,7 +129,6 @@ type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"` ChunkSize fs.SizeSuffix `config:"chunk_size"`
ListChunkSize uint `config:"list_chunk"` ListChunkSize uint `config:"list_chunk"`
AccessTier string `config:"access_tier"` AccessTier string `config:"access_tier"`
UseEmulator bool `config:"use_emulator"`
} }
// Fs represents a remote azure server // Fs represents a remote azure server
@@ -319,7 +309,6 @@ func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline
// NewFs constructs an Fs from the path, container:path // NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct // Parse config into Options struct
opt := new(Options) opt := new(Options)
err := configstruct.Set(m, opt) err := configstruct.Set(m, opt)
@@ -376,18 +365,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
containerURL azblob.ContainerURL containerURL azblob.ContainerURL
) )
switch { switch {
case opt.UseEmulator:
credential, err := azblob.NewSharedKeyCredential(emulatorAccount, emulatorAccountKey)
if err != nil {
return nil, errors.Wrapf(err, "Failed to parse credentials")
}
u, err = url.Parse(emulatorBlobEndpoint)
if err != nil {
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
}
pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
serviceURL = azblob.NewServiceURL(*u, pipeline)
containerURL = serviceURL.NewContainerURL(container)
case opt.Account != "" && opt.Key != "": case opt.Account != "" && opt.Key != "":
credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key) credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
if err != nil { if err != nil {
@@ -438,7 +415,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} else { } else {
f.root += "/" f.root += "/"
} }
_, err := f.NewObject(ctx, remote) _, err := f.NewObject(remote)
if err != nil { if err != nil {
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile { if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
// File doesn't exist or is a directory so return old f // File doesn't exist or is a directory so return old f
@@ -477,7 +454,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItem) (fs.Object,
// NewObject finds the Object at remote. If it can't be found // NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound. // it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil) return f.newObjectWithInfo(remote, nil)
} }
@@ -519,7 +496,7 @@ type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
// the container and root supplied // the container and root supplied
// //
// dir is the starting directory, "" for root // dir is the starting directory, "" for root
func (f *Fs) list(ctx context.Context, dir string, recurse bool, maxResults uint, fn listFn) error { func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
f.containerOKMu.Lock() f.containerOKMu.Lock()
deleted := f.containerDeleted deleted := f.containerDeleted
f.containerOKMu.Unlock() f.containerOKMu.Unlock()
@@ -546,6 +523,7 @@ func (f *Fs) list(ctx context.Context, dir string, recurse bool, maxResults uint
Prefix: root, Prefix: root,
MaxResults: int32(maxResults), MaxResults: int32(maxResults),
} }
ctx := context.Background()
directoryMarkers := map[string]struct{}{} directoryMarkers := map[string]struct{}{}
for marker := (azblob.Marker{}); marker.NotDone(); { for marker := (azblob.Marker{}); marker.NotDone(); {
var response *azblob.ListBlobsHierarchySegmentResponse var response *azblob.ListBlobsHierarchySegmentResponse
@@ -643,8 +621,8 @@ func (f *Fs) markContainerOK() {
} }
// listDir lists a single directory // listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
err = f.list(ctx, dir, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error { err = f.list(dir, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory) entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil { if err != nil {
return err return err
@@ -687,11 +665,11 @@ func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
// //
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
if f.container == "" { if f.container == "" {
return f.listContainers(dir) return f.listContainers(dir)
} }
return f.listDir(ctx, dir) return f.listDir(dir)
} }
// ListR lists the objects and directories of the Fs starting // ListR lists the objects and directories of the Fs starting
@@ -710,12 +688,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// //
// Don't implement this unless you have a more efficient way // Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal. // of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
if f.container == "" { if f.container == "" {
return fs.ErrorListBucketRequired return fs.ErrorListBucketRequired
} }
list := walk.NewListRHelper(callback) list := walk.NewListRHelper(callback)
err = f.list(ctx, dir, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error { err = f.list(dir, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory) entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil { if err != nil {
return err return err
@@ -767,13 +745,13 @@ func (f *Fs) listContainersToFn(fn listContainerFn) error {
// Copy the reader in to the new object which is returned // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction // Temporary Object under construction
fs := &Object{ fs := &Object{
fs: f, fs: f,
remote: src.Remote(), remote: src.Remote(),
} }
return fs, fs.Update(ctx, in, src, options...) return fs, fs.Update(in, src, options...)
} }
// Check if the container exists // Check if the container exists
@@ -806,7 +784,7 @@ func (f *Fs) dirExists() (bool, error) {
} }
// Mkdir creates the container if it doesn't exist // Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error { func (f *Fs) Mkdir(dir string) error {
f.containerOKMu.Lock() f.containerOKMu.Lock()
defer f.containerOKMu.Unlock() defer f.containerOKMu.Unlock()
if f.containerOK { if f.containerOK {
@@ -853,9 +831,9 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
} }
// isEmpty checks to see if a given directory is empty and returns an error if not // isEmpty checks to see if a given directory is empty and returns an error if not
func (f *Fs) isEmpty(ctx context.Context, dir string) (err error) { func (f *Fs) isEmpty(dir string) (err error) {
empty := true empty := true
err = f.list(ctx, dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error { err = f.list(dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
empty = false empty = false
return nil return nil
}) })
@@ -902,8 +880,8 @@ func (f *Fs) deleteContainer() error {
// Rmdir deletes the container if the fs is at the root // Rmdir deletes the container if the fs is at the root
// //
// Returns an error if it isn't empty // Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(dir string) error {
err := f.isEmpty(ctx, dir) err := f.isEmpty(dir)
if err != nil { if err != nil {
return err return err
} }
@@ -924,7 +902,7 @@ func (f *Fs) Hashes() hash.Set {
} }
// Purge deletes all the files and directories including the old versions. // Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context) error { func (f *Fs) Purge() error {
dir := "" // forward compat! dir := "" // forward compat!
if f.root != "" || dir != "" { if f.root != "" || dir != "" {
// Delegate to caller if not root container // Delegate to caller if not root container
@@ -942,8 +920,8 @@ func (f *Fs) Purge(ctx context.Context) error {
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantCopy // If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
err := f.Mkdir(ctx, "") err := f.Mkdir("")
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -961,6 +939,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
options := azblob.BlobAccessConditions{} options := azblob.BlobAccessConditions{}
ctx := context.Background()
var startCopy *azblob.BlobStartCopyFromURLResponse var startCopy *azblob.BlobStartCopyFromURLResponse
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
@@ -981,7 +960,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
copyStatus = getMetadata.CopyStatus() copyStatus = getMetadata.CopyStatus()
} }
return f.NewObject(ctx, remote) return f.NewObject(remote)
} }
// ------------------------------------------------------------ // ------------------------------------------------------------
@@ -1005,7 +984,7 @@ func (o *Object) Remote() string {
} }
// Hash returns the MD5 of an object returning a lowercase hex string // Hash returns the MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.MD5 { if t != hash.MD5 {
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
@@ -1145,14 +1124,14 @@ func (o *Object) parseTimeString(timeString string) (err error) {
// //
// It attempts to read the objects mtime and if that isn't present the // It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers // LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) (result time.Time) { func (o *Object) ModTime() (result time.Time) {
// The error is logged in readMetaData // The error is logged in readMetaData
_ = o.readMetaData() _ = o.readMetaData()
return o.modTime return o.modTime
} }
// SetModTime sets the modification time of the local fs object // SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { func (o *Object) SetModTime(modTime time.Time) error {
// Make sure o.meta is not nil // Make sure o.meta is not nil
if o.meta == nil { if o.meta == nil {
o.meta = make(map[string]string, 1) o.meta = make(map[string]string, 1)
@@ -1161,6 +1140,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
o.meta[modTimeKey] = modTime.Format(timeFormatOut) o.meta[modTimeKey] = modTime.Format(timeFormatOut)
blob := o.getBlobReference() blob := o.getBlobReference()
ctx := context.Background()
err := o.fs.pacer.Call(func() (bool, error) { err := o.fs.pacer.Call(func() (bool, error) {
_, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{}) _, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{})
return o.fs.shouldRetry(err) return o.fs.shouldRetry(err)
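
Each network call in this backend runs through the same pacer idiom: the closure returns (retry, err) and is re-invoked with backoff while retry is true. A minimal stand-in with the same contract, assuming a fixed sleep where the real lib/pacer applies adaptive delays:

package main

import (
    "errors"
    "fmt"
    "time"
)

// call re-invokes fn while it reports (true, err), like pacer.Call, but
// with a fixed sleep instead of the real pacer's adaptive backoff.
func call(fn func() (bool, error)) error {
    const maxTries = 10
    var err error
    for try := 0; try < maxTries; try++ {
        var again bool
        again, err = fn()
        if !again {
            return err
        }
        time.Sleep(100 * time.Millisecond)
    }
    return err
}

func main() {
    tries := 0
    err := call(func() (bool, error) {
        tries++
        if tries < 3 {
            return true, errors.New("throttled") // ask for another attempt
        }
        return false, nil // done, no error
    })
    fmt.Println(tries, err) // 3 <nil>
}
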
@@ -1178,7 +1158,7 @@ func (o *Object) Storable() bool {
} }
// Open an object for read // Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Offset and Count for range download // Offset and Count for range download
var offset int64 var offset int64
var count int64 var count int64
@@ -1202,6 +1182,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
} }
} }
blob := o.getBlobReference() blob := o.getBlobReference()
ctx := context.Background()
ac := azblob.BlobAccessConditions{} ac := azblob.BlobAccessConditions{}
var downloadResponse *azblob.DownloadResponse var downloadResponse *azblob.DownloadResponse
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
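
The offset and count pulled from the open options describe a byte range. azblob's download call takes offset and count directly, but the underlying HTTP semantics are the usual Range header; a short sketch (count < 0 meaning read to the end):

package main

import "fmt"

// rangeHeader renders an (offset, count) pair as an HTTP Range header.
func rangeHeader(offset, count int64) string {
    if count < 0 {
        return fmt.Sprintf("bytes=%d-", offset)
    }
    return fmt.Sprintf("bytes=%d-%d", offset, offset+count-1)
}

func main() {
    fmt.Println(rangeHeader(100, 50)) // bytes=100-149
    fmt.Println(rangeHeader(100, -1)) // bytes=100-
}
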
@@ -1390,26 +1371,26 @@ outer:
// Update the object with the contents of the io.Reader, modTime and size // Update the object with the contents of the io.Reader, modTime and size
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
err = o.fs.Mkdir(ctx, "") err = o.fs.Mkdir("")
if err != nil { if err != nil {
return err return err
} }
size := src.Size() size := src.Size()
// Update Mod time // Update Mod time
o.updateMetadataWithModTime(src.ModTime(ctx)) o.updateMetadataWithModTime(src.ModTime())
if err != nil { if err != nil {
return err return err
} }
blob := o.getBlobReference() blob := o.getBlobReference()
httpHeaders := azblob.BlobHTTPHeaders{} httpHeaders := azblob.BlobHTTPHeaders{}
httpHeaders.ContentType = fs.MimeType(ctx, o) httpHeaders.ContentType = fs.MimeType(o)
// Compute the Content-MD5 of the file, for multiparts uploads it // Compute the Content-MD5 of the file, for multiparts uploads it
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header // will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
// Note: If multipart, a MD5 checksum will also be computed for each uploaded block // Note: If multipart, a MD5 checksum will also be computed for each uploaded block
// in order to validate its integrity during transport // in order to validate its integrity during transport
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" { if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
sourceMD5bytes, err := hex.DecodeString(sourceMD5) sourceMD5bytes, err := hex.DecodeString(sourceMD5)
if err == nil { if err == nil {
httpHeaders.ContentMD5 = sourceMD5bytes httpHeaders.ContentMD5 = sourceMD5bytes
@@ -1427,13 +1408,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75 // FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75
// is merged the SDK can't upload a single blob of exactly the chunk // is merged the SDK can't upload a single blob of exactly the chunk
// size, so upload with a multipart upload to work around. // size, so upload with a multipart upload to work around.
// See: https://github.com/rclone/rclone/issues/2653 // See: https://github.com/ncw/rclone/issues/2653
multipartUpload := size >= int64(o.fs.opt.UploadCutoff) multipartUpload := size >= int64(o.fs.opt.UploadCutoff)
if size == int64(o.fs.opt.ChunkSize) { if size == int64(o.fs.opt.ChunkSize) {
multipartUpload = true multipartUpload = true
fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size) fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
} }
ctx := context.Background()
// Don't retry, return a retry error instead // Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) { err = o.fs.pacer.CallNoRetry(func() (bool, error) {
if multipartUpload { if multipartUpload {
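
The branch above reduces to a pure function: multipart at or over the upload cutoff, and also when the size equals the chunk size exactly, to sidestep the single-put SDK bug the FIXME references. The sizes in main are illustrative:

package main

import "fmt"

// useMultipart mirrors the decision above: multipart at or over the
// cutoff, and forced when size equals the chunk size (SDK workaround).
func useMultipart(size, uploadCutoff, chunkSize int64) bool {
    if size == chunkSize {
        return true
    }
    return size >= uploadCutoff
}

func main() {
    const MiB = int64(1) << 20
    fmt.Println(useMultipart(4*MiB, 256*MiB, 4*MiB))   // true: forced by the workaround
    fmt.Println(useMultipart(100*MiB, 256*MiB, 4*MiB)) // false: single put
    fmt.Println(useMultipart(300*MiB, 256*MiB, 4*MiB)) // true: over the cutoff
}
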
@@ -1466,10 +1448,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
// Remove an object // Remove an object
func (o *Object) Remove(ctx context.Context) error { func (o *Object) Remove() error {
blob := o.getBlobReference() blob := o.getBlobReference()
snapShotOptions := azblob.DeleteSnapshotsOptionNone snapShotOptions := azblob.DeleteSnapshotsOptionNone
ac := azblob.BlobAccessConditions{} ac := azblob.BlobAccessConditions{}
ctx := context.Background()
return o.fs.pacer.Call(func() (bool, error) { return o.fs.pacer.Call(func() (bool, error) {
_, err := blob.Delete(ctx, snapShotOptions, ac) _, err := blob.Delete(ctx, snapShotOptions, ac)
return o.fs.shouldRetry(err) return o.fs.shouldRetry(err)
@@ -1477,7 +1460,7 @@ func (o *Object) Remove(ctx context.Context) error {
} }
// MimeType of an Object if known, "" otherwise // MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string { func (o *Object) MimeType() string {
return o.mimeType return o.mimeType
} }

View File

@@ -1,4 +1,4 @@
// +build !plan9,!solaris // +build !plan9,!solaris,go1.8
package azureblob package azureblob

View File

@@ -1,14 +1,14 @@
// Test AzureBlob filesystem interface // Test AzureBlob filesystem interface
// +build !plan9,!solaris // +build !plan9,!solaris,go1.8
package azureblob package azureblob
import ( import (
"testing" "testing"
"github.com/rclone/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/rclone/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote

View File

@@ -1,6 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining // Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files " // about "no buildable Go source files "
// +build plan9 solaris // +build plan9 solaris !go1.8
package azureblob package azureblob

View File

@@ -7,7 +7,7 @@ import (
"strings" "strings"
"time" "time"
"github.com/rclone/rclone/fs/fserrors" "github.com/ncw/rclone/fs/fserrors"
) )
// Error describes a B2 error response // Error describes a B2 error response
@@ -189,21 +189,6 @@ type GetUploadURLResponse struct {
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file. AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
} }
// GetDownloadAuthorizationRequest is passed to b2_get_download_authorization
type GetDownloadAuthorizationRequest struct {
BucketID string `json:"bucketId"` // The ID of the bucket that you want to upload to.
FileNamePrefix string `json:"fileNamePrefix"` // The file name prefix of files the download authorization token will allow access to.
ValidDurationInSeconds int64 `json:"validDurationInSeconds"` // The number of seconds before the authorization token will expire. The minimum value is 1 second. The maximum value is 604800 which is one week in seconds.
B2ContentDisposition string `json:"b2ContentDisposition,omitempty"` // optional - If this is present, download requests using the returned authorization must include the same value for b2ContentDisposition.
}
// GetDownloadAuthorizationResponse is received from b2_get_download_authorization
type GetDownloadAuthorizationResponse struct {
BucketID string `json:"bucketId"` // The unique ID of the bucket.
FileNamePrefix string `json:"fileNamePrefix"` // The file name prefix of files the download authorization token will allow access to.
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when downloading files, see b2_download_file_by_name.
}
// FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file // FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
type FileInfo struct { type FileInfo struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version. ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
@@ -326,14 +311,3 @@ type CancelLargeFileResponse struct {
AccountID string `json:"accountId"` // The identifier for the account. AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId"` // The unique ID of the bucket. BucketID string `json:"bucketId"` // The unique ID of the bucket.
} }
// CopyFileRequest is as passed to b2_copy_file
type CopyFileRequest struct {
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
Name string `json:"fileName"` // The name of the new file being created.
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
MetadataDirective string `json:"metadataDirective,omitempty"` // The strategy for how to populate metadata for the new file: COPY or REPLACE
ContentType string `json:"contentType,omitempty"` // The MIME type of the content of the file (REPLACE only)
Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only)
DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
}
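
For reference, the wire format of a plain server-side copy. This sketch redeclares the struct locally, mirroring the fields above with made-up values, and prints the JSON body b2_copy_file expects when MetadataDirective is COPY:

package main

import (
    "encoding/json"
    "fmt"
)

// CopyFileRequest redeclares the api struct above; the values used in
// main are illustrative, not real IDs.
type CopyFileRequest struct {
    SourceID          string            `json:"sourceFileId"`
    Name              string            `json:"fileName"`
    Range             string            `json:"range,omitempty"`
    MetadataDirective string            `json:"metadataDirective,omitempty"`
    ContentType       string            `json:"contentType,omitempty"`
    Info              map[string]string `json:"fileInfo,omitempty"`
    DestBucketID      string            `json:"destinationBucketId,omitempty"`
}

func main() {
    req := CopyFileRequest{
        SourceID:          "exampleSourceFileId",
        Name:              "path/to/new-file.txt",
        MetadataDirective: "COPY", // COPY forbids ContentType/Info, per the field comments
    }
    out, _ := json.MarshalIndent(req, "", "  ")
    fmt.Println(string(out))
}
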

View File

@@ -4,8 +4,8 @@ import (
"testing" "testing"
"time" "time"
"github.com/rclone/rclone/backend/b2/api" "github.com/ncw/rclone/backend/b2/api"
"github.com/rclone/rclone/fstest" "github.com/ncw/rclone/fstest"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )

View File

@@ -7,7 +7,6 @@ package b2
import ( import (
"bufio" "bufio"
"bytes" "bytes"
"context"
"crypto/sha1" "crypto/sha1"
"fmt" "fmt"
gohash "hash" gohash "hash"
@@ -20,18 +19,18 @@ import (
"sync" "sync"
"time" "time"
"github.com/ncw/rclone/backend/b2/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
) )
const ( const (
@@ -132,17 +131,8 @@ minimum size.`,
This is usually set to a Cloudflare CDN URL as Backblaze offers This is usually set to a Cloudflare CDN URL as Backblaze offers
free egress for data downloaded through the Cloudflare network. free egress for data downloaded through the Cloudflare network.
This is probably only useful for a public bucket.
Leave blank if you want to use the endpoint provided by Backblaze.`, Leave blank if you want to use the endpoint provided by Backblaze.`,
Advanced: true, Advanced: true,
}, {
Name: "download_auth_duration",
Help: `Time before the authorization token will expire in s or suffix ms|s|m|h|d.
The duration before the download authorization token will expire.
The minimum value is 1 second. The maximum value is one week.`,
Default: fs.Duration(7 * 24 * time.Hour),
Advanced: true,
}}, }},
}) })
} }
@@ -159,7 +149,6 @@ type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"` ChunkSize fs.SizeSuffix `config:"chunk_size"`
DisableCheckSum bool `config:"disable_checksum"` DisableCheckSum bool `config:"disable_checksum"`
DownloadURL string `config:"download_url"` DownloadURL string `config:"download_url"`
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
} }
// Fs represents a remote b2 server // Fs represents a remote b2 server
@@ -174,8 +163,6 @@ type Fs struct {
bucketOK bool // true if we have created the bucket bucketOK bool // true if we have created the bucket
bucketIDMutex sync.Mutex // mutex to protect _bucketID bucketIDMutex sync.Mutex // mutex to protect _bucketID
_bucketID string // the ID of the bucket we are working on _bucketID string // the ID of the bucket we are working on
bucketTypeMutex sync.Mutex // mutex to protect _bucketType
_bucketType string // the Type of the bucket we are working on
info api.AuthorizeAccountResponse // result of authorize call info api.AuthorizeAccountResponse // result of authorize call
uploadMu sync.Mutex // lock for upload variable uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadURLResponse // result of get upload URL calls uploads []*api.GetUploadURLResponse // result of get upload URL calls
@@ -337,7 +324,6 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
// NewFs constructs an Fs from the path, bucket:path // NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct // Parse config into Options struct
opt := new(Options) opt := new(Options)
err := configstruct.Set(m, opt) err := configstruct.Set(m, opt)
@@ -412,7 +398,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
} else { } else {
f.root += "/" f.root += "/"
} }
_, err := f.NewObject(ctx, remote) _, err := f.NewObject(remote)
if err != nil { if err != nil {
if err == fs.ErrorObjectNotFound { if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f // File doesn't exist so return old f
@@ -451,16 +437,6 @@ func (f *Fs) authorizeAccount() error {
return nil return nil
} }
// hasPermission returns if the current AuthorizationToken has the selected permission
func (f *Fs) hasPermission(permission string) bool {
for _, capability := range f.info.Allowed.Capabilities {
if capability == permission {
return true
}
}
return false
}
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken // getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
// //
// This should be returned with returnUploadURL when finished // This should be returned with returnUploadURL when finished
@@ -540,7 +516,7 @@ func (f *Fs) putUploadBlock(buf []byte) {
// Return an Object from a path // Return an Object from a path
// //
// If it can't be found it returns the error fs.ErrorObjectNotFound. // If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.File) (fs.Object, error) { func (f *Fs) newObjectWithInfo(remote string, info *api.File) (fs.Object, error) {
o := &Object{ o := &Object{
fs: f, fs: f,
remote: remote, remote: remote,
@@ -551,7 +527,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Fil
return nil, err return nil, err
} }
} else { } else {
err := o.readMetaData(ctx) // reads info and headers, returning an error err := o.readMetaData() // reads info and headers, returning an error
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -561,8 +537,8 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Fil
// NewObject finds the Object at remote. If it can't be found // NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound. // it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil) return f.newObjectWithInfo(remote, nil)
} }
// listFn is called from list to handle an object // listFn is called from list to handle an object
@@ -586,7 +562,7 @@ var errEndList = errors.New("end list")
// than 1000) // than 1000)
// //
// If hidden is set then it will list the hidden (deleted) files too. // If hidden is set then it will list the hidden (deleted) files too.
func (f *Fs) list(ctx context.Context, dir string, recurse bool, prefix string, limit int, hidden bool, fn listFn) error { func (f *Fs) list(dir string, recurse bool, prefix string, limit int, hidden bool, fn listFn) error {
root := f.root root := f.root
if dir != "" { if dir != "" {
root += dir + "/" root += dir + "/"
@@ -667,7 +643,7 @@ func (f *Fs) list(ctx context.Context, dir string, recurse bool, prefix string,
} }
// Convert a list item into a DirEntry // Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File, isDirectory bool, last *string) (fs.DirEntry, error) { func (f *Fs) itemToDirEntry(remote string, object *api.File, isDirectory bool, last *string) (fs.DirEntry, error) {
if isDirectory { if isDirectory {
d := fs.NewDir(remote, time.Time{}) d := fs.NewDir(remote, time.Time{})
return d, nil return d, nil
@@ -681,7 +657,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File
if object.Action == "hide" { if object.Action == "hide" {
return nil, nil return nil, nil
} }
o, err := f.newObjectWithInfo(ctx, remote, object) o, err := f.newObjectWithInfo(remote, object)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -698,10 +674,10 @@ func (f *Fs) markBucketOK() {
} }
// listDir lists a single directory // listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
last := "" last := ""
err = f.list(ctx, dir, false, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error { err = f.list(dir, false, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last) entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
if err != nil { if err != nil {
return err return err
} }
@@ -743,11 +719,11 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
// //
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
if f.bucket == "" { if f.bucket == "" {
return f.listBuckets(dir) return f.listBuckets(dir)
} }
return f.listDir(ctx, dir) return f.listDir(dir)
} }
// ListR lists the objects and directories of the Fs starting // ListR lists the objects and directories of the Fs starting
@@ -766,14 +742,14 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// //
// Don't implement this unless you have a more efficient way // Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal. // of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
if f.bucket == "" { if f.bucket == "" {
return fs.ErrorListBucketRequired return fs.ErrorListBucketRequired
} }
list := walk.NewListRHelper(callback) list := walk.NewListRHelper(callback)
last := "" last := ""
err = f.list(ctx, dir, true, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error { err = f.list(dir, true, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last) entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
if err != nil { if err != nil {
return err return err
} }
@@ -818,42 +794,6 @@ func (f *Fs) listBucketsToFn(fn listBucketFn) error {
return nil return nil
} }
// getbucketType finds the bucketType for the current bucket name
// can be one of allPublic. allPrivate, or snapshot
func (f *Fs) getbucketType() (bucketType string, err error) {
f.bucketTypeMutex.Lock()
defer f.bucketTypeMutex.Unlock()
if f._bucketType != "" {
return f._bucketType, nil
}
err = f.listBucketsToFn(func(bucket *api.Bucket) error {
if bucket.Name == f.bucket {
bucketType = bucket.Type
}
return nil
})
if bucketType == "" {
err = fs.ErrorDirNotFound
}
f._bucketType = bucketType
return bucketType, err
}
// setBucketType sets the Type for the current bucket name
func (f *Fs) setBucketType(Type string) {
f.bucketTypeMutex.Lock()
f._bucketType = Type
f.bucketTypeMutex.Unlock()
}
// clearBucketType clears the Type for the current bucket name
func (f *Fs) clearBucketType() {
f.bucketTypeMutex.Lock()
f._bucketType = ""
f.bucketTypeMutex.Unlock()
}
// getBucketID finds the ID for the current bucket name // getBucketID finds the ID for the current bucket name
func (f *Fs) getBucketID() (bucketID string, err error) { func (f *Fs) getBucketID() (bucketID string, err error) {
f.bucketIDMutex.Lock() f.bucketIDMutex.Lock()
@@ -894,22 +834,22 @@ func (f *Fs) clearBucketID() {
// Copy the reader in to the new object which is returned // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction // Temporary Object under construction
fs := &Object{ fs := &Object{
fs: f, fs: f,
remote: src.Remote(), remote: src.Remote(),
} }
return fs, fs.Update(ctx, in, src, options...) return fs, fs.Update(in, src, options...)
} }
// PutStream uploads to the remote path with the modTime given of indeterminate size // PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...) return f.Put(in, src, options...)
} }
// Mkdir creates the bucket if it doesn't exist // Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error { func (f *Fs) Mkdir(dir string) error {
f.bucketOKMu.Lock() f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock() defer f.bucketOKMu.Unlock()
if f.bucketOK { if f.bucketOK {
@@ -948,7 +888,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return errors.Wrap(err, "failed to create bucket") return errors.Wrap(err, "failed to create bucket")
} }
f.setBucketID(response.ID) f.setBucketID(response.ID)
f.setBucketType(response.Type)
f.bucketOK = true f.bucketOK = true
return nil return nil
} }
@@ -956,7 +895,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// Rmdir deletes the bucket if the fs is at the root // Rmdir deletes the bucket if the fs is at the root
// //
// Returns an error if it isn't empty // Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(dir string) error {
f.bucketOKMu.Lock() f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock() defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" { if f.root != "" || dir != "" {
@@ -984,7 +923,6 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
} }
f.bucketOK = false f.bucketOK = false
f.clearBucketID() f.clearBucketID()
f.clearBucketType()
f.clearUploadURL() f.clearUploadURL()
return nil return nil
} }
@@ -1052,7 +990,7 @@ func (f *Fs) deleteByID(ID, Name string) error {
// if oldOnly is true then it deletes only non current files. // if oldOnly is true then it deletes only non current files.
// //
// Implemented here so we can make sure we delete old versions. // Implemented here so we can make sure we delete old versions.
func (f *Fs) purge(ctx context.Context, oldOnly bool) error { func (f *Fs) purge(oldOnly bool) error {
var errReturn error var errReturn error
var checkErrMutex sync.Mutex var checkErrMutex sync.Mutex
var checkErr = func(err error) { var checkErr = func(err error) {
@@ -1080,26 +1018,16 @@ func (f *Fs) purge(ctx context.Context, oldOnly bool) error {
go func() { go func() {
defer wg.Done() defer wg.Done()
for object := range toBeDeleted { for object := range toBeDeleted {
oi, err := f.newObjectWithInfo(ctx, object.Name, object) accounting.Stats.Checking(object.Name)
if err != nil { checkErr(f.deleteByID(object.ID, object.Name))
fs.Errorf(object.Name, "Can't create object %v", err) accounting.Stats.DoneChecking(object.Name)
continue
}
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
err = f.deleteByID(object.ID, object.Name)
checkErr(err)
tr.Done(err)
} }
}() }()
} }
last := "" last := ""
checkErr(f.list(ctx, "", true, "", 0, true, func(remote string, object *api.File, isDirectory bool) error { checkErr(f.list("", true, "", 0, true, func(remote string, object *api.File, isDirectory bool) error {
if !isDirectory { if !isDirectory {
oi, err := f.newObjectWithInfo(ctx, object.Name, object) accounting.Stats.Checking(remote)
if err != nil {
fs.Errorf(object, "Can't create object %+v", err)
}
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
if oldOnly && last != remote { if oldOnly && last != remote {
if object.Action == "hide" { if object.Action == "hide" {
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID) fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
@@ -1115,7 +1043,7 @@ func (f *Fs) purge(ctx context.Context, oldOnly bool) error {
toBeDeleted <- object toBeDeleted <- object
} }
last = remote last = remote
tr.Done(nil) accounting.Stats.DoneChecking(remote)
} }
return nil return nil
})) }))
@@ -1123,71 +1051,19 @@ func (f *Fs) purge(ctx context.Context, oldOnly bool) error {
wg.Wait() wg.Wait()
if !oldOnly { if !oldOnly {
checkErr(f.Rmdir(ctx, "")) checkErr(f.Rmdir(""))
} }
return errReturn return errReturn
} }
// Purge deletes all the files and directories including the old versions. // Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context) error { func (f *Fs) Purge() error {
return f.purge(ctx, false) return f.purge(false)
} }
// CleanUp deletes all the hidden files. // CleanUp deletes all the hidden files.
func (f *Fs) CleanUp(ctx context.Context) error { func (f *Fs) CleanUp() error {
return f.purge(ctx, true) return f.purge(true)
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
err := f.Mkdir(ctx, "")
if err != nil {
return nil, err
}
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
destBucketID, err := f.getBucketID()
if err != nil {
return nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_file",
}
var request = api.CopyFileRequest{
SourceID: srcObj.id,
Name: f.root + remote,
MetadataDirective: "COPY",
DestBucketID: destBucketID,
}
var response api.FileInfo
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
o := &Object{
fs: f,
remote: remote,
}
err = o.decodeMetaDataFileInfo(&response)
if err != nil {
return nil, err
}
return o, nil
} }
// Hashes returns the supported hash sets. // Hashes returns the supported hash sets.
@@ -1195,77 +1071,6 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.SHA1) return hash.Set(hash.SHA1)
} }
// getDownloadAuthorization returns an authorization token for downloading
// without an account.
func (f *Fs) getDownloadAuthorization(remote string) (authorization string, err error) {
validDurationInSeconds := time.Duration(f.opt.DownloadAuthorizationDuration).Nanoseconds() / 1e9
if validDurationInSeconds <= 0 || validDurationInSeconds > 604800 {
return "", errors.New("--b2-download-auth-duration must be between 1 sec and 1 week")
}
if !f.hasPermission("shareFiles") {
return "", errors.New("sharing a file link requires the shareFiles permission")
}
bucketID, err := f.getBucketID()
if err != nil {
return "", err
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_download_authorization",
}
var request = api.GetDownloadAuthorizationRequest{
BucketID: bucketID,
FileNamePrefix: path.Join(f.root, remote),
ValidDurationInSeconds: validDurationInSeconds,
}
var response api.GetDownloadAuthorizationResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return "", errors.Wrap(err, "failed to get download authorization")
}
return response.AuthorizationToken, nil
}
// PublicLink returns a link for downloading without an account.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
var RootURL string
if f.opt.DownloadURL == "" {
RootURL = f.info.DownloadURL
} else {
RootURL = f.opt.DownloadURL
}
_, err = f.NewObject(ctx, remote)
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
err2 := f.list(ctx, remote, false, "", 1, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
err = nil
return nil
})
if err2 != nil {
return "", err2
}
}
if err != nil {
return "", err
}
absPath := "/" + path.Join(f.root, remote)
link = RootURL + "/file/" + urlEncode(f.bucket) + absPath
bucketType, err := f.getbucketType()
if err != nil {
return "", err
}
if bucketType == "allPrivate" || bucketType == "snapshot" {
AuthorizationToken, err := f.getDownloadAuthorization(remote)
if err != nil {
return "", err
}
link += "?Authorization=" + AuthorizationToken
}
return link, nil
}
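
For a private ("allPrivate" or "snapshot") bucket, the link assembled above therefore takes this shape (host, bucket, path and token are all illustrative):

  https://f002.backblazeb2.com/file/my-bucket/path/to/file.txt?Authorization=<download-auth-token>

Public buckets get the same URL without the query string.
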
// ------------------------------------------------------------ // ------------------------------------------------------------
// Fs returns the parent Fs // Fs returns the parent Fs
@@ -1287,13 +1092,13 @@ func (o *Object) Remote() string {
} }
// Hash returns the Sha-1 of an object returning a lowercase hex string // Hash returns the Sha-1 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.SHA1 { if t != hash.SHA1 {
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
if o.sha1 == "" { if o.sha1 == "" {
// Error is logged in readMetaData // Error is logged in readMetaData
err := o.readMetaData(ctx) err := o.readMetaData()
if err != nil { if err != nil {
return "", err return "", err
} }
@@ -1349,8 +1154,17 @@ func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType) return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
} }
// getMetaData gets the metadata from the object unconditionally // readMetaData gets the metadata if it hasn't already been fetched
func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) { //
// Sets
// o.id
// o.modTime
// o.size
// o.sha1
func (o *Object) readMetaData() (err error) {
if o.id != "" {
return nil
}
maxSearched := 1 maxSearched := 1
var timestamp api.Timestamp var timestamp api.Timestamp
baseRemote := o.remote baseRemote := o.remote
@@ -1358,8 +1172,8 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
timestamp, baseRemote = api.RemoveVersion(baseRemote) timestamp, baseRemote = api.RemoveVersion(baseRemote)
maxSearched = maxVersions maxSearched = maxVersions
} }
var info *api.File
err = o.fs.list(ctx, "", true, baseRemote, maxSearched, o.fs.opt.Versions, func(remote string, object *api.File, isDirectory bool) error { err = o.fs.list("", true, baseRemote, maxSearched, o.fs.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
if isDirectory { if isDirectory {
return nil return nil
} }
@@ -1373,30 +1187,12 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
}) })
if err != nil { if err != nil {
if err == fs.ErrorDirNotFound { if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound return fs.ErrorObjectNotFound
} }
return nil, err return err
} }
if info == nil { if info == nil {
return nil, fs.ErrorObjectNotFound return fs.ErrorObjectNotFound
}
return info, nil
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// Sets
// o.id
// o.modTime
// o.size
// o.sha1
func (o *Object) readMetaData(ctx context.Context) (err error) {
if o.id != "" {
return nil
}
info, err := o.getMetaData(ctx)
if err != nil {
return err
} }
return o.decodeMetaData(info) return o.decodeMetaData(info)
} }
@@ -1417,7 +1213,7 @@ func (o *Object) parseTimeString(timeString string) (err error) {
unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64) unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
if err != nil { if err != nil {
fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err) fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
return nil return err
} }
o.modTime = time.Unix(unixMilliseconds/1E3, (unixMilliseconds%1E3)*1E6).UTC() o.modTime = time.Unix(unixMilliseconds/1E3, (unixMilliseconds%1E3)*1E6).UTC()
return nil return nil
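
B2 stores modification times as decimal milliseconds since the Unix epoch, and parseTimeString converts them exactly as below. A runnable round-trip with an illustrative timestamp:

package main

import (
    "fmt"
    "strconv"
    "time"
)

func main() {
    const timeString = "1546300800123" // ms since epoch, illustrative value
    ms, err := strconv.ParseInt(timeString, 10, 64)
    if err != nil {
        panic(err)
    }
    t := time.Unix(ms/1e3, (ms%1e3)*1e6).UTC()
    fmt.Println(t) // 2019-01-01 00:00:00.123 +0000 UTC
}
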
@@ -1429,39 +1225,16 @@ func (o *Object) parseTimeString(timeString string) (err error) {
// LastModified returned in the http headers // LastModified returned in the http headers
// //
// SHA-1 will also be updated once the request has completed. // SHA-1 will also be updated once the request has completed.
func (o *Object) ModTime(ctx context.Context) (result time.Time) { func (o *Object) ModTime() (result time.Time) {
// The error is logged in readMetaData // The error is logged in readMetaData
_ = o.readMetaData(ctx) _ = o.readMetaData()
return o.modTime return o.modTime
} }
// SetModTime sets the modification time of the Object // SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { func (o *Object) SetModTime(modTime time.Time) error {
info, err := o.getMetaData(ctx) // Not possible with B2
if err != nil { return fs.ErrorCantSetModTime
return err
}
info.Info[timeKey] = timeString(modTime)
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_file",
}
var request = api.CopyFileRequest{
SourceID: o.id,
Name: o.fs.root + o.remote, // copy to same name
MetadataDirective: "REPLACE",
ContentType: info.ContentType,
Info: info.Info,
}
var response api.FileInfo
err = o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(&opts, &request, &response)
return o.fs.shouldRetry(resp, err)
})
if err != nil {
return err
}
return o.decodeMetaDataFileInfo(&response)
} }
// Storable returns if this object is storable // Storable returns if this object is storable
@@ -1530,7 +1303,7 @@ func (file *openFile) Close() (err error) {
var _ io.ReadCloser = &openFile{} var _ io.ReadCloser = &openFile{}
// Open an object for read // Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
opts := rest.Opts{ opts := rest.Opts{
Method: "GET", Method: "GET",
Options: options, Options: options,
@@ -1621,11 +1394,11 @@ func urlEncode(in string) string {
// Update the object with the contents of the io.Reader, modTime and size // Update the object with the contents of the io.Reader, modTime and size
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
if o.fs.opt.Versions { if o.fs.opt.Versions {
return errNotWithVersions return errNotWithVersions
} }
err = o.fs.Mkdir(ctx, "") err = o.fs.Mkdir("")
if err != nil { if err != nil {
return err return err
} }
@@ -1643,7 +1416,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err == nil { if err == nil {
fs.Debugf(o, "File is big enough for chunked streaming") fs.Debugf(o, "File is big enough for chunked streaming")
up, err := o.fs.newLargeUpload(ctx, o, in, src) up, err := o.fs.newLargeUpload(o, in, src)
if err != nil { if err != nil {
o.fs.putUploadBlock(buf) o.fs.putUploadBlock(buf)
return err return err
@@ -1658,16 +1431,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err return err
} }
} else if size > int64(o.fs.opt.UploadCutoff) { } else if size > int64(o.fs.opt.UploadCutoff) {
up, err := o.fs.newLargeUpload(ctx, o, in, src) up, err := o.fs.newLargeUpload(o, in, src)
if err != nil { if err != nil {
return err return err
} }
return up.Upload() return up.Upload()
} }
modTime := src.ModTime(ctx) modTime := src.ModTime()
calculatedSha1, _ := src.Hash(ctx, hash.SHA1) calculatedSha1, _ := src.Hash(hash.SHA1)
if calculatedSha1 == "" { if calculatedSha1 == "" {
calculatedSha1 = "hex_digits_at_end" calculatedSha1 = "hex_digits_at_end"
har := newHashAppendingReader(in, sha1.New()) har := newHashAppendingReader(in, sha1.New())
@@ -1745,7 +1518,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ExtraHeaders: map[string]string{ ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken, "Authorization": upload.AuthorizationToken,
"X-Bz-File-Name": urlEncode(o.fs.root + o.remote), "X-Bz-File-Name": urlEncode(o.fs.root + o.remote),
"Content-Type": fs.MimeType(ctx, src), "Content-Type": fs.MimeType(src),
sha1Header: calculatedSha1, sha1Header: calculatedSha1,
timeHeader: timeString(modTime), timeHeader: timeString(modTime),
}, },
@@ -1770,7 +1543,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
// Remove an object // Remove an object
func (o *Object) Remove(ctx context.Context) error { func (o *Object) Remove() error {
if o.fs.opt.Versions { if o.fs.opt.Versions {
return errNotWithVersions return errNotWithVersions
} }
@@ -1781,7 +1554,7 @@ func (o *Object) Remove(ctx context.Context) error {
} }
// MimeType of an Object if known, "" otherwise // MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string { func (o *Object) MimeType() string {
return o.mimeType return o.mimeType
} }
@@ -1794,11 +1567,9 @@ func (o *Object) ID() string {
var ( var (
_ fs.Fs = &Fs{} _ fs.Fs = &Fs{}
_ fs.Purger = &Fs{} _ fs.Purger = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{} _ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{} _ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{} _ fs.ListRer = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.Object = &Object{} _ fs.Object = &Object{}
_ fs.MimeTyper = &Object{} _ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{} _ fs.IDer = &Object{}

View File

@@ -4,7 +4,7 @@ import (
"testing" "testing"
"time" "time"
"github.com/rclone/rclone/fstest" "github.com/ncw/rclone/fstest"
) )
// Test b2 string encoding // Test b2 string encoding

View File

@@ -4,8 +4,8 @@ package b2
import ( import (
"testing" "testing"
"github.com/rclone/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/rclone/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote

View File

@@ -6,7 +6,6 @@ package b2
import ( import (
"bytes" "bytes"
"context"
"crypto/sha1" "crypto/sha1"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
@@ -15,12 +14,12 @@ import (
"strings" "strings"
"sync" "sync"
"github.com/ncw/rclone/backend/b2/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
) )
type hashAppendingReader struct { type hashAppendingReader struct {
@@ -81,7 +80,7 @@ type largeUpload struct {
} }
// newLargeUpload starts an upload of object o from in with metadata in src // newLargeUpload starts an upload of object o from in with metadata in src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) { func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
remote := o.remote remote := o.remote
size := src.Size() size := src.Size()
parts := int64(0) parts := int64(0)
@@ -99,7 +98,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
sha1SliceSize = parts sha1SliceSize = parts
} }
modTime := src.ModTime(ctx) modTime := src.ModTime()
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
Path: "/b2_start_large_file", Path: "/b2_start_large_file",
@@ -111,14 +110,14 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
var request = api.StartLargeFileRequest{ var request = api.StartLargeFileRequest{
BucketID: bucketID, BucketID: bucketID,
Name: o.fs.root + remote, Name: o.fs.root + remote,
ContentType: fs.MimeType(ctx, src), ContentType: fs.MimeType(src),
Info: map[string]string{ Info: map[string]string{
timeKey: timeString(modTime), timeKey: timeString(modTime),
}, },
} }
// Set the SHA1 if known // Set the SHA1 if known
if !o.fs.opt.DisableCheckSum { if !o.fs.opt.DisableCheckSum {
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" { if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1 request.Info[sha1Key] = calculatedSha1
} }
} }

View File

@@ -10,7 +10,6 @@ package box
// FIXME box can copy a directory // FIXME box can copy a directory
import ( import (
"context"
"fmt" "fmt"
"io" "io"
"log" "log"
@@ -21,19 +20,19 @@ import (
"strings" "strings"
"time" "time"
"github.com/ncw/rclone/backend/box/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2" "golang.org/x/oauth2"
) )
@@ -194,9 +193,9 @@ func restoreReservedChars(x string) string {
} }
// readMetaDataForPath reads the metadata from the path // readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) { func (f *Fs) readMetaDataForPath(path string) (info *api.Item, err error) {
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err) // defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false) leaf, directoryID, err := f.dirCache.FindRootAndPath(path, false)
if err != nil { if err != nil {
if err == fs.ErrorDirNotFound { if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound return nil, fs.ErrorObjectNotFound
@@ -239,7 +238,6 @@ func errorHandler(resp *http.Response) error {
// NewFs constructs an Fs from the path, container:path // NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct // Parse config into Options struct
opt := new(Options) opt := new(Options)
err := configstruct.Set(m, opt) err := configstruct.Set(m, opt)
@@ -273,7 +271,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Renew the token in the background // Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error { f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "") _, err := f.readMetaDataForPath("")
return err return err
}) })
@@ -281,7 +279,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.dirCache = dircache.New(root, rootID, f) f.dirCache = dircache.New(root, rootID, f)
// Find the current root // Find the current root
err = f.dirCache.FindRoot(ctx, false) err = f.dirCache.FindRoot(false)
if err != nil { if err != nil {
// Assume it is a file // Assume it is a file
newRoot, remote := dircache.SplitPath(root) newRoot, remote := dircache.SplitPath(root)
@@ -289,12 +287,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
tempF.dirCache = dircache.New(newRoot, rootID, &tempF) tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
tempF.root = newRoot tempF.root = newRoot
// Make new Fs which is the parent // Make new Fs which is the parent
err = tempF.dirCache.FindRoot(ctx, false) err = tempF.dirCache.FindRoot(false)
if err != nil { if err != nil {
// No root so return old f // No root so return old f
return f, nil return f, nil
} }
_, err := tempF.newObjectWithInfo(ctx, remote, nil) _, err := tempF.newObjectWithInfo(remote, nil)
if err != nil { if err != nil {
if err == fs.ErrorObjectNotFound { if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f // File doesn't exist so return old f
@@ -305,7 +303,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.features.Fill(&tempF) f.features.Fill(&tempF)
// XXX: update the old f here instead of returning tempF, since // XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver. // `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182 // See https://github.com/ncw/rclone/issues/2182
f.dirCache = tempF.dirCache f.dirCache = tempF.dirCache
f.root = tempF.root f.root = tempF.root
// return an error with an fs which points to the parent // return an error with an fs which points to the parent
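
The fallback above handles a root that turns out to be a file: rebuild the Fs one level up, probe for the leaf as an object, then hand back the parent-rooted Fs together with a sentinel error. A minimal sketch of that shape (rclone's real sentinel is fs.ErrorIsFile; dirExists and fileExists below are stand-ins for dirCache.FindRoot and newObjectWithInfo):

package main

import (
	"errors"
	"fmt"
	"path"
)

var ErrIsFile = errors.New("root is a file, fs points at its parent")

type Fs struct{ root string }

// Stand-in probes for the directory cache and object lookup.
func dirExists(p string) bool  { return p == "data" }
func fileExists(p string) bool { return p == "data/report.csv" }

func NewFs(root string) (*Fs, error) {
	if dirExists(root) {
		return &Fs{root: root}, nil
	}
	parent, leaf := path.Split(root)
	parent = path.Clean(parent)
	if dirExists(parent) && fileExists(path.Join(parent, leaf)) {
		// Point the Fs at the parent and signal the caller.
		return &Fs{root: parent}, ErrIsFile
	}
	return &Fs{root: root}, nil // neither dir nor file: behave like an empty dir
}

func main() {
	f, err := NewFs("data/report.csv")
	fmt.Println(f.root, err) // "data" plus the sentinel error
}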
@@ -325,7 +323,7 @@ func (f *Fs) rootSlash() string {
// Return an Object from a path // Return an Object from a path
// //
// If it can't be found it returns the error fs.ErrorObjectNotFound. // If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) { func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) {
o := &Object{ o := &Object{
fs: f, fs: f,
remote: remote, remote: remote,
@@ -335,7 +333,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Ite
// Set info // Set info
err = o.setMetaData(info) err = o.setMetaData(info)
} else { } else {
err = o.readMetaData(ctx) // reads info and meta, returning an error err = o.readMetaData() // reads info and meta, returning an error
} }
if err != nil { if err != nil {
return nil, err return nil, err
@@ -345,12 +343,12 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Ite
// NewObject finds the Object at remote. If it can't be found // NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound. // it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil) return f.newObjectWithInfo(remote, nil)
} }
// FindLeaf finds a directory of name leaf in the folder with ID pathID // FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID // Find the leaf in pathID
found, err = f.listAll(pathID, true, false, func(item *api.Item) bool { found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
if item.Name == leaf { if item.Name == leaf {
@@ -370,7 +368,7 @@ func fieldsValue() url.Values {
} }
// CreateDir makes a directory with pathID as parent and name leaf // CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf) // fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
var resp *http.Response var resp *http.Response
var info *api.Item var info *api.Item
@@ -469,12 +467,12 @@ OUTER:
// //
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false) err = f.dirCache.FindRoot(false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
directoryID, err := f.dirCache.FindDir(ctx, dir, false) directoryID, err := f.dirCache.FindDir(dir, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -488,7 +486,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// FIXME more info from dir? // FIXME more info from dir?
entries = append(entries, d) entries = append(entries, d)
} else if info.Type == api.ItemTypeFile { } else if info.Type == api.ItemTypeFile {
o, err := f.newObjectWithInfo(ctx, remote, info) o, err := f.newObjectWithInfo(remote, info)
if err != nil { if err != nil {
iErr = err iErr = err
return true return true
@@ -512,9 +510,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Returns the object, leaf, directoryID and error // Returns the object, leaf, directoryID and error
// //
// Used to create new objects // Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) { func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
// Create the directory for the object if it doesn't exist // Create the directory for the object if it doesn't exist
leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true) leaf, directoryID, err = f.dirCache.FindRootAndPath(remote, true)
if err != nil { if err != nil {
return return
} }
@@ -531,22 +529,22 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
// Copy the reader in to the new object which is returned // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil) existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
switch err { switch err {
case nil: case nil:
return existingObj, existingObj.Update(ctx, in, src, options...) return existingObj, existingObj.Update(in, src, options...)
case fs.ErrorObjectNotFound: case fs.ErrorObjectNotFound:
// Not found so create it // Not found so create it
return f.PutUnchecked(ctx, in, src) return f.PutUnchecked(in, src)
default: default:
return nil, err return nil, err
} }
} }
// PutStream uploads to the remote path with the modTime given of indeterminate size // PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...) return f.Put(in, src, options...)
} }
// PutUnchecked the object into the container // PutUnchecked the object into the container
@@ -556,26 +554,26 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// Copy the reader in to the new object which is returned // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote() remote := src.Remote()
size := src.Size() size := src.Size()
modTime := src.ModTime(ctx) modTime := src.ModTime()
o, _, _, err := f.createObject(ctx, remote, modTime, size) o, _, _, err := f.createObject(remote, modTime, size)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return o, o.Update(ctx, in, src, options...) return o, o.Update(in, src, options...)
} }
// Mkdir creates the container if it doesn't exist // Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error { func (f *Fs) Mkdir(dir string) error {
err := f.dirCache.FindRoot(ctx, true) err := f.dirCache.FindRoot(true)
if err != nil { if err != nil {
return err return err
} }
if dir != "" { if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true) _, err = f.dirCache.FindDir(dir, true)
} }
return err return err
} }
@@ -595,17 +593,17 @@ func (f *Fs) deleteObject(id string) error {
// purgeCheck removes the root directory, if check is set then it // purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in // refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { func (f *Fs) purgeCheck(dir string, check bool) error {
root := path.Join(f.root, dir) root := path.Join(f.root, dir)
if root == "" { if root == "" {
return errors.New("can't purge root directory") return errors.New("can't purge root directory")
} }
dc := f.dirCache dc := f.dirCache
err := dc.FindRoot(ctx, false) err := dc.FindRoot(false)
if err != nil { if err != nil {
return err return err
} }
rootID, err := dc.FindDir(ctx, dir, false) rootID, err := dc.FindDir(dir, false)
if err != nil { if err != nil {
return err return err
} }
@@ -635,8 +633,8 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
// Rmdir deletes the root folder // Rmdir deletes the root folder
// //
// Returns an error if it isn't empty // Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(dir string) error {
return f.purgeCheck(ctx, dir, true) return f.purgeCheck(dir, true)
} }
// Precision return the precision of this Fs // Precision return the precision of this Fs
@@ -653,13 +651,13 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantCopy // If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
if !ok { if !ok {
fs.Debugf(src, "Can't copy - not same remote type") fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy return nil, fs.ErrorCantCopy
} }
err := srcObj.readMetaData(ctx) err := srcObj.readMetaData()
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -671,7 +669,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
// Create temporary object // Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -710,8 +708,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of // Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the // deleting all the files quicker than just running Remove() on the
// result of List() // result of List()
func (f *Fs) Purge(ctx context.Context) error { func (f *Fs) Purge() error {
return f.purgeCheck(ctx, "", false) return f.purgeCheck("", false)
} }
// move a file or folder // move a file or folder
@@ -748,7 +746,7 @@ func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err e
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantMove // If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
if !ok { if !ok {
fs.Debugf(src, "Can't move - not same remote type") fs.Debugf(src, "Can't move - not same remote type")
@@ -756,7 +754,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
// Create temporary object // Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -782,7 +780,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// If it isn't possible then return fs.ErrorCantDirMove // If it isn't possible then return fs.ErrorCantDirMove
// //
// If destination exists then return fs.ErrorDirExists // If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs) srcFs, ok := src.(*Fs)
if !ok { if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type") fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -798,14 +796,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
} }
// find the root src directory // find the root src directory
err := srcFs.dirCache.FindRoot(ctx, false) err := srcFs.dirCache.FindRoot(false)
if err != nil { if err != nil {
return err return err
} }
// find the root dst directory // find the root dst directory
if dstRemote != "" { if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true) err = f.dirCache.FindRoot(true)
if err != nil { if err != nil {
return err return err
} }
@@ -821,14 +819,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if dstRemote == "" { if dstRemote == "" {
findPath = f.root findPath = f.root
} }
leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true) leaf, directoryID, err = f.dirCache.FindPath(findPath, true)
if err != nil { if err != nil {
return err return err
} }
// Check destination does not exist // Check destination does not exist
if dstRemote != "" { if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false) _, err = f.dirCache.FindDir(dstRemote, false)
if err == fs.ErrorDirNotFound { if err == fs.ErrorDirNotFound {
// OK // OK
} else if err != nil { } else if err != nil {
@@ -839,7 +837,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
} }
// Find ID of src // Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false) srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
if err != nil { if err != nil {
return err return err
} }
@@ -854,8 +852,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
} }
// PublicLink adds a "readable by anyone with link" permission on the given file or folder. // PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) { func (f *Fs) PublicLink(remote string) (string, error) {
id, err := f.dirCache.FindDir(ctx, remote, false) id, err := f.dirCache.FindDir(remote, false)
var opts rest.Opts var opts rest.Opts
if err == nil { if err == nil {
fs.Debugf(f, "attempting to share directory '%s'", remote) fs.Debugf(f, "attempting to share directory '%s'", remote)
@@ -867,7 +865,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
} }
} else { } else {
fs.Debugf(f, "attempting to share single file '%s'", remote) fs.Debugf(f, "attempting to share single file '%s'", remote)
o, err := f.NewObject(ctx, remote) o, err := f.NewObject(remote)
if err != nil { if err != nil {
return "", err return "", err
} }
@@ -930,7 +928,7 @@ func (o *Object) srvPath() string {
} }
// Hash returns the SHA-1 of an object returning a lowercase hex string // Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.SHA1 { if t != hash.SHA1 {
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
@@ -939,7 +937,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
// Size returns the size of an object in bytes // Size returns the size of an object in bytes
func (o *Object) Size() int64 { func (o *Object) Size() int64 {
err := o.readMetaData(context.TODO()) err := o.readMetaData()
if err != nil { if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err) fs.Logf(o, "Failed to read metadata: %v", err)
return 0 return 0
@@ -964,11 +962,11 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
// readMetaData gets the metadata if it hasn't already been fetched // readMetaData gets the metadata if it hasn't already been fetched
// //
// it also sets the info // it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) { func (o *Object) readMetaData() (err error) {
if o.hasMetaData { if o.hasMetaData {
return nil return nil
} }
info, err := o.fs.readMetaDataForPath(ctx, o.remote) info, err := o.fs.readMetaDataForPath(o.remote)
if err != nil { if err != nil {
if apiErr, ok := err.(*api.Error); ok { if apiErr, ok := err.(*api.Error); ok {
if apiErr.Code == "not_found" || apiErr.Code == "trashed" { if apiErr.Code == "not_found" || apiErr.Code == "trashed" {
@@ -985,8 +983,8 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
// //
// It attempts to read the object's mtime and, if that isn't present, the // It attempts to read the object's mtime and, if that isn't present, the
// LastModified returned in the http headers // LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time { func (o *Object) ModTime() time.Time {
err := o.readMetaData(ctx) err := o.readMetaData()
if err != nil { if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err) fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now() return time.Now()
@@ -995,7 +993,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
} }
// setModTime sets the modification time of the local fs object // setModTime sets the modification time of the local fs object
func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) { func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
opts := rest.Opts{ opts := rest.Opts{
Method: "PUT", Method: "PUT",
Path: "/files/" + o.id, Path: "/files/" + o.id,
@@ -1013,8 +1011,8 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
} }
// SetModTime sets the modification time of the local fs object // SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { func (o *Object) SetModTime(modTime time.Time) error {
info, err := o.setModTime(ctx, modTime) info, err := o.setModTime(modTime)
if err != nil { if err != nil {
return err return err
} }
@@ -1027,7 +1025,7 @@ func (o *Object) Storable() bool {
} }
// Open an object for read // Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.id == "" { if o.id == "" {
return nil, errors.New("can't download - no id") return nil, errors.New("can't download - no id")
} }
@@ -1095,16 +1093,16 @@ func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Tim
// If existing is set then it updates the object rather than creating a new one // If existing is set then it updates the object rather than creating a new one
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
o.fs.tokenRenewer.Start() o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop() defer o.fs.tokenRenewer.Stop()
size := src.Size() size := src.Size()
modTime := src.ModTime(ctx) modTime := src.ModTime()
remote := o.Remote() remote := o.Remote()
// Create the directory for the object if it doesn't exist // Create the directory for the object if it doesn't exist
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true) leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(remote, true)
if err != nil { if err != nil {
return err return err
} }
@@ -1119,7 +1117,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
// Remove an object // Remove an object
func (o *Object) Remove(ctx context.Context) error { func (o *Object) Remove() error {
return o.fs.deleteObject(o.id) return o.fs.deleteObject(o.id)
} }

View File

@@ -4,8 +4,8 @@ package box_test
import ( import (
"testing" "testing"
"github.com/rclone/rclone/backend/box" "github.com/ncw/rclone/backend/box"
"github.com/rclone/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote

View File

@@ -14,11 +14,11 @@ import (
"sync" "sync"
"time" "time"
"github.com/ncw/rclone/backend/box/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/backend/box/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/rest"
) )
// createUploadSession creates an upload session for the object // createUploadSession creates an upload session for the object
@@ -97,7 +97,7 @@ func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.T
var body []byte var body []byte
var resp *http.Response var resp *http.Response
// For discussion of this value see: // For discussion of this value see:
// https://github.com/rclone/rclone/issues/2054 // https://github.com/ncw/rclone/issues/2054
maxTries := o.fs.opt.CommitRetries maxTries := o.fs.opt.CommitRetries
const defaultDelay = 10 const defaultDelay = 10
var tries int var tries int
@@ -112,7 +112,7 @@ outer:
return shouldRetry(resp, err) return shouldRetry(resp, err)
}) })
delay := defaultDelay delay := defaultDelay
var why string why := "unknown"
if err != nil { if err != nil {
// Sometimes we get 400 Error with // Sometimes we get 400 Error with
// parts_mismatch immediately after uploading // parts_mismatch immediately after uploading
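
The retry logic being touched here copes with Box answering a commit with a Retry-After delay or a transient 400 parts_mismatch just after the parts finish uploading. A hedged sketch of that bounded retry shape, with an invented commit operation and error classifier (the real maxTries comes from opt.CommitRetries and the real delay is in seconds):

package main

import (
	"errors"
	"fmt"
	"time"
)

var errTransient = errors.New("parts_mismatch") // stand-in for Box's 400

// makeCommit pretends to fail twice, then succeed.
func makeCommit() func() error {
	calls := 0
	return func() error {
		calls++
		if calls < 3 {
			return errTransient
		}
		return nil
	}
}

func main() {
	const maxTries = 100
	const defaultDelay = 10 * time.Millisecond // seconds in the real code
	commit := makeCommit()
	for tries := 0; tries < maxTries; tries++ {
		if err := commit(); err == nil {
			fmt.Println("committed after", tries+1, "tries")
			return
		} else if !errors.Is(err, errTransient) {
			fmt.Println("giving up:", err)
			return
		}
		time.Sleep(defaultDelay) // the real code honours Retry-After here
	}
	fmt.Println("ran out of retries")
}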

backend/cache/cache.go vendored
View File

@@ -18,19 +18,18 @@ import (
"syscall" "syscall"
"time" "time"
"github.com/ncw/rclone/backend/crypt"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fspath"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/rc"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/atexit"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
"golang.org/x/time/rate" "golang.org/x/time/rate"
) )
@@ -482,7 +481,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath) return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
} }
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath) f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
f.tempFs, err = cache.Get(f.opt.TempWritePath) f.tempFs, err = fs.NewFs(f.opt.TempWritePath)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "failed to create temp fs: %v", err) return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
} }
@@ -509,7 +508,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil { if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
pollInterval := make(chan time.Duration, 1) pollInterval := make(chan time.Duration, 1)
pollInterval <- time.Duration(f.opt.ChunkCleanInterval) pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
doChangeNotify(context.Background(), f.receiveChangeNotify, pollInterval) doChangeNotify(f.receiveChangeNotify, pollInterval)
} }
f.features = (&fs.Features{ f.features = (&fs.Features{
@@ -600,7 +599,7 @@ is used on top of the cache.
return f, fsErr return f, fsErr
} }
func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err error) { func (f *Fs) httpStats(in rc.Params) (out rc.Params, err error) {
out = make(rc.Params) out = make(rc.Params)
m, err := f.Stats() m, err := f.Stats()
if err != nil { if err != nil {
@@ -627,7 +626,7 @@ func (f *Fs) unwrapRemote(remote string) string {
return remote return remote
} }
func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params, err error) { func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
out = make(rc.Params) out = make(rc.Params)
remoteInt, ok := in["remote"] remoteInt, ok := in["remote"]
if !ok { if !ok {
@@ -672,7 +671,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
return out, nil return out, nil
} }
func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) { func (f *Fs) rcFetch(in rc.Params) (rc.Params, error) {
type chunkRange struct { type chunkRange struct {
start, end int64 start, end int64
} }
@@ -777,18 +776,18 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
for _, pair := range files { for _, pair := range files {
file, remote := pair[0], pair[1] file, remote := pair[0], pair[1]
var status fileStatus var status fileStatus
o, err := f.NewObject(ctx, remote) o, err := f.NewObject(remote)
if err != nil { if err != nil {
fetchedChunks[file] = fileStatus{Error: err.Error()} fetchedChunks[file] = fileStatus{Error: err.Error()}
continue continue
} }
co := o.(*Object) co := o.(*Object)
err = co.refreshFromSource(ctx, true) err = co.refreshFromSource(true)
if err != nil { if err != nil {
fetchedChunks[file] = fileStatus{Error: err.Error()} fetchedChunks[file] = fileStatus{Error: err.Error()}
continue continue
} }
handle := NewObjectHandle(ctx, co, f) handle := NewObjectHandle(co, f)
handle.UseMemory = false handle.UseMemory = false
handle.scaleWorkers(1) handle.scaleWorkers(1)
walkChunkRanges(crs, co.Size(), func(chunk int64) { walkChunkRanges(crs, co.Size(), func(chunk int64) {
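
rcFetch maps the requested byte ranges onto chunk indices and pulls each chunk through an object handle. The real walkChunkRanges takes the chunk size from the Fs configuration; the sketch below passes it explicitly and is otherwise a guess at the same mapping (end offsets treated as exclusive):

package main

import "fmt"

type chunkRange struct{ start, end int64 } // byte offsets, end exclusive

// walkChunkRanges calls fn once per chunk index overlapped by any range.
func walkChunkRanges(crs []chunkRange, size, chunkSize int64, fn func(chunk int64)) {
	seen := map[int64]bool{}
	for _, cr := range crs {
		if cr.end > size {
			cr.end = size
		}
		for c := cr.start / chunkSize; c*chunkSize < cr.end; c++ {
			if !seen[c] {
				seen[c] = true
				fn(c)
			}
		}
	}
}

func main() {
	ranges := []chunkRange{{0, 10}, {250, 260}}
	walkChunkRanges(ranges, 1000, 100, func(c int64) {
		fmt.Println("fetch chunk", c) // chunks 0 and 2
	})
}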
@@ -874,7 +873,7 @@ func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) {
// ChangeNotify can subscribe multiple callers // ChangeNotify can subscribe multiple callers
// this is coupled with the wrapped fs ChangeNotify (if it supports it) // this is coupled with the wrapped fs ChangeNotify (if it supports it)
// and also notifies other caches (i.e. VFS) to clear out whenever something changes // and also notifies other caches (i.e. VFS) to clear out whenever something changes
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) { func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
f.parentsForgetMu.Lock() f.parentsForgetMu.Lock()
defer f.parentsForgetMu.Unlock() defer f.parentsForgetMu.Unlock()
fs.Debugf(f, "subscribing to ChangeNotify") fs.Debugf(f, "subscribing to ChangeNotify")
@@ -921,7 +920,7 @@ func (f *Fs) TempUploadWaitTime() time.Duration {
} }
// NewObject finds the Object at remote. // NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { func (f *Fs) NewObject(remote string) (fs.Object, error) {
var err error var err error
fs.Debugf(f, "new object '%s'", remote) fs.Debugf(f, "new object '%s'", remote)
@@ -940,16 +939,16 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// search for entry in source or temp fs // search for entry in source or temp fs
var obj fs.Object var obj fs.Object
if f.opt.TempWritePath != "" { if f.opt.TempWritePath != "" {
obj, err = f.tempFs.NewObject(ctx, remote) obj, err = f.tempFs.NewObject(remote)
// not found in temp fs // not found in temp fs
if err != nil { if err != nil {
fs.Debugf(remote, "find: not found in local cache fs") fs.Debugf(remote, "find: not found in local cache fs")
obj, err = f.Fs.NewObject(ctx, remote) obj, err = f.Fs.NewObject(remote)
} else { } else {
fs.Debugf(obj, "find: found in local cache fs") fs.Debugf(obj, "find: found in local cache fs")
} }
} else { } else {
obj, err = f.Fs.NewObject(ctx, remote) obj, err = f.Fs.NewObject(remote)
} }
// not found in either fs // not found in either fs
@@ -959,13 +958,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
} }
// cache the new entry // cache the new entry
co = ObjectFromOriginal(ctx, f, obj).persist() co = ObjectFromOriginal(f, obj).persist()
fs.Debugf(co, "find: cached object") fs.Debugf(co, "find: cached object")
return co, nil return co, nil
} }
// List the objects and directories in dir into entries // List the objects and directories in dir into entries
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
fs.Debugf(f, "list '%s'", dir) fs.Debugf(f, "list '%s'", dir)
cd := ShallowDirectory(f, dir) cd := ShallowDirectory(f, dir)
@@ -995,12 +994,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fs.Debugf(dir, "list: temp fs entries: %v", queuedEntries) fs.Debugf(dir, "list: temp fs entries: %v", queuedEntries)
for _, queuedRemote := range queuedEntries { for _, queuedRemote := range queuedEntries {
queuedEntry, err := f.tempFs.NewObject(ctx, f.cleanRootFromPath(queuedRemote)) queuedEntry, err := f.tempFs.NewObject(f.cleanRootFromPath(queuedRemote))
if err != nil { if err != nil {
fs.Debugf(dir, "list: temp file not found in local fs: %v", err) fs.Debugf(dir, "list: temp file not found in local fs: %v", err)
continue continue
} }
co := ObjectFromOriginal(ctx, f, queuedEntry).persist() co := ObjectFromOriginal(f, queuedEntry).persist()
fs.Debugf(co, "list: cached temp object") fs.Debugf(co, "list: cached temp object")
cachedEntries = append(cachedEntries, co) cachedEntries = append(cachedEntries, co)
} }
@@ -1008,7 +1007,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
} }
// search from the source // search from the source
sourceEntries, err := f.Fs.List(ctx, dir) sourceEntries, err := f.Fs.List(dir)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -1046,11 +1045,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if i < tmpCnt && cachedEntries[i].Remote() == oRemote { if i < tmpCnt && cachedEntries[i].Remote() == oRemote {
continue continue
} }
co := ObjectFromOriginal(ctx, f, o).persist() co := ObjectFromOriginal(f, o).persist()
cachedEntries = append(cachedEntries, co) cachedEntries = append(cachedEntries, co)
fs.Debugf(dir, "list: cached object: %v", co) fs.Debugf(dir, "list: cached object: %v", co)
case fs.Directory: case fs.Directory:
cdd := DirectoryFromOriginal(ctx, f, o) cdd := DirectoryFromOriginal(f, o)
// check if the dir isn't expired and add it in cache if it isn't // check if the dir isn't expired and add it in cache if it isn't
if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(time.Duration(f.opt.InfoAge))) { if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
batchDirectories = append(batchDirectories, cdd) batchDirectories = append(batchDirectories, cdd)
@@ -1080,8 +1079,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return cachedEntries, nil return cachedEntries, nil
} }
func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error { func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {
entries, err := f.List(ctx, dir) entries, err := f.List(dir)
if err != nil { if err != nil {
return err return err
} }
@@ -1089,7 +1088,7 @@ func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) er
for i := 0; i < len(entries); i++ { for i := 0; i < len(entries); i++ {
innerDir, ok := entries[i].(fs.Directory) innerDir, ok := entries[i].(fs.Directory)
if ok { if ok {
err := f.recurse(ctx, innerDir.Remote(), list) err := f.recurse(innerDir.Remote(), list)
if err != nil { if err != nil {
return err return err
} }
@@ -1106,21 +1105,21 @@ func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) er
// ListR lists the objects and directories of the Fs starting // ListR lists the objects and directories of the Fs starting
// from dir recursively into out. // from dir recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
fs.Debugf(f, "list recursively from '%s'", dir) fs.Debugf(f, "list recursively from '%s'", dir)
// we check if the source FS supports ListR // we check if the source FS supports ListR
// if it does, we'll use that to get all the entries, cache them and return // if it does, we'll use that to get all the entries, cache them and return
do := f.Fs.Features().ListR do := f.Fs.Features().ListR
if do != nil { if do != nil {
return do(ctx, dir, func(entries fs.DirEntries) error { return do(dir, func(entries fs.DirEntries) error {
// we got called back with a set of entries so let's cache them and call the original callback // we got called back with a set of entries so let's cache them and call the original callback
for _, entry := range entries { for _, entry := range entries {
switch o := entry.(type) { switch o := entry.(type) {
case fs.Object: case fs.Object:
_ = f.cache.AddObject(ObjectFromOriginal(ctx, f, o)) _ = f.cache.AddObject(ObjectFromOriginal(f, o))
case fs.Directory: case fs.Directory:
_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o)) _ = f.cache.AddDir(DirectoryFromOriginal(f, o))
default: default:
return errors.Errorf("Unknown object type %T", entry) return errors.Errorf("Unknown object type %T", entry)
} }
@@ -1133,7 +1132,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// if we're here, we're gonna do a standard recursive traversal and cache everything // if we're here, we're gonna do a standard recursive traversal and cache everything
list := walk.NewListRHelper(callback) list := walk.NewListRHelper(callback)
err = f.recurse(ctx, dir, list) err = f.recurse(dir, list)
if err != nil { if err != nil {
return err return err
} }
@@ -1142,9 +1141,9 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
} }
// Mkdir makes the directory (container, bucket) // Mkdir makes the directory (container, bucket)
func (f *Fs) Mkdir(ctx context.Context, dir string) error { func (f *Fs) Mkdir(dir string) error {
fs.Debugf(f, "mkdir '%s'", dir) fs.Debugf(f, "mkdir '%s'", dir)
err := f.Fs.Mkdir(ctx, dir) err := f.Fs.Mkdir(dir)
if err != nil { if err != nil {
return err return err
} }
@@ -1172,7 +1171,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
} }
// Rmdir removes the directory (container, bucket) if empty // Rmdir removes the directory (container, bucket) if empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(dir string) error {
fs.Debugf(f, "rmdir '%s'", dir) fs.Debugf(f, "rmdir '%s'", dir)
if f.opt.TempWritePath != "" { if f.opt.TempWritePath != "" {
@@ -1182,9 +1181,9 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// we check if the source exists on the remote and make the same move on it too if it does // we check if the source exists on the remote and make the same move on it too if it does
// otherwise, we skip this step // otherwise, we skip this step
_, err := f.UnWrap().List(ctx, dir) _, err := f.UnWrap().List(dir)
if err == nil { if err == nil {
err := f.Fs.Rmdir(ctx, dir) err := f.Fs.Rmdir(dir)
if err != nil { if err != nil {
return err return err
} }
@@ -1192,10 +1191,10 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
} }
var queuedEntries []*Object var queuedEntries []*Object
err = walk.ListR(ctx, f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error { err = walk.ListR(f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
for _, o := range entries { for _, o := range entries {
if oo, ok := o.(fs.Object); ok { if oo, ok := o.(fs.Object); ok {
co := ObjectFromOriginal(ctx, f, oo) co := ObjectFromOriginal(f, oo)
queuedEntries = append(queuedEntries, co) queuedEntries = append(queuedEntries, co)
} }
} }
@@ -1212,7 +1211,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
} }
} }
} else { } else {
err := f.Fs.Rmdir(ctx, dir) err := f.Fs.Rmdir(dir)
if err != nil { if err != nil {
return err return err
} }
@@ -1243,7 +1242,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// DirMove moves src, srcRemote to this remote at dstRemote // DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations. // using server side move operations.
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote) fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)
do := f.Fs.Features().DirMove do := f.Fs.Features().DirMove
@@ -1265,8 +1264,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
f.backgroundRunner.pause() f.backgroundRunner.pause()
defer f.backgroundRunner.play() defer f.backgroundRunner.play()
_, errInWrap := srcFs.UnWrap().List(ctx, srcRemote) _, errInWrap := srcFs.UnWrap().List(srcRemote)
_, errInTemp := f.tempFs.List(ctx, srcRemote) _, errInTemp := f.tempFs.List(srcRemote)
// not found in either fs // not found in either fs
if errInWrap != nil && errInTemp != nil { if errInWrap != nil && errInTemp != nil {
return fs.ErrorDirNotFound return fs.ErrorDirNotFound
@@ -1275,7 +1274,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// we check if the source exists on the remote and make the same move on it too if it does // we check if the source exists on the remote and make the same move on it too if it does
// otherwise, we skip this step // otherwise, we skip this step
if errInWrap == nil { if errInWrap == nil {
err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote) err := do(srcFs.UnWrap(), srcRemote, dstRemote)
if err != nil { if err != nil {
return err return err
} }
@@ -1288,10 +1287,10 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
} }
var queuedEntries []*Object var queuedEntries []*Object
err := walk.ListR(ctx, f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error { err := walk.ListR(f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
for _, o := range entries { for _, o := range entries {
if oo, ok := o.(fs.Object); ok { if oo, ok := o.(fs.Object); ok {
co := ObjectFromOriginal(ctx, f, oo) co := ObjectFromOriginal(f, oo)
queuedEntries = append(queuedEntries, co) queuedEntries = append(queuedEntries, co)
if co.tempFileStartedUpload() { if co.tempFileStartedUpload() {
fs.Errorf(co, "can't move - upload has already started. need to finish that") fs.Errorf(co, "can't move - upload has already started. need to finish that")
@@ -1312,16 +1311,16 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
fs.Errorf(srcRemote, "dirmove: can't move dir in temp fs") fs.Errorf(srcRemote, "dirmove: can't move dir in temp fs")
return fs.ErrorCantDirMove return fs.ErrorCantDirMove
} }
err = do(ctx, f.tempFs, srcRemote, dstRemote) err = do(f.tempFs, srcRemote, dstRemote)
if err != nil { if err != nil {
return err return err
} }
err = f.cache.ReconcileTempUploads(ctx, f) err = f.cache.ReconcileTempUploads(f)
if err != nil { if err != nil {
return err return err
} }
} else { } else {
err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote) err := do(srcFs.UnWrap(), srcRemote, dstRemote)
if err != nil { if err != nil {
return err return err
} }
@@ -1427,10 +1426,10 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
} }
} }
type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
// put in to the remote path // put in to the remote path
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) { func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
var err error var err error
var obj fs.Object var obj fs.Object
@@ -1441,7 +1440,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
_ = f.cache.ExpireDir(parentCd) _ = f.cache.ExpireDir(parentCd)
f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory) f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
obj, err = f.tempFs.Put(ctx, in, src, options...) obj, err = f.tempFs.Put(in, src, options...)
if err != nil { if err != nil {
fs.Errorf(obj, "put: failed to upload in temp fs: %v", err) fs.Errorf(obj, "put: failed to upload in temp fs: %v", err)
return nil, err return nil, err
@@ -1456,14 +1455,14 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
// if cache writes is enabled write it first through cache // if cache writes is enabled write it first through cache
} else if f.opt.StoreWrites { } else if f.opt.StoreWrites {
f.cacheReader(in, src, func(inn io.Reader) { f.cacheReader(in, src, func(inn io.Reader) {
obj, err = put(ctx, inn, src, options...) obj, err = put(inn, src, options...)
}) })
if err == nil { if err == nil {
fs.Debugf(obj, "put: uploaded to remote fs and saved in cache") fs.Debugf(obj, "put: uploaded to remote fs and saved in cache")
} }
// last option: save it directly in remote fs // last option: save it directly in remote fs
} else { } else {
obj, err = put(ctx, in, src, options...) obj, err = put(in, src, options...)
if err == nil { if err == nil {
fs.Debugf(obj, "put: uploaded to remote fs") fs.Debugf(obj, "put: uploaded to remote fs")
} }
@@ -1475,7 +1474,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
} }
// cache the new file // cache the new file
cachedObj := ObjectFromOriginal(ctx, f, obj) cachedObj := ObjectFromOriginal(f, obj)
// deleting cached chunks and info to be replaced with new ones // deleting cached chunks and info to be replaced with new ones
_ = f.cache.RemoveObject(cachedObj.abs()) _ = f.cache.RemoveObject(cachedObj.abs())
@@ -1498,33 +1497,33 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
} }
// Put in to the remote path with the modTime given of the given size // Put in to the remote path with the modTime given of the given size
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
fs.Debugf(f, "put data at '%s'", src.Remote()) fs.Debugf(f, "put data at '%s'", src.Remote())
return f.put(ctx, in, src, options, f.Fs.Put) return f.put(in, src, options, f.Fs.Put)
} }
// PutUnchecked uploads the object // PutUnchecked uploads the object
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.Fs.Features().PutUnchecked do := f.Fs.Features().PutUnchecked
if do == nil { if do == nil {
return nil, errors.New("can't PutUnchecked") return nil, errors.New("can't PutUnchecked")
} }
fs.Debugf(f, "put data unchecked in '%s'", src.Remote()) fs.Debugf(f, "put data unchecked in '%s'", src.Remote())
return f.put(ctx, in, src, options, do) return f.put(in, src, options, do)
} }
// PutStream uploads the object // PutStream uploads the object
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.Fs.Features().PutStream do := f.Fs.Features().PutStream
if do == nil { if do == nil {
return nil, errors.New("can't PutStream") return nil, errors.New("can't PutStream")
} }
fs.Debugf(f, "put data streaming in '%s'", src.Remote()) fs.Debugf(f, "put data streaming in '%s'", src.Remote())
return f.put(ctx, in, src, options, do) return f.put(in, src, options, do)
} }
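
Taken together, put() routes an upload one of three ways: queue it in the temp fs for background upload, tee it through the chunk cache while uploading (the StoreWrites branch via cacheReader), or hand it straight to the wrapped remote. A sketch of the tee case, with a bytes.Buffer standing in for the chunk store:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// putFn mirrors the diff's callback type, minus rclone's own types.
type putFn func(in io.Reader) (string, error)

// putThroughCache tees everything the uploader reads into the cache.
func putThroughCache(in io.Reader, cache *bytes.Buffer, put putFn) (string, error) {
	return put(io.TeeReader(in, cache))
}

func main() {
	var cache bytes.Buffer
	upload := func(in io.Reader) (string, error) {
		b, err := io.ReadAll(in) // pretend remote upload
		return fmt.Sprintf("uploaded %d bytes", len(b)), err
	}
	msg, err := putThroughCache(strings.NewReader("some object data"), &cache, upload)
	fmt.Println(msg, err)
	fmt.Printf("cache now holds %d bytes\n", cache.Len())
}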
// Copy src to this remote using server side copy operations. // Copy src to this remote using server side copy operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote) fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
do := f.Fs.Features().Copy do := f.Fs.Features().Copy
@@ -1544,7 +1543,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantCopy return nil, fs.ErrorCantCopy
} }
// refresh from source or abort // refresh from source or abort
if err := srcObj.refreshFromSource(ctx, false); err != nil { if err := srcObj.refreshFromSource(false); err != nil {
fs.Errorf(f, "can't copy %v - %v", src, err) fs.Errorf(f, "can't copy %v - %v", src, err)
return nil, fs.ErrorCantCopy return nil, fs.ErrorCantCopy
} }
@@ -1563,7 +1562,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
} }
obj, err := do(ctx, srcObj.Object, remote) obj, err := do(srcObj.Object, remote)
if err != nil { if err != nil {
fs.Errorf(srcObj, "error moving in cache: %v", err) fs.Errorf(srcObj, "error moving in cache: %v", err)
return nil, err return nil, err
@@ -1571,7 +1570,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(obj, "copy: file copied") fs.Debugf(obj, "copy: file copied")
// persist new // persist new
co := ObjectFromOriginal(ctx, f, obj).persist() co := ObjectFromOriginal(f, obj).persist()
fs.Debugf(co, "copy: added to cache") fs.Debugf(co, "copy: added to cache")
// expire the destination path // expire the destination path
parentCd := NewDirectory(f, cleanPath(path.Dir(co.Remote()))) parentCd := NewDirectory(f, cleanPath(path.Dir(co.Remote())))
@@ -1598,7 +1597,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 
 // Move src to this remote using server side move operations.
-func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 	fs.Debugf(f, "moving obj '%s' -> %s", src, remote)
 
 	// if source fs doesn't support move abort
@@ -1619,7 +1618,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		return nil, fs.ErrorCantMove
 	}
 	// refresh from source or abort
-	if err := srcObj.refreshFromSource(ctx, false); err != nil {
+	if err := srcObj.refreshFromSource(false); err != nil {
 		fs.Errorf(f, "can't move %v - %v", src, err)
 		return nil, fs.ErrorCantMove
 	}
@@ -1655,7 +1654,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		fs.Debugf(srcObj, "move: queued file moved to %v", remote)
 	}
 
-	obj, err := do(ctx, srcObj.Object, remote)
+	obj, err := do(srcObj.Object, remote)
 	if err != nil {
 		fs.Errorf(srcObj, "error moving: %v", err)
 		return nil, err
@@ -1680,7 +1679,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	// advertise to ChangeNotify if wrapped doesn't do that
 	f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
 	// persist new
-	cachedObj := ObjectFromOriginal(ctx, f, obj).persist()
+	cachedObj := ObjectFromOriginal(f, obj).persist()
 	fs.Debugf(cachedObj, "move: added to cache")
 	// expire new parent
 	parentCd = NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote())))
@@ -1702,7 +1701,7 @@ func (f *Fs) Hashes() hash.Set {
 }
 
 // Purge all files in the root and the root directory
-func (f *Fs) Purge(ctx context.Context) error {
+func (f *Fs) Purge() error {
 	fs.Infof(f, "purging cache")
 	f.cache.Purge()
 
@@ -1711,7 +1710,7 @@ func (f *Fs) Purge(ctx context.Context) error {
 		return nil
 	}
 
-	err := do(ctx)
+	err := do()
 	if err != nil {
 		return err
 	}
@@ -1720,7 +1719,7 @@ func (f *Fs) Purge(ctx context.Context) error {
 }
 
 // CleanUp the trash in the Fs
-func (f *Fs) CleanUp(ctx context.Context) error {
+func (f *Fs) CleanUp() error {
 	f.CleanUpCache(false)
 
 	do := f.Fs.Features().CleanUp
@@ -1728,16 +1727,16 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 		return nil
 	}
 
-	return do(ctx)
+	return do()
 }
 
 // About gets quota information from the Fs
-func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
+func (f *Fs) About() (*fs.Usage, error) {
 	do := f.Fs.Features().About
 	if do == nil {
 		return nil, errors.New("About not supported")
 	}
-	return do(ctx)
+	return do()
 }
 
 // Stats returns stats about the cache storage
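Note: every hunk in this file is the same mechanical rewrite. The side being compared against predates rclone's context plumbing, so each method on the cache Fs loses its leading ctx context.Context parameter and each wrapped-feature call drops the ctx argument. A minimal sketch of the two shapes, using invented names rather than the real cache types:

```go
package main

import (
	"context"
	"fmt"
)

// newer shape: a context is threaded through every filesystem method,
// so cancellation and deadlines can propagate to the wrapped remote
func purgeWithCtx(ctx context.Context) error {
	fmt.Println("purge, cancellable:", ctx.Err() == nil)
	return nil
}

// older shape: no context parameter, nothing to cancel against
func purge() error {
	fmt.Println("purge")
	return nil
}

func main() {
	_ = purgeWithCtx(context.Background())
	_ = purge()
}
```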

View File

@@ -4,7 +4,6 @@ package cache_test
 
 import (
 	"bytes"
-	"context"
 	"encoding/base64"
 	goflag "flag"
 	"fmt"
@@ -22,19 +21,19 @@ import (
 	"testing"
 	"time"
 
+	"github.com/ncw/rclone/backend/cache"
+	"github.com/ncw/rclone/backend/crypt"
+	_ "github.com/ncw/rclone/backend/drive"
+	"github.com/ncw/rclone/backend/local"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/object"
+	"github.com/ncw/rclone/fs/rc"
+	"github.com/ncw/rclone/fstest"
+	"github.com/ncw/rclone/vfs"
+	"github.com/ncw/rclone/vfs/vfsflags"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/backend/cache"
-	"github.com/rclone/rclone/backend/crypt"
-	_ "github.com/rclone/rclone/backend/drive"
-	"github.com/rclone/rclone/backend/local"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/object"
-	"github.com/rclone/rclone/fs/rc"
-	"github.com/rclone/rclone/fstest"
-	"github.com/rclone/rclone/vfs"
-	"github.com/rclone/rclone/vfs/vfsflags"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -121,7 +120,7 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {
 	require.NoError(t, err)
 	listRootInner, err := runInstance.list(t, rootFs, innerFolder)
 	require.NoError(t, err)
-	listInner, err := rootFs2.List(context.Background(), "")
+	listInner, err := rootFs2.List("")
 	require.NoError(t, err)
 
 	require.Len(t, listRoot, 1)
@@ -139,10 +138,10 @@ func TestInternalVfsCache(t *testing.T) {
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
-	err := rootFs.Mkdir(context.Background(), "test")
+	err := rootFs.Mkdir("test")
 	require.NoError(t, err)
 	runInstance.writeObjectString(t, rootFs, "test/second", "content")
-	_, err = rootFs.List(context.Background(), "test")
+	_, err = rootFs.List("test")
 	require.NoError(t, err)
 
 	testReader := runInstance.randomReader(t, testSize)
@@ -267,7 +266,7 @@ func TestInternalObjNotFound(t *testing.T) {
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
-	obj, err := rootFs.NewObject(context.Background(), "404")
+	obj, err := rootFs.NewObject("404")
 	require.Error(t, err)
 	require.Nil(t, obj)
 }
@@ -446,7 +445,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 	require.NoError(t, err)
 	log.Printf("original size: %v", originalSize)
 
-	o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+	o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
 	require.NoError(t, err)
 	expectedSize := int64(len([]byte("test content")))
 	var data2 []byte
@@ -458,7 +457,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 		data2 = []byte("test content")
 	}
 	objInfo := object.NewStaticObjectInfo(runInstance.encryptRemoteIfNeeded(t, "data.bin"), time.Now(), int64(len(data2)), true, nil, cfs.UnWrap())
-	err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
+	err = o.Update(bytes.NewReader(data2), objInfo)
 	require.NoError(t, err)
 	require.Equal(t, int64(len(data2)), o.Size())
 	log.Printf("updated size: %v", len(data2))
@@ -504,9 +503,9 @@ func TestInternalMoveWithNotify(t *testing.T) {
 	} else {
 		testData = []byte("test content")
 	}
-	_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test"))
-	_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/one"))
-	_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/second"))
+	_ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test"))
+	_ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/one"))
+	_ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/second"))
 	srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)
 
 	// list in mount
@@ -516,7 +515,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
 	require.NoError(t, err)
 
 	// move file
-	_, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName)
+	_, err = cfs.UnWrap().Features().Move(srcObj, dstName)
 	require.NoError(t, err)
 
 	err = runInstance.retryBlock(func() error {
@@ -590,9 +589,9 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 	} else {
 		testData = []byte("test content")
 	}
-	err = rootFs.Mkdir(context.Background(), "test")
+	err = rootFs.Mkdir("test")
 	require.NoError(t, err)
-	err = rootFs.Mkdir(context.Background(), "test/one")
+	err = rootFs.Mkdir("test/one")
 	require.NoError(t, err)
 
 	srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)
@@ -609,7 +608,7 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 	require.False(t, found)
 
 	// move file
-	_, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName)
+	_, err = cfs.UnWrap().Features().Move(srcObj, dstName)
 	require.NoError(t, err)
 
 	err = runInstance.retryBlock(func() error {
@@ -671,23 +670,23 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
 	runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
 
 	// update in the wrapped fs
-	o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+	o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
 	require.NoError(t, err)
 	wrappedTime := time.Now().Add(-1 * time.Hour)
-	err = o.SetModTime(context.Background(), wrappedTime)
+	err = o.SetModTime(wrappedTime)
 	require.NoError(t, err)
 
 	// get a new instance from the cache
-	co, err := rootFs.NewObject(context.Background(), "data.bin")
+	co, err := rootFs.NewObject("data.bin")
 	require.NoError(t, err)
-	require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
+	require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
 
 	cfs.DirCacheFlush() // flush the cache
 
 	// get a new instance from the cache
-	co, err = rootFs.NewObject(context.Background(), "data.bin")
+	co, err = rootFs.NewObject("data.bin")
 	require.NoError(t, err)
-	require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
+	require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
 }
 
 func TestInternalChangeSeenAfterRc(t *testing.T) {
@@ -714,19 +713,19 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
 	runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
 
 	// update in the wrapped fs
-	o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+	o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
 	require.NoError(t, err)
 	wrappedTime := time.Now().Add(-1 * time.Hour)
-	err = o.SetModTime(context.Background(), wrappedTime)
+	err = o.SetModTime(wrappedTime)
 	require.NoError(t, err)
 
 	// get a new instance from the cache
-	co, err := rootFs.NewObject(context.Background(), "data.bin")
+	co, err := rootFs.NewObject("data.bin")
 	require.NoError(t, err)
-	require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
+	require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
 
 	// Call the rc function
-	m, err := cacheExpire.Fn(context.Background(), rc.Params{"remote": "data.bin"})
+	m, err := cacheExpire.Fn(rc.Params{"remote": "data.bin"})
 	require.NoError(t, err)
 	require.Contains(t, m, "status")
 	require.Contains(t, m, "message")
@@ -734,9 +733,9 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
 	require.Contains(t, m["message"], "cached file cleared")
 
 	// get a new instance from the cache
-	co, err = rootFs.NewObject(context.Background(), "data.bin")
+	co, err = rootFs.NewObject("data.bin")
 	require.NoError(t, err)
-	require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
+	require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
 
 	_, err = runInstance.list(t, rootFs, "")
 	require.NoError(t, err)
@@ -750,7 +749,7 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
 	require.Len(t, li1, 1)
 
 	// Call the rc function
-	m, err = cacheExpire.Fn(context.Background(), rc.Params{"remote": "/"})
+	m, err = cacheExpire.Fn(rc.Params{"remote": "/"})
 	require.NoError(t, err)
 	require.Contains(t, m, "status")
 	require.Contains(t, m, "message")
@@ -795,7 +794,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
 	// create some rand test data
 	testData := randStringBytes(int(int64(totalChunks-1)*chunkSize + chunkSize/2))
 	runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
-	o, err := cfs.NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+	o, err := cfs.NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
 	require.NoError(t, err)
 	co, ok := o.(*cache.Object)
 	require.True(t, ok)
@@ -834,7 +833,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
 	require.NoError(t, err)
 	require.Len(t, l, 1)
 
-	err = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/third"))
+	err = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/third"))
 	require.NoError(t, err)
 
 	l, err = runInstance.list(t, rootFs, "test")
@@ -869,14 +868,14 @@ func TestInternalBug2117(t *testing.T) {
 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
 
-	err = cfs.UnWrap().Mkdir(context.Background(), "test")
+	err = cfs.UnWrap().Mkdir("test")
 	require.NoError(t, err)
 	for i := 1; i <= 4; i++ {
-		err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d", i))
+		err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d", i))
 		require.NoError(t, err)
 
 		for j := 1; j <= 4; j++ {
-			err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d/dir%d", i, j))
+			err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d/dir%d", i, j))
 			require.NoError(t, err)
 
 			runInstance.writeObjectString(t, cfs.UnWrap(), fmt.Sprintf("test/dir%d/dir%d/test.txt", i, j), "test")
@@ -1081,10 +1080,10 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 	}
 
 	if purge {
-		_ = f.Features().Purge(context.Background())
+		_ = f.Features().Purge()
 		require.NoError(t, err)
 	}
-	err = f.Mkdir(context.Background(), "")
+	err = f.Mkdir("")
 	require.NoError(t, err)
 	if r.useMount && !r.isMounted {
 		r.mountFs(t, f)
@@ -1098,7 +1097,7 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
 		r.unmountFs(t, f)
 	}
 
	err := f.Features().Purge(context.Background())
 	require.NoError(t, err)
 	cfs, err := r.getCacheFs(f)
 	require.NoError(t, err)
@@ -1200,7 +1199,7 @@ func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.Read
 func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
 	in := bytes.NewReader(data)
 	_ = r.writeObjectReader(t, f, remote, in)
-	o, err := f.NewObject(context.Background(), remote)
+	o, err := f.NewObject(remote)
 	require.NoError(t, err)
 	require.Equal(t, int64(len(data)), o.Size())
 	return o
@@ -1209,7 +1208,7 @@ func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte
 func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Reader) fs.Object {
 	modTime := time.Now()
 	objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
-	obj, err := f.Put(context.Background(), in, objInfo)
+	obj, err := f.Put(in, objInfo)
 	require.NoError(t, err)
 	if r.useMount {
 		r.vfs.WaitForWriters(10 * time.Second)
@@ -1229,18 +1228,18 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []b
 		err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600)
 		require.NoError(t, err)
 		r.vfs.WaitForWriters(10 * time.Second)
-		obj, err = f.NewObject(context.Background(), remote)
+		obj, err = f.NewObject(remote)
 	} else {
 		in1 := bytes.NewReader(data1)
 		in2 := bytes.NewReader(data2)
 		objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
 		objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)
 
-		obj, err = f.Put(context.Background(), in1, objInfo1)
+		obj, err = f.Put(in1, objInfo1)
 		require.NoError(t, err)
-		obj, err = f.NewObject(context.Background(), remote)
+		obj, err = f.NewObject(remote)
 		require.NoError(t, err)
-		err = obj.Update(context.Background(), in2, objInfo2)
+		err = obj.Update(in2, objInfo2)
 	}
 
 	require.NoError(t, err)
@@ -1269,7 +1268,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
 			return checkSample, err
 		}
 	} else {
-		co, err := f.NewObject(context.Background(), remote)
+		co, err := f.NewObject(remote)
 		if err != nil {
 			return checkSample, err
 		}
@@ -1284,7 +1283,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
 func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLengthCheck bool) []byte {
 	size := end - offset
 	checkSample := make([]byte, size)
-	reader, err := o.Open(context.Background(), &fs.SeekOption{Offset: offset})
+	reader, err := o.Open(&fs.SeekOption{Offset: offset})
 	require.NoError(t, err)
 	totalRead, err := io.ReadFull(reader, checkSample)
 	if (err == io.EOF || err == io.ErrUnexpectedEOF) && noLengthCheck {
@@ -1301,7 +1300,7 @@ func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) {
 	if r.useMount {
 		err = os.Mkdir(path.Join(r.mntDir, remote), 0700)
 	} else {
-		err = f.Mkdir(context.Background(), remote)
+		err = f.Mkdir(remote)
 	}
 	require.NoError(t, err)
 }
@@ -1313,11 +1312,11 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
 		err = os.Remove(path.Join(r.mntDir, remote))
 	} else {
 		var obj fs.Object
-		obj, err = f.NewObject(context.Background(), remote)
+		obj, err = f.NewObject(remote)
 		if err != nil {
-			err = f.Rmdir(context.Background(), remote)
+			err = f.Rmdir(remote)
 		} else {
-			err = obj.Remove(context.Background())
+			err = obj.Remove()
 		}
 	}
@@ -1335,7 +1334,7 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
 		}
 	} else {
 		var list fs.DirEntries
-		list, err = f.List(context.Background(), remote)
+		list, err = f.List(remote)
 		for _, ll := range list {
 			l = append(l, ll)
 		}
@@ -1354,7 +1353,7 @@ func (r *run) listPath(t *testing.T, f fs.Fs, remote string) []string {
 		}
 	} else {
 		var list fs.DirEntries
-		list, err = f.List(context.Background(), remote)
+		list, err = f.List(remote)
 		for _, ll := range list {
 			l = append(l, ll.Remote())
 		}
@@ -1394,7 +1393,7 @@ func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
 		}
 		r.vfs.WaitForWriters(10 * time.Second)
 	} else if rootFs.Features().DirMove != nil {
-		err = rootFs.Features().DirMove(context.Background(), rootFs, src, dst)
+		err = rootFs.Features().DirMove(rootFs, src, dst)
 		if err != nil {
 			return err
 		}
@@ -1416,11 +1415,11 @@ func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
 		}
 		r.vfs.WaitForWriters(10 * time.Second)
 	} else if rootFs.Features().Move != nil {
-		obj1, err := rootFs.NewObject(context.Background(), src)
+		obj1, err := rootFs.NewObject(src)
 		if err != nil {
 			return err
 		}
-		_, err = rootFs.Features().Move(context.Background(), obj1, dst)
+		_, err = rootFs.Features().Move(obj1, dst)
 		if err != nil {
 			return err
 		}
@@ -1442,11 +1441,11 @@ func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
 		}
 		r.vfs.WaitForWriters(10 * time.Second)
 	} else if rootFs.Features().Copy != nil {
-		obj, err := rootFs.NewObject(context.Background(), src)
+		obj, err := rootFs.NewObject(src)
 		if err != nil {
 			return err
 		}
-		_, err = rootFs.Features().Copy(context.Background(), obj, dst)
+		_, err = rootFs.Features().Copy(obj, dst)
 		if err != nil {
 			return err
 		}
@@ -1468,11 +1467,11 @@ func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error)
 		}
 		return fi.ModTime(), nil
 	}
-	obj1, err := rootFs.NewObject(context.Background(), src)
+	obj1, err := rootFs.NewObject(src)
 	if err != nil {
 		return time.Time{}, err
 	}
-	return obj1.ModTime(context.Background()), nil
+	return obj1.ModTime(), nil
 }
 
 func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
@@ -1485,7 +1484,7 @@ func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
 		}
 		return fi.Size(), nil
 	}
-	obj1, err := rootFs.NewObject(context.Background(), src)
+	obj1, err := rootFs.NewObject(src)
 	if err != nil {
 		return int64(0), err
 	}
@@ -1508,14 +1507,14 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
 		_, err = f.WriteString(data + append)
 	} else {
 		var obj1 fs.Object
-		obj1, err = rootFs.NewObject(context.Background(), src)
+		obj1, err = rootFs.NewObject(src)
 		if err != nil {
 			return err
 		}
 		data1 := []byte(data + append)
 		r := bytes.NewReader(data1)
 		objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
-		err = obj1.Update(context.Background(), r, objInfo1)
+		err = obj1.Update(r, objInfo1)
 	}
 
 	return err
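Note: TestInternalChangeSeenAfterDirCacheFlush above hinges on cached metadata going stale: the wrapped remote's ModTime changes, the cache keeps answering with the old value, and only a flush makes the two agree. A toy model of that behaviour, with invented types rather than the cache backend's API:

```go
package main

import (
	"fmt"
	"time"
)

type cachedEntry struct {
	liveMod   time.Time // what the wrapped remote would report
	cachedMod time.Time // what was stored when the entry was cached
	flushed   bool
}

func (c *cachedEntry) ModTime() time.Time {
	if !c.flushed {
		return c.cachedMod // stale answer served from the cache
	}
	return c.liveMod // after a flush the live value shows through
}

func main() {
	now := time.Now()
	e := &cachedEntry{liveMod: now.Add(-time.Hour), cachedMod: now}
	fmt.Println("stale:", !e.ModTime().Equal(e.liveMod)) // true before the flush
	e.flushed = true
	fmt.Println("stale:", !e.ModTime().Equal(e.liveMod)) // false after the flush
}
```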

View File

@@ -9,9 +9,9 @@ import (
 	"bazil.org/fuse"
 	fusefs "bazil.org/fuse/fs"
-	"github.com/rclone/rclone/cmd/mount"
-	"github.com/rclone/rclone/cmd/mountlib"
-	"github.com/rclone/rclone/fs"
+	"github.com/ncw/rclone/cmd/mount"
+	"github.com/ncw/rclone/cmd/mountlib"
+	"github.com/ncw/rclone/fs"
 	"github.com/stretchr/testify/require"
 )

View File

@@ -9,10 +9,10 @@ import (
 	"time"
 
 	"github.com/billziss-gh/cgofuse/fuse"
+	"github.com/ncw/rclone/cmd/cmount"
+	"github.com/ncw/rclone/cmd/mountlib"
+	"github.com/ncw/rclone/fs"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/cmd/cmount"
-	"github.com/rclone/rclone/cmd/mountlib"
-	"github.com/rclone/rclone/fs"
 	"github.com/stretchr/testify/require"
 )

View File

@@ -7,9 +7,9 @@ package cache_test
 import (
 	"testing"
 
-	"github.com/rclone/rclone/backend/cache"
-	_ "github.com/rclone/rclone/backend/local"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/backend/cache"
+	_ "github.com/ncw/rclone/backend/local"
+	"github.com/ncw/rclone/fstest/fstests"
 )
 
 // TestIntegration runs integration tests against the remote
@@ -17,7 +17,5 @@ func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestCache:",
 		NilObject:  (*cache.Object)(nil),
-		UnimplementableFsMethods:     []string{"PublicLink", "MergeDirs", "OpenWriterAt"},
-		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
 	})
 }

View File

@@ -3,7 +3,6 @@
 package cache_test
 
 import (
-	"context"
 	"fmt"
 	"math/rand"
 	"os"
@@ -12,9 +11,9 @@ import (
 	"testing"
 	"time"
 
-	"github.com/rclone/rclone/backend/cache"
-	_ "github.com/rclone/rclone/backend/drive"
-	"github.com/rclone/rclone/fs"
+	"github.com/ncw/rclone/backend/cache"
+	_ "github.com/ncw/rclone/backend/drive"
+	"github.com/ncw/rclone/fs"
 	"github.com/stretchr/testify/require"
 )
@@ -86,11 +85,11 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
-	err := rootFs.Mkdir(context.Background(), "one")
+	err := rootFs.Mkdir("one")
 	require.NoError(t, err)
-	err = rootFs.Mkdir(context.Background(), "one/test")
+	err = rootFs.Mkdir("one/test")
 	require.NoError(t, err)
-	err = rootFs.Mkdir(context.Background(), "second")
+	err = rootFs.Mkdir("second")
 	require.NoError(t, err)
 
 	// create some rand test data
@@ -123,11 +122,11 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
 		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
-	err := rootFs.Mkdir(context.Background(), "one")
+	err := rootFs.Mkdir("one")
 	require.NoError(t, err)
-	err = rootFs.Mkdir(context.Background(), "one/test")
+	err = rootFs.Mkdir("one/test")
 	require.NoError(t, err)
-	err = rootFs.Mkdir(context.Background(), "second")
+	err = rootFs.Mkdir("second")
 	require.NoError(t, err)
 
 	// create some rand test data
@@ -166,7 +165,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
-	err := rootFs.Mkdir(context.Background(), "test")
+	err := rootFs.Mkdir("test")
 	require.NoError(t, err)
 	minSize := 5242880
 	maxSize := 10485760
@@ -234,9 +233,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	err = runInstance.dirMove(t, rootFs, "test", "second")
 	if err != errNotSupported {
 		require.NoError(t, err)
-		_, err = rootFs.NewObject(context.Background(), "test/one")
+		_, err = rootFs.NewObject("test/one")
 		require.Error(t, err)
-		_, err = rootFs.NewObject(context.Background(), "second/one")
+		_, err = rootFs.NewObject("second/one")
 		require.NoError(t, err)
 		// validate that it exists in temp fs
 		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -257,7 +256,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	err = runInstance.rm(t, rootFs, "test")
 	require.Error(t, err)
 	require.Contains(t, err.Error(), "directory not empty")
-	_, err = rootFs.NewObject(context.Background(), "test/one")
+	_, err = rootFs.NewObject("test/one")
 	require.NoError(t, err)
 	// validate that it exists in temp fs
 	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -271,9 +270,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	if err != errNotSupported {
 		require.NoError(t, err)
 		// try to read from it
-		_, err = rootFs.NewObject(context.Background(), "test/one")
+		_, err = rootFs.NewObject("test/one")
 		require.Error(t, err)
-		_, err = rootFs.NewObject(context.Background(), "test/second")
+		_, err = rootFs.NewObject("test/second")
 		require.NoError(t, err)
 		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
 		require.NoError(t, err)
@@ -290,9 +289,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
 	if err != errNotSupported {
 		require.NoError(t, err)
-		_, err = rootFs.NewObject(context.Background(), "test/one")
+		_, err = rootFs.NewObject("test/one")
 		require.NoError(t, err)
-		_, err = rootFs.NewObject(context.Background(), "test/third")
+		_, err = rootFs.NewObject("test/third")
 		require.NoError(t, err)
 		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
 		require.NoError(t, err)
@@ -307,7 +306,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	// test Remove -- allowed
 	err = runInstance.rm(t, rootFs, "test/one")
 	require.NoError(t, err)
-	_, err = rootFs.NewObject(context.Background(), "test/one")
+	_, err = rootFs.NewObject("test/one")
 	require.Error(t, err)
 	// validate that it doesn't exist in temp fs
 	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -319,7 +318,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	require.NoError(t, err)
 	err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
 	require.NoError(t, err)
-	obj2, err := rootFs.NewObject(context.Background(), "test/one")
+	obj2, err := rootFs.NewObject("test/one")
 	require.NoError(t, err)
 	data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
 	require.Equal(t, "one content updated", string(data2))
@@ -367,7 +366,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	err = runInstance.dirMove(t, rootFs, "test", "second")
 	if err != errNotSupported {
 		require.Error(t, err)
-		_, err = rootFs.NewObject(context.Background(), "test/one")
+		_, err = rootFs.NewObject("test/one")
 		require.NoError(t, err)
 		// validate that it exists in temp fs
 		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -379,7 +378,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	// test Rmdir
 	err = runInstance.rm(t, rootFs, "test")
 	require.Error(t, err)
-	_, err = rootFs.NewObject(context.Background(), "test/one")
+	_, err = rootFs.NewObject("test/one")
 	require.NoError(t, err)
 	// validate that it doesn't exist in temp fs
 	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -390,9 +389,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	if err != errNotSupported {
 		require.Error(t, err)
 		// try to read from it
-		_, err = rootFs.NewObject(context.Background(), "test/one")
+		_, err = rootFs.NewObject("test/one")
 		require.NoError(t, err)
-		_, err = rootFs.NewObject(context.Background(), "test/second")
+		_, err = rootFs.NewObject("test/second")
 		require.Error(t, err)
 		// validate that it exists in temp fs
 		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -405,9 +404,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
 	if err != errNotSupported {
 		require.NoError(t, err)
-		_, err = rootFs.NewObject(context.Background(), "test/one")
+		_, err = rootFs.NewObject("test/one")
 		require.NoError(t, err)
-		_, err = rootFs.NewObject(context.Background(), "test/third")
+		_, err = rootFs.NewObject("test/third")
 		require.NoError(t, err)
 		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
 		require.NoError(t, err)
@@ -422,7 +421,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	// test Remove
 	err = runInstance.rm(t, rootFs, "test/one")
 	require.Error(t, err)
-	_, err = rootFs.NewObject(context.Background(), "test/one")
+	_, err = rootFs.NewObject("test/one")
 	require.NoError(t, err)
 	// validate that it doesn't exist in temp fs
 	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))

View File

@@ -3,11 +3,10 @@
 package cache
 
 import (
-	"context"
 	"path"
 	"time"
 
-	"github.com/rclone/rclone/fs"
+	"github.com/ncw/rclone/fs"
 )
 
 // Directory is a generic dir that stores basic information about it
@@ -56,7 +55,7 @@ func ShallowDirectory(f *Fs, remote string) *Directory {
 }
 
 // DirectoryFromOriginal builds one from a generic fs.Directory
-func DirectoryFromOriginal(ctx context.Context, f *Fs, d fs.Directory) *Directory {
+func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory {
 	var cd *Directory
 	fullRemote := path.Join(f.Root(), d.Remote())
 
@@ -68,7 +67,7 @@ func DirectoryFromOriginal(ctx context.Context, f *Fs, d fs.Directory) *Director
 		CacheFs:      f,
 		Name:         name,
 		Dir:          dir,
-		CacheModTime: d.ModTime(ctx).UnixNano(),
+		CacheModTime: d.ModTime().UnixNano(),
 		CacheSize:    d.Size(),
 		CacheItems:   d.Items(),
 		CacheType:    "Directory",
@@ -111,7 +110,7 @@ func (d *Directory) parentRemote() string {
 }
 
 // ModTime returns the cached ModTime
-func (d *Directory) ModTime(ctx context.Context) time.Time {
+func (d *Directory) ModTime() time.Time {
 	return time.Unix(0, d.CacheModTime)
 }

View File

@@ -3,7 +3,6 @@
 package cache
 
 import (
-	"context"
 	"fmt"
 	"io"
 	"path"
@@ -12,9 +11,9 @@ import (
 	"sync"
 	"time"
 
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/operations"
 )
 
 var uploaderMap = make(map[string]*backgroundWriter)
@@ -41,7 +40,6 @@ func initBackgroundUploader(fs *Fs) (*backgroundWriter, error) {
 
 // Handle is managing the read/write/seek operations on an open handle
 type Handle struct {
-	ctx          context.Context
 	cachedObject *Object
 	cfs          *Fs
 	memory       *Memory
@@ -60,9 +58,8 @@ type Handle struct {
 }
 
 // NewObjectHandle returns a new Handle for an existing Object
-func NewObjectHandle(ctx context.Context, o *Object, cfs *Fs) *Handle {
+func NewObjectHandle(o *Object, cfs *Fs) *Handle {
 	r := &Handle{
-		ctx:          ctx,
 		cachedObject: o,
 		cfs:          cfs,
 		offset:       0,
@@ -354,7 +351,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
 	r := w.rc
 	if w.rc == nil {
 		r, err = w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
-			return w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1})
+			return w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
 		})
 		if err != nil {
 			return nil, err
@@ -364,7 +361,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
 	if !closeOpen {
 		if do, ok := r.(fs.RangeSeeker); ok {
-			_, err = do.RangeSeek(w.r.ctx, offset, io.SeekStart, end-offset)
+			_, err = do.RangeSeek(offset, io.SeekStart, end-offset)
 			return r, err
 		} else if do, ok := r.(io.Seeker); ok {
 			_, err = do.Seek(offset, io.SeekStart)
@@ -374,7 +371,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
 	_ = w.rc.Close()
 	return w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
-		r, err = w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1})
+		r, err = w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
 		if err != nil {
 			return nil, err
 		}
@@ -452,7 +449,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
 	// we seem to be getting only errors so we abort
 	if err != nil {
 		fs.Errorf(w, "object open failed %v: %v", chunkStart, err)
-		err = w.r.cachedObject.refreshFromSource(w.r.ctx, true)
+		err = w.r.cachedObject.refreshFromSource(true)
 		if err != nil {
 			fs.Errorf(w, "%v", err)
 		}
@@ -465,7 +462,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
 	sourceRead, err = io.ReadFull(w.rc, data)
 	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
 		fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
-		err = w.r.cachedObject.refreshFromSource(w.r.ctx, true)
+		err = w.r.cachedObject.refreshFromSource(true)
 		if err != nil {
 			fs.Errorf(w, "%v", err)
 		}
@@ -591,7 +588,7 @@ func (b *backgroundWriter) run() {
 		remote := b.fs.cleanRootFromPath(absPath)
 		b.notify(remote, BackgroundUploadStarted, nil)
 		fs.Infof(remote, "background upload: started upload")
-		err = operations.MoveFile(context.TODO(), b.fs.UnWrap(), b.fs.tempFs, remote, remote)
+		err = operations.MoveFile(b.fs.UnWrap(), b.fs.tempFs, remote, remote)
 		if err != nil {
 			b.notify(remote, BackgroundUploadError, err)
 			_ = b.fs.cache.rollbackPendingUpload(absPath)
@@ -601,14 +598,14 @@ func (b *backgroundWriter) run() {
 		// clean empty dirs up to root
 		thisDir := cleanPath(path.Dir(remote))
 		for thisDir != "" {
-			thisList, err := b.fs.tempFs.List(context.TODO(), thisDir)
+			thisList, err := b.fs.tempFs.List(thisDir)
 			if err != nil {
 				break
 			}
 			if len(thisList) > 0 {
 				break
 			}
-			err = b.fs.tempFs.Rmdir(context.TODO(), thisDir)
+			err = b.fs.tempFs.Rmdir(thisDir)
 			fs.Debugf(thisDir, "cleaned from temp path")
 			if err != nil {
 				break
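Note: the loop at the end of backgroundWriter.run walks from the uploaded file's directory toward the temp root, removing each directory that has just become empty and stopping at the first listing error or non-empty directory. A standalone sketch of that walk against the local filesystem, where os.ReadDir/os.Remove stand in for the tempFs List/Rmdir calls:

```go
package main

import (
	"fmt"
	"os"
	"path"
)

// cleanEmptyParents removes dir and its parents inside root while they are empty.
func cleanEmptyParents(root, dir string) {
	for dir != "" && dir != "." {
		entries, err := os.ReadDir(path.Join(root, dir))
		if err != nil || len(entries) > 0 {
			break // stop at the first error or the first non-empty directory
		}
		if err := os.Remove(path.Join(root, dir)); err != nil {
			break
		}
		fmt.Println(dir, "cleaned from temp path")
		dir = path.Dir(dir) // step one level toward the root
	}
}

func main() {
	root, err := os.MkdirTemp("", "demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(root)
	_ = os.MkdirAll(path.Join(root, "a/b/c"), 0700)
	cleanEmptyParents(root, "a/b/c") // removes c, then b, then a
}
```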

View File

@@ -3,16 +3,15 @@
 package cache
 
 import (
-	"context"
 	"io"
 	"path"
 	"sync"
 	"time"
 
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/lib/readers"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/lib/readers"
 )
 
 const (
@@ -69,7 +68,7 @@ func NewObject(f *Fs, remote string) *Object {
 }
 
 // ObjectFromOriginal builds one from a generic fs.Object
-func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object {
+func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
 	var co *Object
 	fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
 	dir, name := path.Split(fullRemote)
@@ -93,13 +92,13 @@ func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object {
 		CacheType: cacheType,
 		CacheTs:   time.Now(),
 	}
-	co.updateData(ctx, o)
+	co.updateData(o)
 	return co
 }
 
-func (o *Object) updateData(ctx context.Context, source fs.Object) {
+func (o *Object) updateData(source fs.Object) {
 	o.Object = source
-	o.CacheModTime = source.ModTime(ctx).UnixNano()
+	o.CacheModTime = source.ModTime().UnixNano()
 	o.CacheSize = source.Size()
 	o.CacheStorable = source.Storable()
 	o.CacheTs = time.Now()
@@ -131,20 +130,20 @@ func (o *Object) abs() string {
 }
 
 // ModTime returns the cached ModTime
-func (o *Object) ModTime(ctx context.Context) time.Time {
-	_ = o.refresh(ctx)
+func (o *Object) ModTime() time.Time {
+	_ = o.refresh()
 	return time.Unix(0, o.CacheModTime)
 }
 
 // Size returns the cached Size
 func (o *Object) Size() int64 {
-	_ = o.refresh(context.TODO())
+	_ = o.refresh()
 	return o.CacheSize
 }
 
 // Storable returns the cached Storable
 func (o *Object) Storable() bool {
-	_ = o.refresh(context.TODO())
+	_ = o.refresh()
 	return o.CacheStorable
 }
 
@@ -152,18 +151,18 @@ func (o *Object) Storable() bool {
 // all these conditions must be true to ignore a refresh
 // 1. cache ts didn't expire yet
 // 2. is not pending a notification from the wrapped fs
-func (o *Object) refresh(ctx context.Context) error {
+func (o *Object) refresh() error {
 	isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
 	isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
 	if !isExpired && !isNotified {
 		return nil
 	}
 
-	return o.refreshFromSource(ctx, true)
+	return o.refreshFromSource(true)
 }
 
 // refreshFromSource requests the original FS for the object in case it comes from a cached entry
-func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
+func (o *Object) refreshFromSource(force bool) error {
 	o.refreshMutex.Lock()
 	defer o.refreshMutex.Unlock()
 	var err error
@@ -173,29 +172,29 @@ func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
 		return nil
 	}
 	if o.isTempFile() {
-		liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
+		liveObject, err = o.ParentFs.NewObject(o.Remote())
 		err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
 	} else {
-		liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
+		liveObject, err = o.CacheFs.Fs.NewObject(o.Remote())
 		err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
 	}
 
 	if err != nil {
 		fs.Errorf(o, "error refreshing object in : %v", err)
 		return err
 	}
-	o.updateData(ctx, liveObject)
+	o.updateData(liveObject)
 	o.persist()
 
 	return nil
 }
 
 // SetModTime sets the ModTime of this object
-func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
-	if err := o.refreshFromSource(ctx, false); err != nil {
+func (o *Object) SetModTime(t time.Time) error {
+	if err := o.refreshFromSource(false); err != nil {
 		return err
 	}
 
-	err := o.Object.SetModTime(ctx, t)
+	err := o.Object.SetModTime(t)
 	if err != nil {
 		return err
 	}
@@ -208,19 +207,19 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
 }
 
 // Open is used to request a specific part of the file using fs.RangeOption
-func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
+func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
 	var err error
 	if o.Object == nil {
-		err = o.refreshFromSource(ctx, true)
+		err = o.refreshFromSource(true)
 	} else {
-		err = o.refresh(ctx)
+		err = o.refresh()
 	}
 	if err != nil {
 		return nil, err
 	}
 
-	cacheReader := NewObjectHandle(ctx, o, o.CacheFs)
+	cacheReader := NewObjectHandle(o, o.CacheFs)
 	var offset, limit int64 = 0, -1
 	for _, option := range options {
 		switch x := option.(type) {
@@ -239,8 +238,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
 }
 
 // Update will change the object data
-func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	if err := o.refreshFromSource(ctx, false); err != nil {
+func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+	if err := o.refreshFromSource(false); err != nil {
 		return err
 	}
 	// pause background uploads if active
@@ -255,7 +254,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	fs.Debugf(o, "updating object contents with size %v", src.Size())
 
 	// FIXME use reliable upload
-	err := o.Object.Update(ctx, in, src, options...)
+	err := o.Object.Update(in, src, options...)
 	if err != nil {
 		fs.Errorf(o, "error updating source: %v", err)
 		return err
@@ -266,7 +265,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	// advertise to ChangeNotify if wrapped doesn't do that
 	o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)
 
-	o.CacheModTime = src.ModTime(ctx).UnixNano()
+	o.CacheModTime = src.ModTime().UnixNano()
 	o.CacheSize = src.Size()
 	o.CacheHashes = make(map[hash.Type]string)
 	o.CacheTs = time.Now()
@@ -276,8 +275,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 }
 
 // Remove deletes the object from both the cache and the source
-func (o *Object) Remove(ctx context.Context) error {
-	if err := o.refreshFromSource(ctx, false); err != nil {
+func (o *Object) Remove() error {
+	if err := o.refreshFromSource(false); err != nil {
 		return err
 	}
 	// pause background uploads if active
@@ -289,7 +288,7 @@ func (o *Object) Remove(ctx context.Context) error {
 			return errors.Errorf("%v is currently uploading, can't delete", o)
 		}
 	}
-	err := o.Object.Remove(ctx)
+	err := o.Object.Remove()
 	if err != nil {
 		return err
 	}
@@ -307,8 +306,8 @@ func (o *Object) Remove(ctx context.Context) error {
 
 // Hash requests a hash of the object and stores in the cache
 // since it might or might not be called, this is lazy loaded
-func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
-	_ = o.refresh(ctx)
+func (o *Object) Hash(ht hash.Type) (string, error) {
+	_ = o.refresh()
 	if o.CacheHashes == nil {
 		o.CacheHashes = make(map[hash.Type]string)
 	}
@@ -317,10 +316,10 @@ func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
 	if found {
 		return cachedHash, nil
 	}
-	if err := o.refreshFromSource(ctx, false); err != nil {
+	if err := o.refreshFromSource(false); err != nil {
 		return "", err
 	}
-	liveHash, err := o.Object.Hash(ctx, ht)
+	liveHash, err := o.Object.Hash(ht)
 	if err != nil {
 		return "", err
 	}
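Note: the numbered comment above refresh is the cache's consistency rule in miniature: a cached object is served as-is only while its entry is younger than info_age and no ChangeNotify event is pending for it; otherwise refreshFromSource re-reads the live object. A small sketch of just that guard, with invented types:

```go
package main

import (
	"fmt"
	"time"
)

type entry struct {
	cachedAt time.Time     // when the metadata was cached (CacheTs)
	infoAge  time.Duration // how long cached metadata stays trusted
	notified bool          // a change notification is pending for this remote
}

func (e entry) needsRefresh() bool {
	expired := time.Now().After(e.cachedAt.Add(e.infoAge))
	return expired || e.notified
}

func main() {
	e := entry{cachedAt: time.Now(), infoAge: time.Hour}
	fmt.Println(e.needsRefresh()) // false: fresh and quiet, serve from cache
	e.notified = true
	fmt.Println(e.needsRefresh()) // true: a pending notification forces a refresh
}
```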

View File

@@ -14,8 +14,8 @@ import (
 	"sync"
 	"time"
 
+	"github.com/ncw/rclone/fs"
 	cache "github.com/patrickmn/go-cache"
-	"github.com/rclone/rclone/fs"
 	"golang.org/x/net/websocket"
 )

View File

@@ -7,9 +7,9 @@ import (
 	"strings"
 	"time"
 
+	"github.com/ncw/rclone/fs"
 	cache "github.com/patrickmn/go-cache"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/fs"
 )
 
 // Memory is a wrapper of transient storage for a go-cache store

View File

@@ -4,7 +4,6 @@ package cache
 import (
 	"bytes"
-	"context"
 	"encoding/binary"
 	"encoding/json"
 	"fmt"
@@ -17,9 +16,9 @@ import (
 	"time"
 
 	bolt "github.com/coreos/bbolt"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/walk"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/walk"
 )
 
 // Constants
@@ -1015,7 +1014,7 @@ func (b *Persistent) SetPendingUploadToStarted(remote string) error {
 }
 
 // ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue
-func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error {
+func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
 	return b.db.Update(func(tx *bolt.Tx) error {
 		_ = tx.DeleteBucket([]byte(tempBucket))
 		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
@@ -1024,7 +1023,7 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
 		}
 
 		var queuedEntries []fs.Object
-		err = walk.ListR(ctx, cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
+		err = walk.ListR(cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
 			for _, o := range entries {
 				if oo, ok := o.(fs.Object); ok {
 					queuedEntries = append(queuedEntries, oo)

View File

@@ -2,7 +2,6 @@ package crypt
 
 import (
 	"bytes"
-	"context"
 	"crypto/aes"
 	gocipher "crypto/cipher"
 	"crypto/rand"
@@ -14,10 +13,10 @@ import (
 	"sync"
 	"unicode/utf8"
 
+	"github.com/ncw/rclone/backend/crypt/pkcs7"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/accounting"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/backend/crypt/pkcs7"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rfjakob/eme"
 	"golang.org/x/crypto/nacl/secretbox"
 	"golang.org/x/crypto/scrypt"
@@ -69,7 +68,7 @@ type ReadSeekCloser interface {
 }
 
 // OpenRangeSeek opens the file handle at the offset with the limit given
-type OpenRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error)
+type OpenRangeSeek func(offset, limit int64) (io.ReadCloser, error)
 
 // Cipher is used to swap out the encryption implementations
 type Cipher interface {
@@ -86,7 +85,7 @@ type Cipher interface {
 	// DecryptData
 	DecryptData(io.ReadCloser) (io.ReadCloser, error)
 	// DecryptDataSeek decrypt at a given position
-	DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
+	DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
 	// EncryptedSize calculates the size of the data when encrypted
 	EncryptedSize(int64) int64
 	// DecryptedSize calculates the size of the data when decrypted
@@ -756,22 +755,22 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
 }
 
 // newDecrypterSeek creates a new file handle decrypting on the fly
-func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
+func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
 	var rc io.ReadCloser
 	doRangeSeek := false
 	setLimit := false
 	// Open initially with no seek
 	if offset == 0 && limit < 0 {
 		// If no offset or limit then open whole file
-		rc, err = open(ctx, 0, -1)
+		rc, err = open(0, -1)
 	} else if offset == 0 {
 		// If no offset open the header + limit worth of the file
 		_, underlyingLimit, _, _ := calculateUnderlying(offset, limit)
-		rc, err = open(ctx, 0, int64(fileHeaderSize)+underlyingLimit)
+		rc, err = open(0, int64(fileHeaderSize)+underlyingLimit)
 		setLimit = true
 	} else {
 		// Otherwise just read the header to start with
-		rc, err = open(ctx, 0, int64(fileHeaderSize))
+		rc, err = open(0, int64(fileHeaderSize))
 		doRangeSeek = true
 	}
 	if err != nil {
@@ -784,7 +783,7 @@ func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
} }
fh.open = open // will be called by fh.RangeSeek fh.open = open // will be called by fh.RangeSeek
if doRangeSeek { if doRangeSeek {
_, err = fh.RangeSeek(ctx, offset, io.SeekStart, limit) _, err = fh.RangeSeek(offset, io.SeekStart, limit)
if err != nil { if err != nil {
_ = fh.Close() _ = fh.Close()
return nil, err return nil, err
@@ -904,7 +903,7 @@ func calculateUnderlying(offset, limit int64) (underlyingOffset, underlyingLimit
// limiting the total length to limit. // limiting the total length to limit.
// //
// RangeSeek with a limit of < 0 is equivalent to a regular Seek. // RangeSeek with a limit of < 0 is equivalent to a regular Seek.
func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, limit int64) (int64, error) { func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, error) {
fh.mu.Lock() fh.mu.Lock()
defer fh.mu.Unlock() defer fh.mu.Unlock()
@@ -931,7 +930,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
// Can we seek underlying stream directly? // Can we seek underlying stream directly?
if do, ok := fh.rc.(fs.RangeSeeker); ok { if do, ok := fh.rc.(fs.RangeSeeker); ok {
// Seek underlying stream directly // Seek underlying stream directly
_, err := do.RangeSeek(ctx, underlyingOffset, 0, underlyingLimit) _, err := do.RangeSeek(underlyingOffset, 0, underlyingLimit)
if err != nil { if err != nil {
return 0, fh.finish(err) return 0, fh.finish(err)
} }
@@ -941,7 +940,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
fh.rc = nil fh.rc = nil
// Re-open the underlying object with the offset given // Re-open the underlying object with the offset given
rc, err := fh.open(ctx, underlyingOffset, underlyingLimit) rc, err := fh.open(underlyingOffset, underlyingLimit)
if err != nil { if err != nil {
return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit")) return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
} }
@@ -970,7 +969,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
// Seek implements the io.Seeker interface // Seek implements the io.Seeker interface
func (fh *decrypter) Seek(offset int64, whence int) (int64, error) { func (fh *decrypter) Seek(offset int64, whence int) (int64, error) {
return fh.RangeSeek(context.TODO(), offset, whence, -1) return fh.RangeSeek(offset, whence, -1)
} }
// finish sets the final error and tidies up // finish sets the final error and tidies up
@@ -1044,8 +1043,8 @@ func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
// The open function must return a ReadCloser opened to the offset supplied // The open function must return a ReadCloser opened to the offset supplied
// //
// You must use this form of DecryptData if you might want to Seek the file handle // You must use this form of DecryptData if you might want to Seek the file handle
func (c *cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) { func (c *cipher) DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
out, err := c.newDecrypterSeek(ctx, open, offset, limit) out, err := c.newDecrypterSeek(open, offset, limit)
if err != nil { if err != nil {
return nil, err return nil, err
} }
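
For readers following the cipher API: the ctx-aware DecryptDataSeek removed above takes an OpenRangeSeek callback that (re)opens the underlying encrypted stream at a given offset/limit, which is how RangeSeek avoids re-reading the whole file. A hedged sketch of the calling convention, assumed to sit in package crypt (imports: bytes, context, io, io/ioutil); readDecryptedRange and the in-memory ciphertext are illustrative, as exercised by the test diff below:

// readDecryptedRange is an illustrative helper, not code from this commit:
// it decrypts [offset, offset+limit) of an in-memory ciphertext using the
// Cipher.DecryptDataSeek API from the left-hand side of this diff.
func readDecryptedRange(ctx context.Context, c Cipher, ciphertext []byte, offset, limit int64) ([]byte, error) {
    // open returns the underlying (encrypted) stream at the requested range,
    // mirroring what crypt's Object.Open does against a real remote.
    open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
        end := int64(len(ciphertext))
        if underlyingLimit >= 0 && underlyingOffset+underlyingLimit < end {
            end = underlyingOffset + underlyingLimit
        }
        return ioutil.NopCloser(bytes.NewReader(ciphertext[underlyingOffset:end])), nil
    }
    rc, err := c.DecryptDataSeek(ctx, open, offset, limit)
    if err != nil {
        return nil, err
    }
    defer func() { _ = rc.Close() }()
    return ioutil.ReadAll(rc)
}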

View File

@@ -2,7 +2,6 @@ package crypt
 
 import (
     "bytes"
-    "context"
     "encoding/base32"
     "fmt"
     "io"
@@ -10,8 +9,8 @@ import (
     "strings"
     "testing"
 
+    "github.com/ncw/rclone/backend/crypt/pkcs7"
     "github.com/pkg/errors"
-    "github.com/rclone/rclone/backend/crypt/pkcs7"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
 )
@@ -966,7 +965,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 
     // Open stream with a seek of underlyingOffset
     var reader io.ReadCloser
-    open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
+    open := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
         end := len(ciphertext)
         if underlyingLimit >= 0 {
             end = int(underlyingOffset + underlyingLimit)
@@ -1007,7 +1006,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
             if offset+limit > len(plaintext) {
                 continue
             }
-            rc, err := c.DecryptDataSeek(context.Background(), open, int64(offset), int64(limit))
+            rc, err := c.DecryptDataSeek(open, int64(offset), int64(limit))
             assert.NoError(t, err)
 
             check(rc, offset, limit)
@@ -1015,14 +1014,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
     }
 
     // Try decoding it with a single open and lots of seeks
-    fh, err := c.DecryptDataSeek(context.Background(), open, 0, -1)
+    fh, err := c.DecryptDataSeek(open, 0, -1)
     assert.NoError(t, err)
     for _, offset := range trials {
         for _, limit := range limits {
             if offset+limit > len(plaintext) {
                 continue
             }
-            _, err := fh.RangeSeek(context.Background(), int64(offset), io.SeekStart, int64(limit))
+            _, err := fh.RangeSeek(int64(offset), io.SeekStart, int64(limit))
             assert.NoError(t, err)
 
             check(fh, offset, limit)
@@ -1073,7 +1072,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
     } {
         what := fmt.Sprintf("offset = %d, limit = %d", test.offset, test.limit)
         callCount := 0
-        testOpen := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
+        testOpen := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
             switch callCount {
             case 0:
                 assert.Equal(t, int64(0), underlyingOffset, what)
@@ -1085,11 +1084,11 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
                 t.Errorf("Too many calls %d for %s", callCount+1, what)
             }
             callCount++
-            return open(ctx, underlyingOffset, underlyingLimit)
+            return open(underlyingOffset, underlyingLimit)
         }
-        fh, err := c.DecryptDataSeek(context.Background(), testOpen, 0, -1)
+        fh, err := c.DecryptDataSeek(testOpen, 0, -1)
         assert.NoError(t, err)
-        gotOffset, err := fh.RangeSeek(context.Background(), test.offset, io.SeekStart, test.limit)
+        gotOffset, err := fh.RangeSeek(test.offset, io.SeekStart, test.limit)
         assert.NoError(t, err)
         assert.Equal(t, gotOffset, test.offset)
     }

View File

@@ -2,20 +2,19 @@
 package crypt
 
 import (
-    "context"
     "fmt"
     "io"
     "strings"
     "time"
 
+    "github.com/ncw/rclone/fs"
+    "github.com/ncw/rclone/fs/accounting"
+    "github.com/ncw/rclone/fs/config/configmap"
+    "github.com/ncw/rclone/fs/config/configstruct"
+    "github.com/ncw/rclone/fs/config/obscure"
+    "github.com/ncw/rclone/fs/fspath"
+    "github.com/ncw/rclone/fs/hash"
     "github.com/pkg/errors"
-    "github.com/rclone/rclone/fs"
-    "github.com/rclone/rclone/fs/accounting"
-    "github.com/rclone/rclone/fs/config/configmap"
-    "github.com/rclone/rclone/fs/config/configstruct"
-    "github.com/rclone/rclone/fs/config/obscure"
-    "github.com/rclone/rclone/fs/fspath"
-    "github.com/rclone/rclone/fs/hash"
 )
 
 // Globals
@@ -170,10 +169,23 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
         WriteMimeType: false,
         BucketBased: true,
         CanHaveEmptyDirectories: true,
-        SetTier: true,
-        GetTier: true,
     }).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
+    doChangeNotify := wrappedFs.Features().ChangeNotify
+    if doChangeNotify != nil {
+        f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
+            wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
+                decrypted, err := f.DecryptFileName(path)
+                if err != nil {
+                    fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
+                    return
+                }
+                notifyFunc(decrypted, entryType)
+            }
+            doChangeNotify(wrappedNotifyFunc, pollInterval)
+        }
+    }
 
     return f, err
 }
@@ -190,7 +202,6 @@ type Options struct {
 
 // Fs represents a wrapped fs.Fs
 type Fs struct {
     fs.Fs
-    wrapper fs.Fs
     name string
     root string
     opt  Options
@@ -233,7 +244,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
 }
 
 // Encrypt an directory file name to entries.
-func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
+func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
     remote := dir.Remote()
     decryptedRemote, err := f.cipher.DecryptDirName(remote)
     if err != nil {
@@ -243,18 +254,18 @@ func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
     if f.opt.ShowMapping {
         fs.Logf(decryptedRemote, "Encrypts to %q", remote)
     }
-    *entries = append(*entries, f.newDir(ctx, dir))
+    *entries = append(*entries, f.newDir(dir))
 }
 
 // Encrypt some directory entries.  This alters entries returning it as newEntries.
-func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
+func (f *Fs) encryptEntries(entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
     newEntries = entries[:0] // in place filter
     for _, entry := range entries {
         switch x := entry.(type) {
         case fs.Object:
             f.add(&newEntries, x)
         case fs.Directory:
-            f.addDir(ctx, &newEntries, x)
+            f.addDir(&newEntries, x)
         default:
             return nil, errors.Errorf("Unknown object type %T", entry)
         }
@@ -271,12 +282,12 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-    entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
+func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+    entries, err = f.Fs.List(f.cipher.EncryptDirName(dir))
     if err != nil {
         return nil, err
     }
-    return f.encryptEntries(ctx, entries)
+    return f.encryptEntries(entries)
 }
 
 // ListR lists the objects and directories of the Fs starting
@@ -295,9 +306,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 //
 // Don't implement this unless you have a more efficient way
 // of listing recursively that doing a directory traversal.
-func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
-    return f.Fs.Features().ListR(ctx, f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
-        newEntries, err := f.encryptEntries(ctx, entries)
+func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
+    return f.Fs.Features().ListR(f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
+        newEntries, err := f.encryptEntries(entries)
         if err != nil {
             return err
         }
@@ -306,18 +317,18 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
 }
 
 // NewObject finds the Object at remote.
-func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-    o, err := f.Fs.NewObject(ctx, f.cipher.EncryptFileName(remote))
+func (f *Fs) NewObject(remote string) (fs.Object, error) {
+    o, err := f.Fs.NewObject(f.cipher.EncryptFileName(remote))
     if err != nil {
         return nil, err
     }
     return f.newObject(o), nil
 }
 
-type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
+type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
 
 // put implements Put or PutStream
-func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
+func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
     // Encrypt the data into wrappedIn
     wrappedIn, err := f.cipher.EncryptData(in)
     if err != nil {
@@ -343,7 +354,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
     }
 
     // Transfer the data
-    o, err := put(ctx, wrappedIn, f.newObjectInfo(src), options...)
+    o, err := put(wrappedIn, f.newObjectInfo(src), options...)
     if err != nil {
         return nil, err
     }
@@ -352,13 +363,13 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
     if ht != hash.None && hasher != nil {
         srcHash := hasher.Sums()[ht]
         var dstHash string
-        dstHash, err = o.Hash(ctx, ht)
+        dstHash, err = o.Hash(ht)
         if err != nil {
             return nil, errors.Wrap(err, "failed to read destination hash")
         }
         if srcHash != "" && dstHash != "" && srcHash != dstHash {
             // remove object
-            err = o.Remove(ctx)
+            err = o.Remove()
             if err != nil {
                 fs.Errorf(o, "Failed to remove corrupted object: %v", err)
             }
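
The hunk above is crypt's upload verification: the encrypted stream is hashed as it is uploaded, the result is compared with the hash the remote reports for the stored object, and the object is deleted on mismatch. A stripped-down sketch of that pattern using the ctx-aware putFn defined earlier in this file — putWithVerify is an invented name, and the fs/hash constructors are assumptions based on how hasher.Sums() is used above:

func putWithVerify(ctx context.Context, in io.Reader, src fs.ObjectInfo, ht hash.Type, put putFn) (fs.Object, error) {
    // assumed fs/hash API: build a hasher for just the one hash type
    hasher, err := hash.NewMultiHasherTypes(hash.NewHashSet(ht))
    if err != nil {
        return nil, err
    }
    in = io.TeeReader(in, hasher) // hash the bytes as they stream to the remote
    o, err := put(ctx, in, src)
    if err != nil {
        return nil, err
    }
    srcHash := hasher.Sums()[ht]
    dstHash, err := o.Hash(ctx, ht)
    if err != nil {
        return nil, errors.Wrap(err, "failed to read destination hash")
    }
    if srcHash != "" && dstHash != "" && srcHash != dstHash {
        // the stored object doesn't match what was sent - remove it
        if rerr := o.Remove(ctx); rerr != nil {
            fs.Errorf(o, "Failed to remove corrupted object: %v", rerr)
        }
        return nil, errors.Errorf("corrupted on transfer: %v hashes differ %q vs %q", ht, srcHash, dstHash)
    }
    return o, nil
}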
@@ -374,13 +385,13 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
 // May create the object even if it returns an error - if so
 // will return the object and the error, otherwise will return
 // nil and the error
-func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-    return f.put(ctx, in, src, options, f.Fs.Put)
+func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+    return f.put(in, src, options, f.Fs.Put)
 }
 
 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-    return f.put(ctx, in, src, options, f.Fs.Features().PutStream)
+func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+    return f.put(in, src, options, f.Fs.Features().PutStream)
 }
 
 // Hashes returns the supported hash sets.
@@ -391,15 +402,15 @@ func (f *Fs) Hashes() hash.Set {
 // Mkdir makes the directory (container, bucket)
 //
 // Shouldn't return an error if it already exists
-func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-    return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
+func (f *Fs) Mkdir(dir string) error {
+    return f.Fs.Mkdir(f.cipher.EncryptDirName(dir))
 }
 
 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
-func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-    return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir))
+func (f *Fs) Rmdir(dir string) error {
+    return f.Fs.Rmdir(f.cipher.EncryptDirName(dir))
 }
 
 // Purge all files in the root and the root directory
@@ -408,12 +419,12 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 // quicker than just running Remove() on the result of List()
 //
 // Return an error if it doesn't exist
-func (f *Fs) Purge(ctx context.Context) error {
+func (f *Fs) Purge() error {
     do := f.Fs.Features().Purge
     if do == nil {
         return fs.ErrorCantPurge
     }
-    return do(ctx)
+    return do()
 }
 
 // Copy src to this remote using server side copy operations.
@@ -425,7 +436,7 @@ func (f *Fs) Purge(ctx context.Context) error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
     do := f.Fs.Features().Copy
     if do == nil {
         return nil, fs.ErrorCantCopy
@@ -434,7 +445,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
     if !ok {
         return nil, fs.ErrorCantCopy
     }
-    oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote))
+    oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
     if err != nil {
         return nil, err
     }
@@ -450,7 +461,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
     do := f.Fs.Features().Move
     if do == nil {
         return nil, fs.ErrorCantMove
@@ -459,7 +470,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
     if !ok {
         return nil, fs.ErrorCantMove
     }
-    oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote))
+    oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
     if err != nil {
         return nil, err
     }
@@ -474,7 +485,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
     do := f.Fs.Features().DirMove
     if do == nil {
         return fs.ErrorCantDirMove
@@ -484,14 +495,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
         fs.Debugf(srcFs, "Can't move directory - not same remote type")
         return fs.ErrorCantDirMove
     }
-    return do(ctx, srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
+    return do(srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
 }
 
 // PutUnchecked uploads the object
 //
 // This will create a duplicate if we upload a new file without
 // checking to see if there is one already - use Put() for that.
-func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
     do := f.Fs.Features().PutUnchecked
     if do == nil {
         return nil, errors.New("can't PutUnchecked")
@@ -500,7 +511,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
     if err != nil {
         return nil, err
     }
-    o, err := do(ctx, wrappedIn, f.newObjectInfo(src))
+    o, err := do(wrappedIn, f.newObjectInfo(src))
     if err != nil {
         return nil, err
     }
@@ -511,21 +522,21 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 //
 // Implement this if you have a way of emptying the trash or
 // otherwise cleaning up old versions of files.
-func (f *Fs) CleanUp(ctx context.Context) error {
+func (f *Fs) CleanUp() error {
     do := f.Fs.Features().CleanUp
     if do == nil {
         return errors.New("can't CleanUp")
     }
-    return do(ctx)
+    return do()
 }
 
 // About gets quota information from the Fs
-func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
+func (f *Fs) About() (*fs.Usage, error) {
     do := f.Fs.Features().About
     if do == nil {
         return nil, errors.New("About not supported")
     }
-    return do(ctx)
+    return do()
 }
 
 // UnWrap returns the Fs that this Fs is wrapping
@@ -533,16 +544,6 @@ func (f *Fs) UnWrap() fs.Fs {
     return f.Fs
 }
 
-// WrapFs returns the Fs that is wrapping this Fs
-func (f *Fs) WrapFs() fs.Fs {
-    return f.wrapper
-}
-
-// SetWrapper sets the Fs that is wrapping this Fs
-func (f *Fs) SetWrapper(wrapper fs.Fs) {
-    f.wrapper = wrapper
-}
-
 // EncryptFileName returns an encrypted file name
 func (f *Fs) EncryptFileName(fileName string) string {
     return f.cipher.EncryptFileName(fileName)
@@ -557,10 +558,10 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
 // src with it, and calculates the hash given by HashType on the fly
 //
 // Note that we break lots of encapsulation in this function.
-func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
+func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
     // Read the nonce - opening the file is sufficient to read the nonce in
     // use a limited read so we only read the header
-    in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
+    in, err := o.Object.Open(&fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
     if err != nil {
         return "", errors.Wrap(err, "failed to open object to read nonce")
     }
@@ -590,7 +591,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
     }
 
     // Open the src for input
-    in, err = src.Open(ctx)
+    in, err = src.Open()
     if err != nil {
         return "", errors.Wrap(err, "failed to open src")
     }
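
ComputeHash, shown only in fragments here, recovers the nonce from the encrypted object's header and then re-encrypts the plaintext source under that same nonce while hashing the ciphertext, so the result is comparable with the remote's stored hash. A condensed sketch of the flow; readHeaderNonce and reencryptWithNonce are hypothetical stand-ins for the package's internal helpers, and the fs/hash constructors are assumptions:

func computeHashSketch(ctx context.Context, f *Fs, o *Object, src fs.Object, ht hash.Type) (string, error) {
    // Read only the header of the encrypted object - enough to get the nonce.
    in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
    if err != nil {
        return "", errors.Wrap(err, "failed to open object to read nonce")
    }
    nonce, err := readHeaderNonce(in) // hypothetical: parses magic + nonce
    _ = in.Close()
    if err != nil {
        return "", err
    }

    // Re-encrypt the plaintext source under the recovered nonce and hash
    // the ciphertext as it streams past.
    in, err = src.Open(ctx)
    if err != nil {
        return "", errors.Wrap(err, "failed to open src")
    }
    defer func() { _ = in.Close() }()
    m, err := hash.NewMultiHasherTypes(hash.NewHashSet(ht)) // assumed fs/hash API
    if err != nil {
        return "", err
    }
    enc := reencryptWithNonce(f.cipher, in, nonce) // hypothetical: deterministic re-encryption
    if _, err := io.Copy(m, enc); err != nil {
        return "", err
    }
    return m.Sums()[ht], nil
}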
@@ -615,75 +616,6 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
     return m.Sums()[hashType], nil
 }
 
-// MergeDirs merges the contents of all the directories passed
-// in into the first one and rmdirs the other directories.
-func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
-    do := f.Fs.Features().MergeDirs
-    if do == nil {
-        return errors.New("MergeDirs not supported")
-    }
-    out := make([]fs.Directory, len(dirs))
-    for i, dir := range dirs {
-        out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
-    }
-    return do(ctx, out)
-}
-
-// DirCacheFlush resets the directory cache - used in testing
-// as an optional interface
-func (f *Fs) DirCacheFlush() {
-    do := f.Fs.Features().DirCacheFlush
-    if do != nil {
-        do()
-    }
-}
-
-// PublicLink generates a public link to the remote path (usually readable by anyone)
-func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
-    do := f.Fs.Features().PublicLink
-    if do == nil {
-        return "", errors.New("PublicLink not supported")
-    }
-    o, err := f.NewObject(ctx, remote)
-    if err != nil {
-        // assume it is a directory
-        return do(ctx, f.cipher.EncryptDirName(remote))
-    }
-    return do(ctx, o.(*Object).Object.Remote())
-}
-
-// ChangeNotify calls the passed function with a path
-// that has had changes. If the implementation
-// uses polling, it should adhere to the given interval.
-func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
-    do := f.Fs.Features().ChangeNotify
-    if do == nil {
-        return
-    }
-    wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
-        // fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
-        var (
-            err       error
-            decrypted string
-        )
-        switch entryType {
-        case fs.EntryDirectory:
-            decrypted, err = f.cipher.DecryptDirName(path)
-        case fs.EntryObject:
-            decrypted, err = f.cipher.DecryptFileName(path)
-        default:
-            fs.Errorf(path, "crypt ChangeNotify: ignoring unknown EntryType %d", entryType)
-            return
-        }
-        if err != nil {
-            fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
-            return
-        }
-        notifyFunc(decrypted, entryType)
-    }
-    do(ctx, wrappedNotifyFunc, pollIntervalChan)
-}
-
 // Object describes a wrapped for being read from the Fs
 //
 // This decrypts the remote name and decrypts the data
@@ -734,7 +666,7 @@ func (o *Object) Size() int64 {
 
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
-func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
+func (o *Object) Hash(ht hash.Type) (string, error) {
     return "", hash.ErrUnsupported
 }
@@ -744,7 +676,7 @@ func (o *Object) UnWrap() fs.Object {
 }
 
 // Open opens the file for read.  Call Close() on the returned io.ReadCloser
-func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
+func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
     var openOptions []fs.OpenOption
     var offset, limit int64 = 0, -1
     for _, option := range options {
@@ -758,10 +690,10 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
             openOptions = append(openOptions, option)
         }
     }
-    rc, err = o.f.cipher.DecryptDataSeek(ctx, func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
+    rc, err = o.f.cipher.DecryptDataSeek(func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
         if underlyingOffset == 0 && underlyingLimit < 0 {
             // Open with no seek
-            return o.Object.Open(ctx, openOptions...)
+            return o.Object.Open(openOptions...)
         }
         // Open stream with a range of underlyingOffset, underlyingLimit
         end := int64(-1)
@@ -772,7 +704,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
             }
         }
         newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end})
-        return o.Object.Open(ctx, newOpenOptions...)
+        return o.Object.Open(newOpenOptions...)
     }, offset, limit)
     if err != nil {
         return nil, err
@@ -781,17 +713,17 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
 }
 
 // Update in to the object with the modTime given of the given size
-func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-    update := func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-        return o.Object, o.Object.Update(ctx, in, src, options...)
+func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+    update := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+        return o.Object, o.Object.Update(in, src, options...)
     }
-    _, err := o.f.put(ctx, in, src, options, update)
+    _, err := o.f.put(in, src, options, update)
     return err
 }
 
 // newDir returns a dir with the Name decrypted
-func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
-    newDir := fs.NewDirCopy(ctx, dir)
+func (f *Fs) newDir(dir fs.Directory) fs.Directory {
+    newDir := fs.NewDirCopy(dir)
     remote := dir.Remote()
     decryptedRemote, err := f.cipher.DecryptDirName(remote)
     if err != nil {
@@ -838,38 +770,10 @@ func (o *ObjectInfo) Size() int64 {
 
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
-func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
+func (o *ObjectInfo) Hash(hash hash.Type) (string, error) {
     return "", nil
 }
 
-// ID returns the ID of the Object if known, or "" if not
-func (o *Object) ID() string {
-    do, ok := o.Object.(fs.IDer)
-    if !ok {
-        return ""
-    }
-    return do.ID()
-}
-
-// SetTier performs changing storage tier of the Object if
-// multiple storage classes supported
-func (o *Object) SetTier(tier string) error {
-    do, ok := o.Object.(fs.SetTierer)
-    if !ok {
-        return errors.New("crypt: underlying remote does not support SetTier")
-    }
-    return do.SetTier(tier)
-}
-
-// GetTier returns storage tier or class of the Object
-func (o *Object) GetTier() string {
-    do, ok := o.Object.(fs.GetTierer)
-    if !ok {
-        return ""
-    }
-    return do.GetTier()
-}
-
 // Check the interfaces are satisfied
 var (
     _ fs.Fs = (*Fs)(nil)
@@ -883,15 +787,7 @@ var (
     _ fs.UnWrapper       = (*Fs)(nil)
     _ fs.ListRer         = (*Fs)(nil)
     _ fs.Abouter         = (*Fs)(nil)
-    _ fs.Wrapper         = (*Fs)(nil)
-    _ fs.MergeDirser     = (*Fs)(nil)
-    _ fs.DirCacheFlusher = (*Fs)(nil)
-    _ fs.ChangeNotifier  = (*Fs)(nil)
-    _ fs.PublicLinker    = (*Fs)(nil)
     _ fs.ObjectInfo      = (*ObjectInfo)(nil)
     _ fs.Object          = (*Object)(nil)
     _ fs.ObjectUnWrapper = (*Object)(nil)
-    _ fs.IDer            = (*Object)(nil)
-    _ fs.SetTierer       = (*Object)(nil)
-    _ fs.GetTierer       = (*Object)(nil)
 )

View File

@@ -6,13 +6,13 @@ import (
     "path/filepath"
     "testing"
 
-    "github.com/rclone/rclone/backend/crypt"
-    _ "github.com/rclone/rclone/backend/drive" // for integration tests
-    _ "github.com/rclone/rclone/backend/local"
-    _ "github.com/rclone/rclone/backend/swift" // for integration tests
-    "github.com/rclone/rclone/fs/config/obscure"
-    "github.com/rclone/rclone/fstest"
-    "github.com/rclone/rclone/fstest/fstests"
+    "github.com/ncw/rclone/backend/crypt"
+    _ "github.com/ncw/rclone/backend/drive" // for integration tests
+    _ "github.com/ncw/rclone/backend/local"
+    _ "github.com/ncw/rclone/backend/swift" // for integration tests
+    "github.com/ncw/rclone/fs/config/obscure"
+    "github.com/ncw/rclone/fstest"
+    "github.com/ncw/rclone/fstest/fstests"
 )
 
 // TestIntegration runs integration tests against the remote
@@ -23,8 +23,6 @@ func TestIntegration(t *testing.T) {
     fstests.Run(t, &fstests.Opt{
         RemoteName: *fstest.RemoteName,
         NilObject:  (*crypt.Object)(nil),
-        UnimplementableFsMethods:     []string{"OpenWriterAt"},
-        UnimplementableObjectMethods: []string{"MimeType"},
     })
 }
@@ -44,8 +42,6 @@ func TestStandard(t *testing.T) {
            {Name: name, Key: "password", Value: obscure.MustObscure("potato")},
            {Name: name, Key: "filename_encryption", Value: "standard"},
        },
-       UnimplementableFsMethods:     []string{"OpenWriterAt"},
-       UnimplementableObjectMethods: []string{"MimeType"},
    })
 }
@@ -65,8 +61,6 @@ func TestOff(t *testing.T) {
            {Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
            {Name: name, Key: "filename_encryption", Value: "off"},
        },
-       UnimplementableFsMethods:     []string{"OpenWriterAt"},
-       UnimplementableObjectMethods: []string{"MimeType"},
    })
 }
@@ -87,7 +81,5 @@ func TestObfuscate(t *testing.T) {
            {Name: name, Key: "filename_encryption", Value: "obfuscate"},
        },
        SkipBadWindowsCharacters: true,
-       UnimplementableFsMethods:     []string{"OpenWriterAt"},
-       UnimplementableObjectMethods: []string{"MimeType"},
    })
 }

View File

@@ -1,4 +1,7 @@
 // Package drive interfaces with the Google Drive object storage system
+
+// +build go1.9
+
 package drive
 
 // FIXME need to deal with some corner cases
@@ -9,7 +12,6 @@ package drive
 
 import (
     "bytes"
-    "context"
     "fmt"
     "io"
     "io/ioutil"
@@ -26,20 +28,20 @@ import (
     "text/template"
     "time"
 
+    "github.com/ncw/rclone/fs"
+    "github.com/ncw/rclone/fs/config"
+    "github.com/ncw/rclone/fs/config/configmap"
+    "github.com/ncw/rclone/fs/config/configstruct"
+    "github.com/ncw/rclone/fs/config/obscure"
+    "github.com/ncw/rclone/fs/fserrors"
+    "github.com/ncw/rclone/fs/fshttp"
+    "github.com/ncw/rclone/fs/hash"
+    "github.com/ncw/rclone/fs/walk"
+    "github.com/ncw/rclone/lib/dircache"
+    "github.com/ncw/rclone/lib/oauthutil"
+    "github.com/ncw/rclone/lib/pacer"
+    "github.com/ncw/rclone/lib/readers"
     "github.com/pkg/errors"
-    "github.com/rclone/rclone/fs"
-    "github.com/rclone/rclone/fs/config"
-    "github.com/rclone/rclone/fs/config/configmap"
-    "github.com/rclone/rclone/fs/config/configstruct"
-    "github.com/rclone/rclone/fs/config/obscure"
-    "github.com/rclone/rclone/fs/fserrors"
-    "github.com/rclone/rclone/fs/fshttp"
-    "github.com/rclone/rclone/fs/hash"
-    "github.com/rclone/rclone/fs/walk"
-    "github.com/rclone/rclone/lib/dircache"
-    "github.com/rclone/rclone/lib/oauthutil"
-    "github.com/rclone/rclone/lib/pacer"
-    "github.com/rclone/rclone/lib/readers"
     "golang.org/x/oauth2"
     "golang.org/x/oauth2/google"
     drive_v2 "google.golang.org/api/drive/v2"
@@ -333,7 +335,7 @@ export URLs for drive documents.  Users have reported that the
 official export URLs can't export large documents, whereas these
 unofficial ones can.
 
-See rclone issue [#2243](https://github.com/rclone/rclone/issues/2243) for background,
+See rclone issue [#2243](https://github.com/ncw/rclone/issues/2243) for background,
 [this google drive issue](https://issuetracker.google.com/issues/36761333) and
 [this helpful post](https://www.labnol.org/internet/direct-links-for-google-drive/28356/).`,
             Advanced: true,
@@ -368,14 +370,6 @@ will download it anyway.`,
             Default:  false,
             Help:     "Keep new head revision of each file forever.",
             Advanced: true,
-        }, {
-            Name:    "size_as_quota",
-            Default: false,
-            Help: `Show storage quota usage for file size.
-
-The storage used by a file is the size of the current version plus any
-older versions that have been set to keep forever.`,
-            Advanced: true,
         }, {
             Name:     "v2_download_min_size",
             Default:  fs.SizeSuffix(-1),
@@ -391,16 +385,6 @@ older versions that have been set to keep forever.`,
             Default:  defaultBurst,
             Help:     "Number of API calls to allow without sleeping.",
             Advanced: true,
-        }, {
-            Name:    "server_side_across_configs",
-            Default: false,
-            Help: `Allow server side operations (eg copy) to work across different drive configs.
-
-This can be useful if you wish to do a server side copy between two
-different Google drives.  Note that this isn't enabled by default
-because it isn't easy to tell if it will work beween any two
-configurations.`,
-            Advanced: true,
         }},
     })
@@ -443,11 +427,9 @@ type Options struct {
     ChunkSize           fs.SizeSuffix `config:"chunk_size"`
     AcknowledgeAbuse    bool          `config:"acknowledge_abuse"`
     KeepRevisionForever bool          `config:"keep_revision_forever"`
-    SizeAsQuota         bool          `config:"size_as_quota"`
     V2DownloadMinSize   fs.SizeSuffix `config:"v2_download_min_size"`
    PacerMinSleep       fs.Duration   `config:"pacer_min_sleep"`
    PacerBurst          int           `config:"pacer_burst"`
-    ServerSideAcrossConfigs bool      `config:"server_side_across_configs"`
 }
 
 // Fs represents a remote drive server
@@ -566,7 +548,7 @@ func containsString(slice []string, s string) bool {
 // If the user fn ever returns true then it early exits with found = true
 //
 // Search params: https://developers.google.com/drive/search-parameters
-func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directoriesOnly, filesOnly, includeAll bool, fn listFn) (found bool, err error) {
+func (f *Fs) list(dirIDs []string, title string, directoriesOnly, filesOnly, includeAll bool, fn listFn) (found bool, err error) {
     var query []string
     if !includeAll {
         q := "trashed=" + strconv.FormatBool(f.opt.TrashedOnly)
@@ -634,11 +616,11 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directoriesOnly, filesOnly, includeAll bool, fn listFn) (found bool, err error) {
     if f.opt.ListChunk > 0 {
         list.PageSize(f.opt.ListChunk)
     }
-    list.SupportsAllDrives(true)
-    list.IncludeItemsFromAllDrives(true)
     if f.isTeamDrive {
-        list.DriveId(f.opt.TeamDriveID)
-        list.Corpora("drive")
+        list.TeamDriveId(f.opt.TeamDriveID)
+        list.SupportsTeamDrives(true)
+        list.IncludeTeamDriveItems(true)
+        list.Corpora("teamDrive")
     }
     // If using appDataFolder then need to add Spaces
     if f.rootFolderID == "appDataFolder" {
@@ -653,9 +635,6 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directoriesOnly, filesOnly, includeAll bool, fn listFn) (found bool, err error) {
     if f.opt.SkipChecksumGphotos {
         fields += ",spaces"
     }
-    if f.opt.SizeAsQuota {
-        fields += ",quotaBytesUsed"
-    }
 
     fields = fmt.Sprintf("files(%s),nextPageToken", fields)
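
The listing flags in the hunk above are the shared-drives API migration this commit predates: master always opts in to items from all drives and scopes the corpus only when a team drive is configured. A sketch of building the equivalent call against google.golang.org/api/drive/v3 — newListCall, query and teamDriveID are illustrative stand-ins for how list() assembles them:

import drive "google.golang.org/api/drive/v3"

// newListCall is illustrative only - it shows the master-side flag set.
func newListCall(svc *drive.Service, query, teamDriveID string) *drive.FilesListCall {
    list := svc.Files.List().Q(query)
    list.SupportsAllDrives(true)         // tell the API we understand shared drives
    list.IncludeItemsFromAllDrives(true) // and want their items in the results
    if teamDriveID != "" {
        list.DriveId(teamDriveID) // restrict the listing to one shared drive
        list.Corpora("drive")
    }
    return list
}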
@@ -911,7 +890,6 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
 
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
-    ctx := context.Background()
     // Parse config into Options struct
     opt := new(Options)
     err := configstruct.Set(m, opt)
@@ -949,7 +927,7 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
         ReadMimeType:            true,
         WriteMimeType:           true,
         CanHaveEmptyDirectories: true,
-        ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
+        ServerSideAcrossConfigs: true,
     }).Fill(f)
 
     // Create a new authorized Drive client.
@@ -998,7 +976,7 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
     }
 
     // Find the current root
-    err = f.dirCache.FindRoot(ctx, false)
+    err = f.dirCache.FindRoot(false)
     if err != nil {
         // Assume it is a file
         newRoot, remote := dircache.SplitPath(root)
@@ -1006,19 +984,19 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
         tempF.dirCache = dircache.New(newRoot, f.rootFolderID, &tempF)
         tempF.root = newRoot
         // Make new Fs which is the parent
-        err = tempF.dirCache.FindRoot(ctx, false)
+        err = tempF.dirCache.FindRoot(false)
         if err != nil {
             // No root so return old f
             return f, nil
         }
-        _, err := tempF.NewObject(ctx, remote)
+        _, err := tempF.NewObject(remote)
         if err != nil {
             // unable to list folder so return old f
             return f, nil
         }
         // XXX: update the old f here instead of returning tempF, since
         // `features` were already filled with functions having *f as a receiver.
-        // See https://github.com/rclone/rclone/issues/2182
+        // See https://github.com/ncw/rclone/issues/2182
         f.dirCache = tempF.dirCache
         f.root = tempF.root
         return f, fs.ErrorIsFile
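
The fallback above is rclone's usual "is the root actually a file?" probe: if the path won't resolve as a directory, retry with the parent and look the leaf up as an object, returning fs.ErrorIsFile on success. A generic sketch of the pattern; probeRoot and the newFsAt constructor parameter are invented for illustration:

// probeRoot decides whether root names a directory or a single file.
// f is the Fs already pointed at root; newFsAt builds an Fs for a path.
func probeRoot(ctx context.Context, f fs.Fs, root string, newFsAt func(string) (fs.Fs, error)) (fs.Fs, error) {
    // try the full path as a directory first
    if _, err := f.List(ctx, ""); err == nil {
        return f, nil // the root really is a directory
    }
    // otherwise point an Fs at the parent ...
    parent, leaf := dircache.SplitPath(root)
    parentFs, err := newFsAt(parent)
    if err != nil {
        return f, nil // no usable parent - keep the original Fs
    }
    // ... and check the leaf resolves to an object there
    if _, err := parentFs.NewObject(ctx, leaf); err != nil {
        return f, nil // leaf isn't a file - keep the original Fs
    }
    // fs.ErrorIsFile tells the caller the remote points at a single file
    return parentFs, fs.ErrorIsFile
}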
@@ -1032,17 +1010,13 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
     if f.opt.UseCreatedDate {
         modifiedDate = info.CreatedTime
     }
-    size := info.Size
-    if f.opt.SizeAsQuota {
-        size = info.QuotaBytesUsed
-    }
     return baseObject{
         fs:           f,
         remote:       remote,
         id:           info.Id,
         modifiedDate: modifiedDate,
         mimeType:     info.MimeType,
-        bytes:        size,
+        bytes:        info.Size,
     }
 }
@@ -1166,8 +1140,8 @@ func (f *Fs) newObjectWithExportInfo(
 
 // NewObject finds the Object at remote.  If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-    info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote)
+func (f *Fs) NewObject(remote string) (fs.Object, error) {
+    info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(remote)
     if err != nil {
         return nil, err
     }
@@ -1185,9 +1159,9 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 }
 
 // FindLeaf finds a directory of name leaf in the folder with ID pathID
-func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
+func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
     // Find the leaf in pathID
-    found, err = f.list(ctx, []string{pathID}, leaf, true, false, false, func(item *drive.File) bool {
+    found, err = f.list([]string{pathID}, leaf, true, false, false, func(item *drive.File) bool {
         if !f.opt.SkipGdocs {
             _, exportName, _, isDocument := f.findExportFormat(item)
             if exportName == leaf {
@@ -1208,7 +1182,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
 }
 
 // CreateDir makes a directory with pathID as parent and name leaf
-func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
+func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
     // fmt.Println("Making", path)
     // Define the metadata for the directory we are going to create.
     createInfo := &drive.File{
@@ -1221,7 +1195,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
     err = f.pacer.Call(func() (bool, error) {
         info, err = f.svc.Files.Create(createInfo).
             Fields("id").
-            SupportsAllDrives(true).
+            SupportsTeamDrives(f.isTeamDrive).
             Do()
         return shouldRetry(err)
     })
@@ -1370,18 +1344,18 @@ func (f *Fs) findImportFormat(mimeType string) string {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-    err = f.dirCache.FindRoot(ctx, false)
+func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+    err = f.dirCache.FindRoot(false)
     if err != nil {
         return nil, err
     }
-    directoryID, err := f.dirCache.FindDir(ctx, dir, false)
+    directoryID, err := f.dirCache.FindDir(dir, false)
     if err != nil {
         return nil, err
     }
 
     var iErr error
-    _, err = f.list(ctx, []string{directoryID}, "", false, false, false, func(item *drive.File) bool {
+    _, err = f.list([]string{directoryID}, "", false, false, false, func(item *drive.File) bool {
         entry, err := f.itemToDirEntry(path.Join(dir, item.Name), item)
         if err != nil {
             iErr = err
@@ -1434,7 +1408,7 @@ func (s listRSlices) Less(i, j int) bool {
 // In each cycle it will read up to grouping entries from the in channel without blocking.
 // If an error occurs it will be send to the out channel and then return. Once the in channel is closed,
 // nil is send to the out channel and the function returns.
-func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan listREntry, out chan<- error, cb func(fs.DirEntry) error, grouping int) {
+func (f *Fs) listRRunner(wg *sync.WaitGroup, in <-chan listREntry, out chan<- error, cb func(fs.DirEntry) error, grouping int) {
     var dirs []string
     var paths []string
 
@@ -1455,7 +1429,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan listREntry, out chan<- error, cb func(fs.DirEntry) error, grouping int) {
         }
         listRSlices{dirs, paths}.Sort()
         var iErr error
-        _, err := f.list(ctx, dirs, "", false, false, false, func(item *drive.File) bool {
+        _, err := f.list(dirs, "", false, false, false, func(item *drive.File) bool {
             for _, parent := range item.Parents {
                 // only handle parents that are in the requested dirs list
                 i := sort.SearchStrings(dirs, parent)
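
The comment above describes the batching contract of listRRunner: block for the first directory, then drain up to grouping more from the in channel without blocking, and signal completion (nil) or the first error on out. A self-contained sketch of that channel pattern with types simplified to strings — batchReader is an invented name, not code from this commit:

// batchReader reads from in until it is closed, handling entries in
// batches of at most grouping. It blocks for the first entry of each
// batch, then takes whatever else is immediately available.
func batchReader(in <-chan string, grouping int, handle func([]string) error, out chan<- error) {
    for dir := range in {
        batch := []string{dir}
    drain:
        for len(batch) < grouping {
            select {
            case d, ok := <-in:
                if !ok {
                    break drain // channel closed - process what we have
                }
                batch = append(batch, d)
            default:
                break drain // nothing queued right now - don't block
            }
        }
        if err := handle(batch); err != nil {
            out <- err // report the first error and stop
            return
        }
    }
    out <- nil // in channel closed - signal clean completion
}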
@@ -1510,17 +1484,17 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan list
// //
// Don't implement this unless you have a more efficient way // Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal. // of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
const ( const (
grouping = 50 grouping = 50
inputBuffer = 1000 inputBuffer = 1000
) )
err = f.dirCache.FindRoot(ctx, false) err = f.dirCache.FindRoot(false)
if err != nil { if err != nil {
return err return err
} }
directoryID, err := f.dirCache.FindDir(ctx, dir, false) directoryID, err := f.dirCache.FindDir(dir, false)
if err != nil { if err != nil {
return err return err
} }
@@ -1529,7 +1503,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
err = f.pacer.CallNoRetry(func() (bool, error) { err = f.pacer.CallNoRetry(func() (bool, error) {
info, err = f.svc.Files.Get("root"). info, err = f.svc.Files.Get("root").
Fields("id"). Fields("id").
SupportsAllDrives(true). SupportsTeamDrives(f.isTeamDrive).
Do() Do()
return shouldRetry(err) return shouldRetry(err)
}) })
@@ -1564,7 +1538,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
in <- listREntry{directoryID, dir} in <- listREntry{directoryID, dir}
for i := 0; i < fs.Config.Checkers; i++ { for i := 0; i < fs.Config.Checkers; i++ {
go f.listRRunner(ctx, &wg, in, out, cb, grouping) go f.listRRunner(&wg, in, out, cb, grouping)
} }
go func() { go func() {
// wait until the all directories are processed // wait until the all directories are processed
@@ -1638,8 +1612,8 @@ func (f *Fs) itemToDirEntry(remote string, item *drive.File) (fs.DirEntry, error
// Creates a drive.File info from the parameters passed in. // Creates a drive.File info from the parameters passed in.
// //
// Used to create new objects // Used to create new objects
func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Time) (*drive.File, error) { func (f *Fs) createFileInfo(remote string, modTime time.Time) (*drive.File, error) {
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true) leaf, directoryID, err := f.dirCache.FindRootAndPath(remote, true)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -1649,6 +1623,7 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
Name: leaf, Name: leaf,
Description: leaf, Description: leaf,
Parents: []string{directoryID}, Parents: []string{directoryID},
MimeType: fs.MimeTypeFromName(remote),
ModifiedTime: modTime.Format(timeFormatOut), ModifiedTime: modTime.Format(timeFormatOut),
} }
return createInfo, nil return createInfo, nil
@@ -1659,32 +1634,32 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
// Copy the reader in to the new object which is returned // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
exisitingObj, err := f.NewObject(ctx, src.Remote()) exisitingObj, err := f.NewObject(src.Remote())
switch err { switch err {
case nil: case nil:
return exisitingObj, exisitingObj.Update(ctx, in, src, options...) return exisitingObj, exisitingObj.Update(in, src, options...)
case fs.ErrorObjectNotFound: case fs.ErrorObjectNotFound:
// Not found so create it // Not found so create it
return f.PutUnchecked(ctx, in, src, options...) return f.PutUnchecked(in, src, options...)
default: default:
return nil, err return nil, err
} }
} }
// PutStream uploads to the remote path with the modTime given of indeterminate size // PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...) return f.Put(in, src, options...)
} }
// PutUnchecked uploads the object // PutUnchecked uploads the object
// //
// This will create a duplicate if we upload a new file without // This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that. // checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote() remote := src.Remote()
size := src.Size() size := src.Size()
modTime := src.ModTime(ctx) modTime := src.ModTime()
srcMimeType := fs.MimeTypeFromName(remote) srcMimeType := fs.MimeTypeFromName(remote)
srcExt := path.Ext(remote) srcExt := path.Ext(remote)
exportExt := "" exportExt := ""
@@ -1706,14 +1681,12 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
} }
} }
createInfo, err := f.createFileInfo(ctx, remote, modTime) createInfo, err := f.createFileInfo(remote, modTime)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if importMimeType != "" { if importMimeType != "" {
createInfo.MimeType = importMimeType createInfo.MimeType = importMimeType
} else {
createInfo.MimeType = fs.MimeTypeFromName(remote)
} }
var info *drive.File var info *drive.File
@@ -1724,7 +1697,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
info, err = f.svc.Files.Create(createInfo). info, err = f.svc.Files.Create(createInfo).
Media(in, googleapi.ContentType(srcMimeType)). Media(in, googleapi.ContentType(srcMimeType)).
Fields(partialFields). Fields(partialFields).
SupportsAllDrives(true). SupportsTeamDrives(f.isTeamDrive).
KeepRevisionForever(f.opt.KeepRevisionForever). KeepRevisionForever(f.opt.KeepRevisionForever).
Do() Do()
return shouldRetry(err) return shouldRetry(err)
@@ -1744,7 +1717,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
// MergeDirs merges the contents of all the directories passed // MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories. // in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error { func (f *Fs) MergeDirs(dirs []fs.Directory) error {
if len(dirs) < 2 { if len(dirs) < 2 {
return nil return nil
} }
@@ -1752,7 +1725,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
for _, srcDir := range dirs[1:] { for _, srcDir := range dirs[1:] {
// list the objects // list the objects
infos := []*drive.File{} infos := []*drive.File{}
_, err := f.list(ctx, []string{srcDir.ID()}, "", false, false, true, func(info *drive.File) bool { _, err := f.list([]string{srcDir.ID()}, "", false, false, true, func(info *drive.File) bool {
infos = append(infos, info) infos = append(infos, info)
return false return false
}) })
@@ -1768,7 +1741,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
RemoveParents(srcDir.ID()). RemoveParents(srcDir.ID()).
AddParents(dstDir.ID()). AddParents(dstDir.ID()).
Fields(""). Fields("").
SupportsAllDrives(true). SupportsTeamDrives(f.isTeamDrive).
Do() Do()
return shouldRetry(err) return shouldRetry(err)
}) })
@@ -1778,7 +1751,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
} }
// rmdir (into trash) the now empty source directory // rmdir (into trash) the now empty source directory
fs.Infof(srcDir, "removing empty directory") fs.Infof(srcDir, "removing empty directory")
err = f.rmdir(ctx, srcDir.ID(), true) err = f.rmdir(srcDir.ID(), true)
if err != nil { if err != nil {
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir) return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
} }
@@ -1787,19 +1760,19 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
} }
// Mkdir creates the container if it doesn't exist // Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error { func (f *Fs) Mkdir(dir string) error {
err := f.dirCache.FindRoot(ctx, true) err := f.dirCache.FindRoot(true)
if err != nil { if err != nil {
return err return err
} }
if dir != "" { if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true) _, err = f.dirCache.FindDir(dir, true)
} }
return err return err
} }
// Rmdir deletes a directory unconditionally by ID // Rmdir deletes a directory unconditionally by ID
func (f *Fs) rmdir(ctx context.Context, directoryID string, useTrash bool) error { func (f *Fs) rmdir(directoryID string, useTrash bool) error {
return f.pacer.Call(func() (bool, error) { return f.pacer.Call(func() (bool, error) {
var err error var err error
if useTrash { if useTrash {
@@ -1808,12 +1781,12 @@ func (f *Fs) rmdir(ctx context.Context, directoryID string, useTrash bool) error
} }
_, err = f.svc.Files.Update(directoryID, &info). _, err = f.svc.Files.Update(directoryID, &info).
Fields(""). Fields("").
SupportsAllDrives(true). SupportsTeamDrives(f.isTeamDrive).
Do() Do()
} else { } else {
err = f.svc.Files.Delete(directoryID). err = f.svc.Files.Delete(directoryID).
Fields(""). Fields("").
SupportsAllDrives(true). SupportsTeamDrives(f.isTeamDrive).
Do() Do()
} }
return shouldRetry(err) return shouldRetry(err)
@@ -1823,15 +1796,15 @@ func (f *Fs) rmdir(ctx context.Context, directoryID string, useTrash bool) error
// Rmdir deletes a directory // Rmdir deletes a directory
// //
// Returns an error if it isn't empty // Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(dir string) error {
root := path.Join(f.root, dir) root := path.Join(f.root, dir)
dc := f.dirCache dc := f.dirCache
directoryID, err := dc.FindDir(ctx, dir, false) directoryID, err := dc.FindDir(dir, false)
if err != nil { if err != nil {
return err return err
} }
var trashedFiles = false var trashedFiles = false
found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool { found, err := f.list([]string{directoryID}, "", false, false, true, func(item *drive.File) bool {
if !item.Trashed { if !item.Trashed {
fs.Debugf(dir, "Rmdir: contains file: %q", item.Name) fs.Debugf(dir, "Rmdir: contains file: %q", item.Name)
return true return true
@@ -1850,7 +1823,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// trash the directory if it had trashed files // trash the directory if it had trashed files
// in or the user wants to trash, otherwise // in or the user wants to trash, otherwise
// delete it. // delete it.
err = f.rmdir(ctx, directoryID, trashedFiles || f.opt.UseTrash) err = f.rmdir(directoryID, trashedFiles || f.opt.UseTrash)
if err != nil { if err != nil {
return err return err
} }
@@ -1876,7 +1849,7 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantCopy // If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
var srcObj *baseObject var srcObj *baseObject
ext := "" ext := ""
switch src := src.(type) { switch src := src.(type) {
@@ -1899,10 +1872,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
remote = remote[:len(remote)-len(ext)] remote = remote[:len(remote)-len(ext)]
} }
// Look to see if there is an existing object createInfo, err := f.createFileInfo(remote, src.ModTime())
existingObject, _ := f.NewObject(ctx, remote)
createInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -1911,7 +1881,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Copy(srcObj.id, createInfo). info, err = f.svc.Files.Copy(srcObj.id, createInfo).
Fields(partialFields). Fields(partialFields).
SupportsAllDrives(true). SupportsTeamDrives(f.isTeamDrive).
KeepRevisionForever(f.opt.KeepRevisionForever). KeepRevisionForever(f.opt.KeepRevisionForever).
Do() Do()
return shouldRetry(err) return shouldRetry(err)
@@ -1919,17 +1889,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil { if err != nil {
return nil, err return nil, err
} }
newObject, err := f.newObjectWithInfo(remote, info) return f.newObjectWithInfo(remote, info)
if err != nil {
return nil, err
}
if existingObject != nil {
err = existingObject.Remove(ctx)
if err != nil {
fs.Errorf(existingObject, "Failed to remove existing object after copy: %v", err)
}
}
return newObject, nil
} }
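The extra lines in this Copy hunk de-duplicate the destination: Drive allows several objects with the same name, so the longer variant records any object already at the remote and removes it only once the server-side copy has succeeded. A hedged sketch of that ordering; doServerSideCopy is a hypothetical wrapper around the Files.Copy call shown above:

existing, _ := f.NewObject(ctx, remote) // error ignored: the object may legitimately be absent
newObject, err := doServerSideCopy(ctx, remote)
if err != nil {
	return nil, err
}
if existing != nil {
	// Remove the old object only after the copy is known to have worked,
	// so a failed copy never loses data.
	if err := existing.Remove(ctx); err != nil {
		fs.Errorf(existing, "Failed to remove existing object after copy: %v", err)
	}
}
return newObject, nil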
// Purge deletes all the files and the container // Purge deletes all the files and the container
@@ -1937,11 +1897,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of // Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the // deleting all the files quicker than just running Remove() on the
// result of List() // result of List()
func (f *Fs) Purge(ctx context.Context) error { func (f *Fs) Purge() error {
if f.root == "" { if f.root == "" {
return errors.New("can't purge root directory") return errors.New("can't purge root directory")
} }
err := f.dirCache.FindRoot(ctx, false) err := f.dirCache.FindRoot(false)
if err != nil { if err != nil {
return err return err
} }
@@ -1952,12 +1912,12 @@ func (f *Fs) Purge(ctx context.Context) error {
} }
_, err = f.svc.Files.Update(f.dirCache.RootID(), &info). _, err = f.svc.Files.Update(f.dirCache.RootID(), &info).
Fields(""). Fields("").
SupportsAllDrives(true). SupportsTeamDrives(f.isTeamDrive).
Do() Do()
} else { } else {
err = f.svc.Files.Delete(f.dirCache.RootID()). err = f.svc.Files.Delete(f.dirCache.RootID()).
Fields(""). Fields("").
SupportsAllDrives(true). SupportsTeamDrives(f.isTeamDrive).
Do() Do()
} }
return shouldRetry(err) return shouldRetry(err)
@@ -1970,7 +1930,7 @@ func (f *Fs) Purge(ctx context.Context) error {
} }
// CleanUp empties the trash // CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) error { func (f *Fs) CleanUp() error {
err := f.pacer.Call(func() (bool, error) { err := f.pacer.Call(func() (bool, error) {
err := f.svc.Files.EmptyTrash().Do() err := f.svc.Files.EmptyTrash().Do()
return shouldRetry(err) return shouldRetry(err)
@@ -1983,7 +1943,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
} }
// About gets quota information // About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { func (f *Fs) About() (*fs.Usage, error) {
if f.isTeamDrive { if f.isTeamDrive {
// Teamdrives don't appear to have a usage API so just return empty // Teamdrives don't appear to have a usage API so just return empty
return &fs.Usage{}, nil return &fs.Usage{}, nil
@@ -2019,7 +1979,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantMove // If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
var srcObj *baseObject var srcObj *baseObject
ext := "" ext := ""
switch src := src.(type) { switch src := src.(type) {
@@ -2042,13 +2002,13 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
remote = remote[:len(remote)-len(ext)] remote = remote[:len(remote)-len(ext)]
} }
_, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, src.Remote(), false) _, srcParentID, err := srcObj.fs.dirCache.FindPath(src.Remote(), false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Temporary Object under construction // Temporary Object under construction
dstInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx)) dstInfo, err := f.createFileInfo(remote, src.ModTime())
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -2062,7 +2022,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
RemoveParents(srcParentID). RemoveParents(srcParentID).
AddParents(dstParents). AddParents(dstParents).
Fields(partialFields). Fields(partialFields).
SupportsAllDrives(true). SupportsTeamDrives(f.isTeamDrive).
Do() Do()
return shouldRetry(err) return shouldRetry(err)
}) })
@@ -2074,13 +2034,13 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
// PublicLink adds a "readable by anyone with link" permission on the given file or folder. // PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) { func (f *Fs) PublicLink(remote string) (link string, err error) {
id, err := f.dirCache.FindDir(ctx, remote, false) id, err := f.dirCache.FindDir(remote, false)
if err == nil { if err == nil {
fs.Debugf(f, "attempting to share directory '%s'", remote) fs.Debugf(f, "attempting to share directory '%s'", remote)
} else { } else {
fs.Debugf(f, "attempting to share single file '%s'", remote) fs.Debugf(f, "attempting to share single file '%s'", remote)
o, err := f.NewObject(ctx, remote) o, err := f.NewObject(remote)
if err != nil { if err != nil {
return "", err return "", err
} }
@@ -2098,7 +2058,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
// Need to either check the `canShare` attribute on the object or see if a sufficient permission is already present. // Need to either check the `canShare` attribute on the object or see if a sufficient permission is already present.
_, err = f.svc.Permissions.Create(id, permission). _, err = f.svc.Permissions.Create(id, permission).
Fields(""). Fields("").
SupportsAllDrives(true). SupportsTeamDrives(f.isTeamDrive).
Do() Do()
return shouldRetry(err) return shouldRetry(err)
}) })
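For context, the permission created here is the standard Drive "anyone with the link can read" grant. A minimal sketch, assuming google.golang.org/api/drive/v3 (the exact fields rclone sets on the Permission are outside this hunk):

permission := &drive.Permission{
	Role: "reader", // read-only access
	Type: "anyone", // anyone who has the link, no sign-in required
}
_, err = svc.Permissions.Create(id, permission).
	Fields("").
	SupportsAllDrives(true).
	Do()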
@@ -2116,7 +2076,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
// If it isn't possible then return fs.ErrorCantDirMove // If it isn't possible then return fs.ErrorCantDirMove
// //
// If destination exists then return fs.ErrorDirExists // If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs) srcFs, ok := src.(*Fs)
if !ok { if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type") fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -2132,14 +2092,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
} }
// find the root src directory // find the root src directory
err := srcFs.dirCache.FindRoot(ctx, false) err := srcFs.dirCache.FindRoot(false)
if err != nil { if err != nil {
return err return err
} }
// find the root dst directory // find the root dst directory
if dstRemote != "" { if dstRemote != "" {
err = f.dirCache.FindRoot(ctx, true) err = f.dirCache.FindRoot(true)
if err != nil { if err != nil {
return err return err
} }
@@ -2155,14 +2115,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if dstRemote == "" { if dstRemote == "" {
findPath = f.root findPath = f.root
} }
leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true) leaf, dstDirectoryID, err = f.dirCache.FindPath(findPath, true)
if err != nil { if err != nil {
return err return err
} }
// Check destination does not exist // Check destination does not exist
if dstRemote != "" { if dstRemote != "" {
_, err = f.dirCache.FindDir(ctx, dstRemote, false) _, err = f.dirCache.FindDir(dstRemote, false)
if err == fs.ErrorDirNotFound { if err == fs.ErrorDirNotFound {
// OK // OK
} else if err != nil { } else if err != nil {
@@ -2177,14 +2137,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if srcRemote == "" { if srcRemote == "" {
srcDirectoryID, err = srcFs.dirCache.RootParentID() srcDirectoryID, err = srcFs.dirCache.RootParentID()
} else { } else {
_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, srcRemote, false) _, srcDirectoryID, err = srcFs.dirCache.FindPath(srcRemote, false)
} }
if err != nil { if err != nil {
return err return err
} }
// Find ID of src // Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false) srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
if err != nil { if err != nil {
return err return err
} }
@@ -2198,7 +2158,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
RemoveParents(srcDirectoryID). RemoveParents(srcDirectoryID).
AddParents(dstDirectoryID). AddParents(dstDirectoryID).
Fields(""). Fields("").
SupportsAllDrives(true). SupportsTeamDrives(f.isTeamDrive).
Do() Do()
return shouldRetry(err) return shouldRetry(err)
}) })
@@ -2215,7 +2175,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Automatically restarts itself in case of unexpected behavior of the remote. // Automatically restarts itself in case of unexpected behavior of the remote.
// //
// Close the returned channel to stop being notified. // Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) { func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
go func() { go func() {
// get the StartPageToken early so all changes from now on get processed // get the StartPageToken early so all changes from now on get processed
startPageToken, err := f.changeNotifyStartPageToken() startPageToken, err := f.changeNotifyStartPageToken()
@@ -2262,7 +2222,7 @@ func (f *Fs) changeNotifyStartPageToken() (pageToken string, err error) {
var startPageToken *drive.StartPageToken var startPageToken *drive.StartPageToken
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
startPageToken, err = f.svc.Changes.GetStartPageToken(). startPageToken, err = f.svc.Changes.GetStartPageToken().
SupportsAllDrives(true). SupportsTeamDrives(f.isTeamDrive).
Do() Do()
return shouldRetry(err) return shouldRetry(err)
}) })
@@ -2283,10 +2243,10 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), startPage
if f.opt.ListChunk > 0 { if f.opt.ListChunk > 0 {
changesCall.PageSize(f.opt.ListChunk) changesCall.PageSize(f.opt.ListChunk)
} }
changesCall.SupportsAllDrives(true)
changesCall.IncludeItemsFromAllDrives(true)
if f.isTeamDrive { if f.isTeamDrive {
changesCall.TeamDriveId(f.opt.TeamDriveID) changesCall.TeamDriveId(f.opt.TeamDriveID)
changesCall.SupportsTeamDrives(true)
changesCall.IncludeTeamDriveItems(true)
} }
changeList, err = changesCall.Do() changeList, err = changesCall.Do()
return shouldRetry(err) return shouldRetry(err)
@@ -2390,13 +2350,13 @@ func (o *baseObject) Remote() string {
} }
// Hash returns the Md5sum of an object returning a lowercase hex string // Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.MD5 { if t != hash.MD5 {
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
return o.md5sum, nil return o.md5sum, nil
} }
func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) { func (o *baseObject) Hash(t hash.Type) (string, error) {
if t != hash.MD5 { if t != hash.MD5 {
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
@@ -2409,15 +2369,15 @@ func (o *baseObject) Size() int64 {
} }
// getRemoteInfo returns a drive.File for the remote // getRemoteInfo returns a drive.File for the remote
func (f *Fs) getRemoteInfo(ctx context.Context, remote string) (info *drive.File, err error) { func (f *Fs) getRemoteInfo(remote string) (info *drive.File, err error) {
info, _, _, _, _, err = f.getRemoteInfoWithExport(ctx, remote) info, _, _, _, _, err = f.getRemoteInfoWithExport(remote)
return return
} }
// getRemoteInfoWithExport returns a drive.File and the export settings for the remote // getRemoteInfoWithExport returns a drive.File and the export settings for the remote
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) ( func (f *Fs) getRemoteInfoWithExport(remote string) (
info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) { info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false) leaf, directoryID, err := f.dirCache.FindRootAndPath(remote, false)
if err != nil { if err != nil {
if err == fs.ErrorDirNotFound { if err == fs.ErrorDirNotFound {
return nil, "", "", "", false, fs.ErrorObjectNotFound return nil, "", "", "", false, fs.ErrorObjectNotFound
@@ -2425,7 +2385,7 @@ func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
return nil, "", "", "", false, err return nil, "", "", "", false, err
} }
found, err := f.list(ctx, []string{directoryID}, leaf, false, true, false, func(item *drive.File) bool { found, err := f.list([]string{directoryID}, leaf, false, true, false, func(item *drive.File) bool {
if !f.opt.SkipGdocs { if !f.opt.SkipGdocs {
extension, exportName, exportMimeType, isDocument = f.findExportFormat(item) extension, exportName, exportMimeType, isDocument = f.findExportFormat(item)
if exportName == leaf { if exportName == leaf {
@@ -2456,7 +2416,7 @@ func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
// //
// It attempts to read the object's mtime and if that isn't present the // LastModified returned in the http headers
// LastModified returned in the http headers // LastModified returned in the http headers
func (o *baseObject) ModTime(ctx context.Context) time.Time { func (o *baseObject) ModTime() time.Time {
modTime, err := time.Parse(timeFormatIn, o.modifiedDate) modTime, err := time.Parse(timeFormatIn, o.modifiedDate)
if err != nil { if err != nil {
fs.Debugf(o, "Failed to read mtime from object: %v", err) fs.Debugf(o, "Failed to read mtime from object: %v", err)
@@ -2466,7 +2426,7 @@ func (o *baseObject) ModTime(ctx context.Context) time.Time {
} }
// SetModTime sets the modification time of the drive fs object // SetModTime sets the modification time of the drive fs object
func (o *baseObject) SetModTime(ctx context.Context, modTime time.Time) error { func (o *baseObject) SetModTime(modTime time.Time) error {
// New metadata // New metadata
updateInfo := &drive.File{ updateInfo := &drive.File{
ModifiedTime: modTime.Format(timeFormatOut), ModifiedTime: modTime.Format(timeFormatOut),
@@ -2477,7 +2437,7 @@ func (o *baseObject) SetModTime(ctx context.Context, modTime time.Time) error {
var err error var err error
info, err = o.fs.svc.Files.Update(o.id, updateInfo). info, err = o.fs.svc.Files.Update(o.id, updateInfo).
Fields(partialFields). Fields(partialFields).
SupportsAllDrives(true). SupportsTeamDrives(o.fs.isTeamDrive).
Do() Do()
return shouldRetry(err) return shouldRetry(err)
}) })
@@ -2599,13 +2559,13 @@ func (o *baseObject) open(url string, options ...fs.OpenOption) (in io.ReadClose
} }
// Open an object for read // Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.v2Download { if o.v2Download {
var v2File *drive_v2.File var v2File *drive_v2.File
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
v2File, err = o.fs.v2Svc.Files.Get(o.id). v2File, err = o.fs.v2Svc.Files.Get(o.id).
Fields("downloadUrl"). Fields("downloadUrl").
SupportsAllDrives(true). SupportsTeamDrives(o.fs.isTeamDrive).
Do() Do()
return shouldRetry(err) return shouldRetry(err)
}) })
@@ -2617,7 +2577,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
} }
return o.baseObject.open(o.url, options...) return o.baseObject.open(o.url, options...)
} }
func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *documentObject) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Update the size with what we are reading as it can change from // Update the size with what we are reading as it can change from
// the HEAD in the listing to this GET. This stops rclone marking // the HEAD in the listing to this GET. This stops rclone marking
// the transfer as corrupted. // the transfer as corrupted.
@@ -2649,7 +2609,7 @@ func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in
} }
return return
} }
func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *linkObject) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset, limit int64 = 0, -1 var offset, limit int64 = 0, -1
var data = o.content var data = o.content
for _, option := range options { for _, option := range options {
@@ -2685,7 +2645,7 @@ func (o *baseObject) update(updateInfo *drive.File, uploadMimeType string, in io
info, err = o.fs.svc.Files.Update(o.id, updateInfo). info, err = o.fs.svc.Files.Update(o.id, updateInfo).
Media(in, googleapi.ContentType(uploadMimeType)). Media(in, googleapi.ContentType(uploadMimeType)).
Fields(partialFields). Fields(partialFields).
SupportsAllDrives(true). SupportsTeamDrives(o.fs.isTeamDrive).
KeepRevisionForever(o.fs.opt.KeepRevisionForever). KeepRevisionForever(o.fs.opt.KeepRevisionForever).
Do() Do()
return shouldRetry(err) return shouldRetry(err)
@@ -2701,11 +2661,11 @@ func (o *baseObject) update(updateInfo *drive.File, uploadMimeType string, in io
// Copy the reader into the object updating modTime and size // Copy the reader into the object updating modTime and size
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
srcMimeType := fs.MimeType(ctx, src) srcMimeType := fs.MimeType(src)
updateInfo := &drive.File{ updateInfo := &drive.File{
MimeType: srcMimeType, MimeType: srcMimeType,
ModifiedTime: src.ModTime(ctx).Format(timeFormatOut), ModifiedTime: src.ModTime().Format(timeFormatOut),
} }
info, err := o.baseObject.update(updateInfo, srcMimeType, in, src) info, err := o.baseObject.update(updateInfo, srcMimeType, in, src)
if err != nil { if err != nil {
@@ -2724,12 +2684,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return nil return nil
} }
func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *documentObject) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
srcMimeType := fs.MimeType(ctx, src) srcMimeType := fs.MimeType(src)
importMimeType := "" importMimeType := ""
updateInfo := &drive.File{ updateInfo := &drive.File{
MimeType: srcMimeType, MimeType: srcMimeType,
ModifiedTime: src.ModTime(ctx).Format(timeFormatOut), ModifiedTime: src.ModTime().Format(timeFormatOut),
} }
if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs { if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs {
@@ -2766,12 +2726,12 @@ func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.Object
return nil return nil
} }
func (o *linkObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *linkObject) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return errors.New("cannot update link files") return errors.New("cannot update link files")
} }
// Remove an object // Remove an object
func (o *baseObject) Remove(ctx context.Context) error { func (o *baseObject) Remove() error {
var err error var err error
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
if o.fs.opt.UseTrash { if o.fs.opt.UseTrash {
@@ -2780,12 +2740,12 @@ func (o *baseObject) Remove(ctx context.Context) error {
} }
_, err = o.fs.svc.Files.Update(o.id, &info). _, err = o.fs.svc.Files.Update(o.id, &info).
Fields(""). Fields("").
SupportsAllDrives(true). SupportsTeamDrives(o.fs.isTeamDrive).
Do() Do()
} else { } else {
err = o.fs.svc.Files.Delete(o.id). err = o.fs.svc.Files.Delete(o.id).
Fields(""). Fields("").
SupportsAllDrives(true). SupportsTeamDrives(o.fs.isTeamDrive).
Do() Do()
} }
return shouldRetry(err) return shouldRetry(err)
@@ -2794,7 +2754,7 @@ func (o *baseObject) Remove(ctx context.Context) error {
} }
// MimeType of an Object if known, "" otherwise // MimeType of an Object if known, "" otherwise
func (o *baseObject) MimeType(ctx context.Context) string { func (o *baseObject) MimeType() string {
return o.mimeType return o.mimeType
} }

View File

@@ -1,8 +1,9 @@
// +build go1.9
package drive package drive
import ( import (
"bytes" "bytes"
"context"
"encoding/json" "encoding/json"
"io" "io"
"io/ioutil" "io/ioutil"
@@ -11,11 +12,11 @@ import (
"strings" "strings"
"testing" "testing"
_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
"github.com/ncw/rclone/fstest/fstests"
"github.com/pkg/errors" "github.com/pkg/errors"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"google.golang.org/api/drive/v3" "google.golang.org/api/drive/v3"
@@ -196,7 +197,7 @@ func (f *Fs) InternalTestDocumentImport(t *testing.T) {
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc") _, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
require.NoError(t, err) require.NoError(t, err)
err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.doc", "example2.doc") err = operations.CopyFile(f, testFilesFs, "example2.doc", "example2.doc")
require.NoError(t, err) require.NoError(t, err)
} }
@@ -210,7 +211,7 @@ func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc") _, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
require.NoError(t, err) require.NoError(t, err)
err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.xlsx", "example1.ods") err = operations.CopyFile(f, testFilesFs, "example2.xlsx", "example1.ods")
require.NoError(t, err) require.NoError(t, err)
} }
@@ -221,10 +222,10 @@ func (f *Fs) InternalTestDocumentExport(t *testing.T) {
f.exportExtensions, _, err = parseExtensions("txt") f.exportExtensions, _, err = parseExtensions("txt")
require.NoError(t, err) require.NoError(t, err)
obj, err := f.NewObject(context.Background(), "example2.txt") obj, err := f.NewObject("example2.txt")
require.NoError(t, err) require.NoError(t, err)
rc, err := obj.Open(context.Background()) rc, err := obj.Open()
require.NoError(t, err) require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }() defer func() { require.NoError(t, rc.Close()) }()
@@ -247,10 +248,10 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
f.exportExtensions, _, err = parseExtensions("link.html") f.exportExtensions, _, err = parseExtensions("link.html")
require.NoError(t, err) require.NoError(t, err)
obj, err := f.NewObject(context.Background(), "example2.link.html") obj, err := f.NewObject("example2.link.html")
require.NoError(t, err) require.NoError(t, err)
rc, err := obj.Open(context.Background()) rc, err := obj.Open()
require.NoError(t, err) require.NoError(t, err)
defer func() { require.NoError(t, rc.Close()) }() defer func() { require.NoError(t, rc.Close()) }()

View File

@@ -1,12 +1,14 @@
// Test Drive filesystem interface // Test Drive filesystem interface
// +build go1.9
package drive package drive
import ( import (
"testing" "testing"
"github.com/rclone/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/rclone/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote

View File

@@ -0,0 +1,6 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build !go1.9
package drive

View File

@@ -8,6 +8,8 @@
// //
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS // This contains code adapted from google.golang.org/api (C) the GO AUTHORS
// +build go1.9
package drive package drive
import ( import (
@@ -19,10 +21,10 @@ import (
"regexp" "regexp"
"strconv" "strconv"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/readers"
"google.golang.org/api/drive/v3" "google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi" "google.golang.org/api/googleapi"
) )
@@ -56,7 +58,9 @@ func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string
"uploadType": {"resumable"}, "uploadType": {"resumable"},
"fields": {partialFields}, "fields": {partialFields},
} }
params.Set("supportsAllDrives", "true") if f.isTeamDrive {
params.Set("supportsTeamDrives", "true")
}
if f.opt.KeepRevisionForever { if f.opt.KeepRevisionForever {
params.Set("keepRevisionForever", "true") params.Set("keepRevisionForever", "true")
} }

View File

@@ -5,7 +5,7 @@ import (
"fmt" "fmt"
"testing" "testing"
"github.com/rclone/rclone/backend/dropbox/dbhash" "github.com/ncw/rclone/backend/dropbox/dbhash"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )

View File

@@ -22,7 +22,6 @@ of path_display and all will be well.
*/ */
import ( import (
"context"
"fmt" "fmt"
"io" "io"
"log" "log"
@@ -38,17 +37,17 @@ import (
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing" "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team" "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users" "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/oauth2" "golang.org/x/oauth2"
) )
@@ -442,7 +441,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje
// NewObject finds the Object at remote. If it can't be found // NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound. // it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil) return f.newObjectWithInfo(remote, nil)
} }
@@ -455,7 +454,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// //
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
root := f.slashRoot root := f.slashRoot
if dir != "" { if dir != "" {
root += "/" + dir root += "/" + dir
@@ -542,22 +541,22 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Copy the reader in to the new object which is returned // Copy the reader in to the new object which is returned
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction // Temporary Object under construction
o := &Object{ o := &Object{
fs: f, fs: f,
remote: src.Remote(), remote: src.Remote(),
} }
return o, o.Update(ctx, in, src, options...) return o, o.Update(in, src, options...)
} }
// PutStream uploads to the remote path with the modTime given of indeterminate size // PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...) return f.Put(in, src, options...)
} }
// Mkdir creates the container if it doesn't exist // Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error { func (f *Fs) Mkdir(dir string) error {
root := path.Join(f.slashRoot, dir) root := path.Join(f.slashRoot, dir)
// can't create or run metadata on root // can't create or run metadata on root
@@ -587,7 +586,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// Rmdir deletes the container // Rmdir deletes the container
// //
// Returns an error if it isn't empty // Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(dir string) error {
root := path.Join(f.slashRoot, dir) root := path.Join(f.slashRoot, dir)
// can't remove root // can't remove root
@@ -643,7 +642,7 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantCopy // If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
if !ok { if !ok {
fs.Debugf(src, "Can't copy - not same remote type") fs.Debugf(src, "Can't copy - not same remote type")
@@ -688,7 +687,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Optional interface: Only implement this if you have a way of // Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the // deleting all the files quicker than just running Remove() on the
// result of List() // result of List()
func (f *Fs) Purge(ctx context.Context) (err error) { func (f *Fs) Purge() (err error) {
// Let dropbox delete the filesystem tree // Let dropbox delete the filesystem tree
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.DeleteV2(&files.DeleteArg{Path: f.slashRoot}) _, err = f.srv.DeleteV2(&files.DeleteArg{Path: f.slashRoot})
@@ -706,7 +705,7 @@ func (f *Fs) Purge(ctx context.Context) (err error) {
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantMove // If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
if !ok { if !ok {
fs.Debugf(src, "Can't move - not same remote type") fs.Debugf(src, "Can't move - not same remote type")
@@ -746,7 +745,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
} }
// PublicLink adds a "readable by anyone with link" permission on the given file or folder. // PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) { func (f *Fs) PublicLink(remote string) (link string, err error) {
absPath := "/" + path.Join(f.Root(), remote) absPath := "/" + path.Join(f.Root(), remote)
fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath) fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
createArg := sharing.CreateSharedLinkWithSettingsArg{ createArg := sharing.CreateSharedLinkWithSettingsArg{
@@ -799,7 +798,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
// If it isn't possible then return fs.ErrorCantDirMove // If it isn't possible then return fs.ErrorCantDirMove
// //
// If destination exists then return fs.ErrorDirExists // If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs) srcFs, ok := src.(*Fs)
if !ok { if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type") fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -835,7 +834,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
} }
// About gets quota information // About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { func (f *Fs) About() (usage *fs.Usage, err error) {
var q *users.SpaceUsage var q *users.SpaceUsage
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
q, err = f.users.GetSpaceUsage() q, err = f.users.GetSpaceUsage()
@@ -887,7 +886,7 @@ func (o *Object) Remote() string {
} }
// Hash returns the dropbox special hash // Hash returns the dropbox special hash
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.Dropbox { if t != hash.Dropbox {
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
@@ -949,7 +948,7 @@ func (o *Object) readMetaData() (err error) {
// //
// It attempts to read the object's mtime and if that isn't present the // It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers // LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time { func (o *Object) ModTime() time.Time {
err := o.readMetaData() err := o.readMetaData()
if err != nil { if err != nil {
fs.Debugf(o, "Failed to read metadata: %v", err) fs.Debugf(o, "Failed to read metadata: %v", err)
@@ -961,7 +960,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// SetModTime sets the modification time of the local fs object // SetModTime sets the modification time of the local fs object
// //
// Commits the datastore // Commits the datastore
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { func (o *Object) SetModTime(modTime time.Time) error {
// Dropbox doesn't have a way of doing this so returning this // Dropbox doesn't have a way of doing this so returning this
// error will cause the file to be deleted first then // error will cause the file to be deleted first then
// re-uploaded to set the time. // re-uploaded to set the time.
@@ -974,7 +973,7 @@ func (o *Object) Storable() bool {
} }
// Open an object for read // Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
headers := fs.OpenOptionHeaders(options) headers := fs.OpenOptionHeaders(options)
arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers} arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers}
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
@@ -1100,7 +1099,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
// Copy the reader into the object updating modTime and size // Copy the reader into the object updating modTime and size
// //
// The new object may have been created if an error is returned // The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
remote := o.remotePath() remote := o.remotePath()
if ignoredFiles.MatchString(remote) { if ignoredFiles.MatchString(remote) {
fs.Logf(o, "File name disallowed - not uploading") fs.Logf(o, "File name disallowed - not uploading")
@@ -1109,7 +1108,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
commitInfo := files.NewCommitInfo(o.remotePath()) commitInfo := files.NewCommitInfo(o.remotePath())
commitInfo.Mode.Tag = "overwrite" commitInfo.Mode.Tag = "overwrite"
// The Dropbox API only accepts timestamps in UTC with second precision. // The Dropbox API only accepts timestamps in UTC with second precision.
commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second) commitInfo.ClientModified = src.ModTime().UTC().Round(time.Second)
size := src.Size() size := src.Size()
var err error var err error
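A tiny runnable illustration of the normalization the comment above describes: Dropbox stores client_modified in UTC with whole-second precision, so the zone offset and sub-second part are stripped before upload.

package main

import (
	"fmt"
	"time"
)

func main() {
	// A timestamp with a zone offset and sub-second precision.
	modTime := time.Date(2019, 6, 1, 12, 30, 45, 678900000, time.FixedZone("CEST", 2*3600))
	// The same normalization as commitInfo.ClientModified above.
	fmt.Println(modTime.UTC().Round(time.Second)) // 2019-06-01 10:30:46 +0000 UTC
}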
@@ -1129,7 +1128,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
// Remove an object // Remove an object
func (o *Object) Remove(ctx context.Context) (err error) { func (o *Object) Remove() (err error) {
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{Path: o.remotePath()}) _, err = o.fs.srv.DeleteV2(&files.DeleteArg{Path: o.remotePath()})
return shouldRetry(err) return shouldRetry(err)

View File

@@ -4,8 +4,8 @@ package dropbox
import ( import (
"testing" "testing"
"github.com/rclone/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/rclone/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote

View File

@@ -1,386 +0,0 @@
package fichier
import (
"context"
"io"
"net/http"
"regexp"
"strconv"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
)
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
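Every helper in this file repeats the same pacer pattern: wrap the REST call in a closure, feed the response to shouldRetry, and let the pacer re-run the closure with backoff for as long as the error stays retryable. A hedged skeleton of that pattern; EndpointRequest, EndpointResponse and /endpoint.cgi are placeholders, not names from this file:

func (f *Fs) callEndpoint(req *EndpointRequest) (*EndpointResponse, error) {
	opts := rest.Opts{
		Method: "POST",
		Path:   "/endpoint.cgi",
	}
	response := &EndpointResponse{}
	err := f.pacer.Call(func() (bool, error) {
		resp, err := f.rest.CallJSON(&opts, req, response)
		return shouldRetry(resp, err) // retried on 429/5xx and transient network errors
	})
	if err != nil {
		return nil, errors.Wrap(err, "endpoint call failed")
	}
	return response, nil
}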
var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString
func (f *Fs) getDownloadToken(url string) (*GetTokenResponse, error) {
request := DownloadRequest{
URL: url,
Single: 1,
}
opts := rest.Opts{
Method: "POST",
Path: "/download/get_token.cgi",
}
var token GetTokenResponse
err := f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, &request, &token)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
}
return &token, nil
}
func fileFromSharedFile(file *SharedFile) File {
return File{
URL: file.Link,
Filename: file.Filename,
Size: file.Size,
}
}
func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
}
var sharedFiles SharedFolderResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, nil, &sharedFiles)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
}
entries = make([]fs.DirEntry, len(sharedFiles))
for i, sharedFile := range sharedFiles {
entries[i] = f.newObjectFromFile(ctx, "", fileFromSharedFile(&sharedFile))
}
return entries, nil
}
func (f *Fs) listFiles(directoryID int) (filesList *FilesList, err error) {
// fs.Debugf(f, "Requesting files for dir `%s`", directoryID)
request := ListFilesRequest{
FolderID: directoryID,
}
opts := rest.Opts{
Method: "POST",
Path: "/file/ls.cgi",
}
filesList = &FilesList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, &request, filesList)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
}
return filesList, nil
}
func (f *Fs) listFolders(directoryID int) (foldersList *FoldersList, err error) {
// fs.Debugf(f, "Requesting folders for id `%s`", directoryID)
request := ListFolderRequest{
FolderID: directoryID,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/ls.cgi",
}
foldersList = &FoldersList{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, &request, foldersList)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list folders")
}
// fs.Debugf(f, "Got FoldersList for id `%s`", directoryID)
return foldersList, err
}
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
files, err := f.listFiles(folderID)
if err != nil {
return nil, err
}
folders, err := f.listFolders(folderID)
if err != nil {
return nil, err
}
entries = make([]fs.DirEntry, len(files.Items)+len(folders.SubFolders))
for i, item := range files.Items {
entries[i] = f.newObjectFromFile(ctx, dir, item)
}
for i, folder := range folders.SubFolders {
createDate, err := time.Parse("2006-01-02 15:04:05", folder.CreateDate)
if err != nil {
return nil, err
}
folder.Name = restoreReservedChars(folder.Name)
fullPath := getRemote(dir, folder.Name)
folderID := strconv.Itoa(folder.ID)
entries[len(files.Items)+i] = fs.NewDir(fullPath, createDate).SetID(folderID)
// fs.Debugf(f, "Put Path `%s` for id `%d` into dircache", fullPath, folder.ID)
f.dirCache.Put(fullPath, folderID)
}
return entries, nil
}
func (f *Fs) newObjectFromFile(ctx context.Context, dir string, item File) *Object {
return &Object{
fs: f,
remote: getRemote(dir, item.Filename),
file: item,
}
}
func getRemote(dir, fileName string) string {
if dir == "" {
return fileName
}
return dir + "/" + fileName
}
func (f *Fs) makeFolder(leaf string, folderID int) (response *MakeFolderResponse, err error) {
name := replaceReservedChars(leaf)
// fs.Debugf(f, "Creating folder `%s` in id `%s`", name, directoryID)
request := MakeFolderRequest{
FolderID: folderID,
Name: name,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/mkdir.cgi",
}
response = &MakeFolderResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, &request, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create folder")
}
// fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID)
return response, err
}
func (f *Fs) removeFolder(name string, folderID int) (response *GenericOKResponse, err error) {
// fs.Debugf(f, "Removing folder with id `%s`", directoryID)
request := &RemoveFolderRequest{
FolderID: folderID,
}
opts := rest.Opts{
Method: "POST",
Path: "/folder/rm.cgi",
}
response = &GenericOKResponse{}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rest.CallJSON(&opts, request, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove folder")
}
if response.Status != "OK" {
return nil, errors.New("Can't remove non-empty dir")
}
// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
return response, nil
}
func (f *Fs) deleteFile(url string) (response *GenericOKResponse, err error) {
request := &RemoveFileRequest{
Files: []RmFile{
{url},
},
}
opts := rest.Opts{
Method: "POST",
Path: "/file/rm.cgi",
}
response = &GenericOKResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, request, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't remove file")
}
// fs.Debugf(f, "Removed file with url `%s`", url)
return response, nil
}
func (f *Fs) getUploadNode() (response *GetUploadNodeResponse, err error) {
// fs.Debugf(f, "Requesting Upload node")
opts := rest.Opts{
Method: "GET",
ContentType: "application/json", // 1Fichier API is bad
Path: "/upload/get_upload_server.cgi",
}
response = &GetUploadNodeResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, nil, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "didnt got an upload node")
}
// fs.Debugf(f, "Got Upload node")
return response, err
}
func (f *Fs) uploadFile(in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
// fs.Debugf(f, "Uploading File `%s`", fileName)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
}
opts := rest.Opts{
Method: "POST",
Path: "/upload.cgi",
Parameters: map[string][]string{
"id": {uploadID},
},
NoResponse: true,
Body: in,
ContentLength: &size,
MultipartContentName: "file[]",
MultipartFileName: fileName,
MultipartParams: map[string][]string{
"did": {folderID},
},
}
if node != "" {
opts.RootURL = "https://" + node
}
err = f.pacer.CallNoRetry(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, nil, nil)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't upload file")
}
// fs.Debugf(f, "Uploaded File `%s`", fileName)
return response, err
}
func (f *Fs) endUpload(uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
// fs.Debugf(f, "Ending File Upload `%s`", uploadID)
if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
}
opts := rest.Opts{
Method: "GET",
Path: "/end.pl",
RootURL: "https://" + nodeurl,
Parameters: map[string][]string{
"xid": {uploadID},
},
ExtraHeaders: map[string]string{
"JSON": "1",
},
}
response = &EndFileUploadResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(&opts, nil, response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't finish file upload")
}
return response, err
}
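Taken together, the last three helpers implement 1Fichier's upload handshake: request an upload node, stream the multipart body to that node, then call end.pl to finalize. A hedged sketch of the sequence, assuming GetUploadNodeResponse exposes the assigned node host and upload id as URL and ID (its fields are defined elsewhere):

node, err := f.getUploadNode()
if err != nil {
	return err
}
// Stream the body to the node we were assigned, tagged with its upload id.
if _, err := f.uploadFile(in, size, fileName, folderID, node.ID, node.URL); err != nil {
	return err
}
// Tell the node the transfer is finished so the file is published.
if _, err := f.endUpload(node.ID, node.URL); err != nil {
	return err
}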

View File

@@ -1,411 +0,0 @@
package fichier
import (
"context"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
const (
rootID = "0"
apiBaseURL = "https://api.1fichier.com/v1"
minSleep = 334 * time.Millisecond // 3 API calls per second is recommended
maxSleep = 5 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
func init() {
fs.Register(&fs.RegInfo{
Name: "fichier",
Description: "1Fichier",
Config: func(name string, config configmap.Mapper) {
},
NewFs: NewFs,
Options: []fs.Option{
{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
Name: "api_key",
},
{
Help: "If you want to download a shared folder, add this parameter",
Name: "shared_folder",
Required: false,
Advanced: true,
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
}
// Fs is the interface a cloud storage system must provide
type Fs struct {
root string
name string
features *fs.Features
dirCache *dircache.DirCache
baseClient *http.Client
options *Options
pacer *fs.Pacer
rest *rest.Client
}
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
folderID, err := strconv.Atoi(pathID)
if err != nil {
return "", false, err
}
folders, err := f.listFolders(folderID)
if err != nil {
return "", false, err
}
for _, folder := range folders.SubFolders {
if folder.Name == leaf {
pathIDOut := strconv.Itoa(folder.ID)
return pathIDOut, true, nil
}
}
return "", false, nil
}
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
folderID, err := strconv.Atoi(pathID)
if err != nil {
return "", err
}
resp, err := f.makeFolder(leaf, folderID)
if err != nil {
return "", err
}
return strconv.Itoa(resp.FolderID), err
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("1Fichier root '%s'", f.root)
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Hashes returns the supported hash types of the filesystem
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.Whirlpool)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// NewFs makes a new Fs object from the path
//
// The path is of the form remote:path
//
// Remotes are looked up in the config file. If the remote isn't
// found then NotFoundInConfigFile will be returned.
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(name string, rootleaf string, config configmap.Mapper) (fs.Fs, error) {
root := replaceReservedChars(rootleaf)
opt := new(Options)
err := configstruct.Set(config, opt)
if err != nil {
return nil, err
}
// If using a Shared Folder override root
if opt.SharedFolder != "" {
root = ""
}
// workaround for wonky parser
root = strings.Trim(root, "/")
f := &Fs{
name: name,
root: root,
options: opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
baseClient: &http.Client{},
}
f.features = (&fs.Features{
DuplicateFiles: true,
CanHaveEmptyDirectories: true,
}).Fill(f)
client := fshttp.NewClient(fs.Config)
f.rest = rest.NewClient(client).SetRoot(apiBaseURL)
f.rest.SetHeader("Authorization", "Bearer "+f.options.APIKey)
f.dirCache = dircache.New(root, rootID, f)
ctx := context.Background()
// Find the current root
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(ctx, false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
return f, nil
}
return nil, err
}
f.features.Fill(&tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/rclone/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
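The fallback above is rclone's usual convention when the configured root turns out to be a file: NewFs returns an Fs rooted at the parent directory together with fs.ErrorIsFile. A short sketch of what a caller sees (m is a configmap.Mapper; the remote name and path are hypothetical):
f, err := NewFs("remote", "dir/file.txt", m)
if err == fs.ErrorIsFile {
    // f is rooted at "dir"; the file itself is reachable as
    // f.NewObject(ctx, "file.txt")
    err = nil
}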
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.options.SharedFolder != "" {
return f.listSharedFiles(ctx, f.options.SharedFolder)
}
dirContent, err := f.listDir(ctx, dir)
if err != nil {
return nil, err
}
return dirContent, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
}
return nil, err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
files, err := f.listFiles(folderID)
if err != nil {
return nil, err
}
for _, file := range files.Items {
if file.Filename == leaf {
path, ok := f.dirCache.GetInv(directoryID)
if !ok {
return nil, errors.New("Cannot find dir in dircache")
}
return f.newObjectFromFile(ctx, path, file), nil
}
}
return nil, fs.ErrorObjectNotFound
}
// Put in to the remote path with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
existingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
default:
return nil, err
}
}
// putUnchecked uploads the object with the given name and size
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(100E9) {
return nil, errors.New("File too big, cant upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
nodeResponse, err := f.getUploadNode()
if err != nil {
return nil, err
}
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, err
}
_, err = f.uploadFile(in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
if err != nil {
return nil, err
}
fileUploadResponse, err := f.endUpload(nodeResponse.ID, nodeResponse.URL)
if err != nil {
return nil, err
}
if len(fileUploadResponse.Links) != 1 {
return nil, errors.New("unexpected amount of files")
}
link := fileUploadResponse.Links[0]
fileSize, err := strconv.ParseInt(link.Size, 10, 64)
if err != nil {
return nil, err
}
return &Object{
fs: f,
remote: remote,
file: File{
ACL: 0,
CDN: 0,
Checksum: link.Whirlpool,
ContentType: "",
Date: time.Now().Format("2006-01-02 15:04:05"),
Filename: link.Filename,
Pass: 0,
Size: int(fileSize),
URL: link.Download,
},
}, nil
}
// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.putUnchecked(ctx, in, src.Remote(), src.Size(), options...)
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(ctx, dir, true)
}
return err
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return err
}
_, err = f.removeFolder(dir, folderID)
if err != nil {
return err
}
f.dirCache.FlushDir(dir)
return nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ dircache.DirCacher = (*Fs)(nil)
)
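For completeness, a remote using this backend needs only the api_key option registered in init above; a minimal rclone.conf entry might look like this (the remote name and key are placeholders):
[fichier]
type = fichier
api_key = 0123456789abcdef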

View File

@@ -1,17 +0,0 @@
// Test 1Fichier filesystem interface
package fichier
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fs.Config.LogLevel = fs.LogLevelDebug
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFichier:",
})
}

View File

@@ -1,158 +0,0 @@
package fichier
import (
"context"
"io"
"net/http"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
)
// Object is a filesystem like object provided by an Fs
type Object struct {
fs *Fs
remote string
file File
}
// String returns a description of the Object
func (o *Object) String() string {
return o.file.Filename
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(ctx context.Context) time.Time {
modTime, err := time.Parse("2006-01-02 15:04:05", o.file.Date)
if err != nil {
return time.Now()
}
return modTime
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return int64(o.file.Size)
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.fs
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.Whirlpool {
return "", hash.ErrUnsupported
}
return o.file.Checksum, nil
}
// Storable says whether this object can be stored
func (o *Object) Storable() bool {
return true
}
// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(context.Context, time.Time) error {
return fs.ErrorCantSetModTime
//return errors.New("setting modtime is not supported for 1fichier remotes")
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
fs.FixRangeOption(options, int64(o.file.Size))
downloadToken, err := o.fs.getDownloadToken(o.file.URL)
if err != nil {
return nil, err
}
var resp *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: downloadToken.URL,
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.rest.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
return resp.Body, err
}
// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if src.Size() < 0 {
return errors.New("refusing to update with unknown size")
}
// upload with new size but old name
info, err := o.fs.putUnchecked(ctx, in, o.Remote(), src.Size(), options...)
if err != nil {
return err
}
// Delete duplicate after successful upload
err = o.Remove(ctx)
if err != nil {
return errors.Wrap(err, "failed to remove old version")
}
// Replace guts of old object with new one
*o = *info.(*Object)
return nil
}
// Remove removes this object
func (o *Object) Remove(ctx context.Context) error {
// fs.Debugf(f, "Removing file `%s` with url `%s`", o.file.Filename, o.file.URL)
_, err := o.fs.deleteFile(o.file.URL)
if err != nil {
return err
}
return nil
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
return o.file.ContentType
}
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
return o.file.URL
}
// Check the interfaces are satisfied
var (
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)

View File

@@ -1,71 +0,0 @@
/*
Translate file names for 1fichier
1Fichier reserved characters
The following characters are 1Fichier reserved characters, and can't
be used in 1Fichier folder and file names.
*/
package fichier
import (
"regexp"
"strings"
)
// charMap holds replacements for characters
//
// 1Fichier has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
'<': '＜', // FULLWIDTH LESS-THAN SIGN
'>': '＞', // FULLWIDTH GREATER-THAN SIGN
'"': '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
'\'': '＇', // FULLWIDTH APOSTROPHE
'$': '＄', // FULLWIDTH DOLLAR SIGN
'`': '｀', // FULLWIDTH GRAVE ACCENT
' ': '␠', // SYMBOL FOR SPACE
}
invCharMap map[rune]rune
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
)
func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}
// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// file names can't start with space either
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Replace reserved characters
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != ' ' {
return replacement
}
return c
}, in)
}
// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}

View File

@@ -1,24 +0,0 @@
package fichier
import "testing"
func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{"\"'<>/\\$`", `/`},
{" leading space", "␠leading space"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}

View File

@@ -1,120 +0,0 @@
package fichier
// ListFolderRequest is the request structure of the corresponding request
type ListFolderRequest struct {
FolderID int `json:"folder_id"`
}
// ListFilesRequest is the request structure of the corresponding request
type ListFilesRequest struct {
FolderID int `json:"folder_id"`
}
// DownloadRequest is the request structure of the corresponding request
type DownloadRequest struct {
URL string `json:"url"`
Single int `json:"single"`
}
// RemoveFolderRequest is the request structure of the corresponding request
type RemoveFolderRequest struct {
FolderID int `json:"folder_id"`
}
// RemoveFileRequest is the request structure of the corresponding request
type RemoveFileRequest struct {
Files []RmFile `json:"files"`
}
// RmFile is the request structure of the corresponding request
type RmFile struct {
URL string `json:"url"`
}
// GenericOKResponse is the response structure of the corresponding request
type GenericOKResponse struct {
Status string `json:"status"`
Message string `json:"message"`
}
// MakeFolderRequest is the request structure of the corresponding request
type MakeFolderRequest struct {
Name string `json:"name"`
FolderID int `json:"folder_id"`
}
// MakeFolderResponse is the response structure of the corresponding request
type MakeFolderResponse struct {
Name string `json:"name"`
FolderID int `json:"folder_id"`
}
// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
ID string `json:"id"`
URL string `json:"url"`
}
// GetTokenResponse is the response structure of the corresponding request
type GetTokenResponse struct {
URL string `json:"url"`
Status string `json:"Status"`
Message string `json:"Message"`
}
// SharedFolderResponse is the response structure of the corresponding request
type SharedFolderResponse []SharedFile
// SharedFile is the structure in which 1Fichier returns a shared file
type SharedFile struct {
Filename string `json:"filename"`
Link string `json:"link"`
Size int `json:"size"`
}
// EndFileUploadResponse is the response structure of the corresponding request
type EndFileUploadResponse struct {
Incoming int `json:"incoming"`
Links []struct {
Download string `json:"download"`
Filename string `json:"filename"`
Remove string `json:"remove"`
Size string `json:"size"`
Whirlpool string `json:"whirlpool"`
} `json:"links"`
}
// File is the structure in which 1Fichier returns a file
type File struct {
ACL int `json:"acl"`
CDN int `json:"cdn"`
Checksum string `json:"checksum"`
ContentType string `json:"content-type"`
Date string `json:"date"`
Filename string `json:"filename"`
Pass int `json:"pass"`
Size int `json:"size"`
URL string `json:"url"`
}
// FilesList is the structure in which 1Fichier returns a list of files
type FilesList struct {
Items []File `json:"items"`
Status string `json:"Status"`
}
// Folder is the structure in which 1Fichier returns a folder
type Folder struct {
CreateDate string `json:"create_date"`
ID int `json:"id"`
Name string `json:"name"`
Pass int `json:"pass"`
}
// FoldersList is the structure in which 1Fichier returns a list of folders
type FoldersList struct {
FolderID int `json:"folder_id"`
Name string `json:"name"`
Status string `json:"Status"`
SubFolders []Folder `json:"sub_folders"`
}
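To illustrate how these types fit together, a decoded folder listing could populate FoldersList roughly like this (a sketch with made-up values, not a recorded API response):
list := FoldersList{
    FolderID: 0,
    Name:     "root",
    Status:   "OK",
    SubFolders: []Folder{
        {CreateDate: "2019-06-01 12:00:00", ID: 12345, Name: "backup", Pass: 0},
    },
}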

View File

@@ -2,8 +2,6 @@
package ftp
import (
-"context"
-"crypto/tls"
"io"
"net/textproto"
"os"
@@ -12,14 +10,14 @@ import (
"time"
"github.com/jlaffaye/ftp"
+"github.com/ncw/rclone/fs"
+"github.com/ncw/rclone/fs/config/configmap"
+"github.com/ncw/rclone/fs/config/configstruct"
+"github.com/ncw/rclone/fs/config/obscure"
+"github.com/ncw/rclone/fs/hash"
+"github.com/ncw/rclone/lib/pacer"
+"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
-"github.com/rclone/rclone/fs"
-"github.com/rclone/rclone/fs/config/configmap"
-"github.com/rclone/rclone/fs/config/configstruct"
-"github.com/rclone/rclone/fs/config/obscure"
-"github.com/rclone/rclone/fs/hash"
-"github.com/rclone/rclone/lib/pacer"
-"github.com/rclone/rclone/lib/readers"
)
// Register with Fs
@@ -48,20 +46,11 @@ func init() {
Help: "FTP password",
IsPassword: true,
Required: true,
-}, {
-Name: "tls",
-Help: "Use FTP over TLS (Implicit)",
-Default: false,
}, {
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
Default: 0,
Advanced: true,
-}, {
-Name: "no_check_certificate",
-Help: "Do not verify the TLS certificate of the server",
-Default: false,
-Advanced: true,
},
},
})
@@ -73,9 +62,7 @@ type Options struct {
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
-TLS bool `config:"tls"`
Concurrency int `config:"concurrency"`
-SkipVerifyTLSCert bool `config:"no_check_certificate"`
}
// Fs represents a remote FTP server
@@ -133,15 +120,7 @@ func (f *Fs) Features() *fs.Features {
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
fs.Debugf(f, "Connecting to FTP server")
-ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)}
-if f.opt.TLS {
-tlsConfig := &tls.Config{
-ServerName: f.opt.Host,
-InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
-}
-ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
-}
-c, err := ftp.Dial(f.dialAddr, ftpConfig...)
+c, err := ftp.DialTimeout(f.dialAddr, fs.Config.ConnectTimeout)
if err != nil {
fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
return nil, errors.Wrap(err, "ftpConnection Dial")
@@ -203,7 +182,6 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
-ctx := context.Background()
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
// Parse config into Options struct
opt := new(Options)
@@ -225,11 +203,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
}
dialAddr := opt.Host + ":" + port
-protocol := "ftp://"
-if opt.TLS {
-protocol = "ftps://"
-}
-u := protocol + path.Join(dialAddr+"/", root)
+u := "ftp://" + path.Join(dialAddr+"/", root)
f := &Fs{
name: name,
root: root,
@@ -256,7 +230,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
if f.root == "." {
f.root = ""
}
-_, err := f.NewObject(ctx, remote)
+_, err := f.NewObject(remote)
if err != nil {
if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
// File doesn't exist so return old f
@@ -321,7 +295,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
+func (f *Fs) NewObject(remote string) (o fs.Object, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
entry, err := f.findItem(remote)
if err != nil {
@@ -365,42 +339,17 @@
//
// This should return ErrDirNotFound if the directory isn't
// found.
-func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
// defer fs.Trace(dir, "curlevel=%d", curlevel)("")
c, err := f.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "list")
}
-var listErr error
-var files []*ftp.Entry
-resultchan := make(chan []*ftp.Entry, 1)
-errchan := make(chan error, 1)
-go func() {
-result, err := c.List(path.Join(f.root, dir))
+files, err := c.List(path.Join(f.root, dir))
f.putFtpConnection(&c, err)
if err != nil {
-errchan <- err
-return
+return nil, translateErrorDir(err)
}
-resultchan <- result
-}()
-// Wait for List for up to Timeout seconds
-timer := time.NewTimer(fs.Config.Timeout)
-select {
-case listErr = <-errchan:
-timer.Stop()
-return nil, translateErrorDir(listErr)
-case files = <-resultchan:
-timer.Stop()
-case <-timer.C:
-// if timer fired assume no error but connection dead
-fs.Errorf(f, "Timeout when waiting for List")
-return nil, errors.New("Timeout when waiting for List")
-}
// Annoyingly FTP returns success for a directory which
// doesn't exist, so check it really doesn't exist if no
// entries found.
@@ -455,7 +404,7 @@ func (f *Fs) Precision() time.Duration {
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
-func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// fs.Debugf(f, "Trying to put file %s", src.Remote())
err := f.mkParentDir(src.Remote())
if err != nil {
@@ -465,13 +414,13 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
fs: f,
remote: src.Remote(),
}
-err = o.Update(ctx, in, src, options...)
+err = o.Update(in, src, options...)
return o, err
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-return f.Put(ctx, in, src, options...)
+return f.Put(in, src, options...)
}
// getInfo reads the FileInfo for a path
@@ -549,7 +498,7 @@ func (f *Fs) mkParentDir(remote string) error {
}
// Mkdir creates the directory if it doesn't exist
-func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
+func (f *Fs) Mkdir(dir string) (err error) {
// defer fs.Trace(dir, "")("err=%v", &err)
root := path.Join(f.root, dir)
return f.mkdir(root)
@@ -558,7 +507,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
-func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+func (f *Fs) Rmdir(dir string) error {
c, err := f.getFtpConnection()
if err != nil {
return errors.Wrap(translateErrorFile(err), "Rmdir")
@@ -569,7 +518,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
}
// Move renames a remote file object
-func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
@@ -591,7 +540,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, errors.Wrap(err, "Move Rename failed")
}
-dstObj, err := f.NewObject(ctx, remote)
+dstObj, err := f.NewObject(remote)
if err != nil {
return nil, errors.Wrap(err, "Move NewObject failed")
}
@@ -606,7 +555,7 @@
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -669,7 +618,7 @@ func (o *Object) Remote() string {
}
// Hash returns the hash of an object returning a lowercase hex string
-func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
+func (o *Object) Hash(t hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
@@ -679,12 +628,12 @@ func (o *Object) Size() int64 {
}
// ModTime returns the modification time of the object
-func (o *Object) ModTime(ctx context.Context) time.Time {
+func (o *Object) ModTime() time.Time {
return o.info.ModTime
}
// SetModTime sets the modification time of the object
-func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
+func (o *Object) SetModTime(modTime time.Time) error {
return nil
}
@@ -745,7 +694,7 @@ func (f *ftpReadCloser) Close() error {
}
// Open an object for read
-func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
+func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
// defer fs.Trace(o, "")("rc=%v, err=%v", &rc, &err)
path := path.Join(o.fs.root, o.remote)
var offset, limit int64 = 0, -1
@@ -779,7 +728,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
-func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
// defer fs.Trace(o, "src=%v", src)("err=%v", &err)
path := path.Join(o.fs.root, o.remote)
// remove the file if upload failed
@@ -789,7 +738,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// may still be dealing with it for a moment. A sleep isn't ideal but I haven't been
// able to think of a better method to find out if the server has finished - ncw
time.Sleep(1 * time.Second)
-removeErr := o.Remove(ctx)
+removeErr := o.Remove()
if removeErr != nil {
fs.Debugf(o, "Failed to remove: %v", removeErr)
} else {
@@ -815,7 +764,7 @@
}
// Remove an object
-func (o *Object) Remove(ctx context.Context) (err error) {
+func (o *Object) Remove() (err error) {
// defer fs.Trace(o, "")("err=%v", &err)
path := path.Join(o.fs.root, o.remote)
// Check if it's a directory or a file
@@ -824,7 +773,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
return err
}
if info.IsDir {
-err = o.fs.Rmdir(ctx, o.remote)
+err = o.fs.Rmdir(o.remote)
} else {
c, err := o.fs.getFtpConnection()
if err != nil {

View File

@@ -4,8 +4,8 @@ package ftp_test
import (
"testing"
-"github.com/rclone/rclone/backend/ftp"
-"github.com/rclone/rclone/fstest/fstests"
+"github.com/ncw/rclone/backend/ftp"
+"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote

View File

@@ -1,4 +1,7 @@
// Package googlecloudstorage provides an interface to Google Cloud Storage
+// +build go1.9
package googlecloudstorage
/*
@@ -28,18 +31,18 @@ import (
"sync"
"time"
+"github.com/ncw/rclone/fs"
+"github.com/ncw/rclone/fs/config"
+"github.com/ncw/rclone/fs/config/configmap"
+"github.com/ncw/rclone/fs/config/configstruct"
+"github.com/ncw/rclone/fs/config/obscure"
+"github.com/ncw/rclone/fs/fserrors"
+"github.com/ncw/rclone/fs/fshttp"
+"github.com/ncw/rclone/fs/hash"
+"github.com/ncw/rclone/fs/walk"
+"github.com/ncw/rclone/lib/oauthutil"
+"github.com/ncw/rclone/lib/pacer"
"github.com/pkg/errors"
-"github.com/rclone/rclone/fs"
-"github.com/rclone/rclone/fs/config"
-"github.com/rclone/rclone/fs/config/configmap"
-"github.com/rclone/rclone/fs/config/configstruct"
-"github.com/rclone/rclone/fs/config/obscure"
-"github.com/rclone/rclone/fs/fserrors"
-"github.com/rclone/rclone/fs/fshttp"
-"github.com/rclone/rclone/fs/hash"
-"github.com/rclone/rclone/fs/walk"
-"github.com/rclone/rclone/lib/oauthutil"
-"github.com/rclone/rclone/lib/pacer"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
@@ -61,7 +64,7 @@ const (
var (
// Description of how to auth for this app
storageConfig = &oauth2.Config{
-Scopes: []string{storage.DevstorageReadWriteScope},
+Scopes: []string{storage.DevstorageFullControlScope},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
@@ -473,7 +476,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object,
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
@@ -485,7 +488,7 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
-func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) (err error) {
+func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
root := f.root
rootLength := len(root)
if dir != "" {
@@ -574,9 +577,9 @@ func (f *Fs) markBucketOK() {
}
// listDir lists a single directory
-func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
// List the objects
-err = f.list(ctx, dir, false, func(remote string, object *storage.Object, isDirectory bool) error {
+err = f.list(dir, false, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
@@ -633,11 +636,11 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
//
// This should return ErrDirNotFound if the directory isn't
// found.
-func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
if f.bucket == "" {
return f.listBuckets(dir)
}
-return f.listDir(ctx, dir)
+return f.listDir(dir)
}
// ListR lists the objects and directories of the Fs starting
@@ -656,12 +659,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
-func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
+func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
-err = f.list(ctx, dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
+err = f.list(dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
@@ -681,22 +684,22 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
-func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: src.Remote(),
}
-return o, o.Update(ctx, in, src, options...)
+return o, o.Update(in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-return f.Put(ctx, in, src, options...)
+return f.Put(in, src, options...)
}
// Mkdir creates the bucket if it doesn't exist
-func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
+func (f *Fs) Mkdir(dir string) (err error) {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.bucketOK {
@@ -755,7 +758,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
-func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
+func (f *Fs) Rmdir(dir string) (err error) {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
@@ -785,8 +788,8 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
-err := f.Mkdir(ctx, "")
+err := f.Mkdir("")
if err != nil {
return nil, err
}
@@ -845,7 +848,7 @@ func (o *Object) Remote() string {
}
// Hash returns the Md5sum of an object returning a lowercase hex string
-func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
+func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
@@ -919,7 +922,7 @@ func (o *Object) readMetaData() (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
-func (o *Object) ModTime(ctx context.Context) time.Time {
+func (o *Object) ModTime() time.Time {
err := o.readMetaData()
if err != nil {
// fs.Logf(o, "Failed to read metadata: %v", err)
@@ -936,7 +939,7 @@ func metadataFromModTime(modTime time.Time) map[string]string {
}
// SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
+func (o *Object) SetModTime(modTime time.Time) (err error) {
// This only adds metadata so will perserve other metadata
object := storage.Object{
Bucket: o.fs.bucket,
@@ -961,7 +964,7 @@ func (o *Object) Storable() bool {
}
// Open an object for read
-func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
req, err := http.NewRequest("GET", o.url, nil)
if err != nil {
return nil, err
@@ -992,17 +995,18 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
-func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-err := o.fs.Mkdir(ctx, "")
+err := o.fs.Mkdir("")
if err != nil {
return err
}
-modTime := src.ModTime(ctx)
+modTime := src.ModTime()
object := storage.Object{
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,
-ContentType: fs.MimeType(ctx, src),
+ContentType: fs.MimeType(src),
+Updated: modTime.Format(timeFormatOut), // Doesn't get set
Metadata: metadataFromModTime(modTime),
}
var newObject *storage.Object
@@ -1023,7 +1027,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Remove an object
-func (o *Object) Remove(ctx context.Context) (err error) {
+func (o *Object) Remove() (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
return shouldRetry(err)
@@ -1032,7 +1036,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
}
// MimeType of an Object if known, "" otherwise
-func (o *Object) MimeType(ctx context.Context) string {
+func (o *Object) MimeType() string {
return o.mimeType
}

View File

@@ -1,12 +1,14 @@
// Test GoogleCloudStorage filesystem interface
+// +build go1.9
package googlecloudstorage_test
import (
"testing"
-"github.com/rclone/rclone/backend/googlecloudstorage"
-"github.com/rclone/rclone/fstest/fstests"
+"github.com/ncw/rclone/backend/googlecloudstorage"
+"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote

View File

@@ -0,0 +1,6 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build !go1.9
package googlecloudstorage

View File

@@ -1,148 +0,0 @@
// This file contains the albums abstraction
package googlephotos
import (
"path"
"strings"
"sync"
"github.com/rclone/rclone/backend/googlephotos/api"
)
// All the albums
type albums struct {
mu sync.Mutex
dupes map[string][]*api.Album // duplicated names
byID map[string]*api.Album // ..indexed by ID
byTitle map[string]*api.Album // ..indexed by Title
path map[string][]string // partial album names to directory
}
// newAlbums makes a new empty albums collection
func newAlbums() *albums {
return &albums{
dupes: map[string][]*api.Album{},
byID: map[string]*api.Album{},
byTitle: map[string]*api.Album{},
path: map[string][]string{},
}
}
// add an album
func (as *albums) add(album *api.Album) {
// Munge the name of the album into a sensible path name
album.Title = path.Clean(album.Title)
if album.Title == "." || album.Title == "/" {
album.Title = addID("", album.ID)
}
as.mu.Lock()
as._add(album)
as.mu.Unlock()
}
// _add an album - call with lock held
func (as *albums) _add(album *api.Album) {
// update dupes by title
dupes := as.dupes[album.Title]
dupes = append(dupes, album)
as.dupes[album.Title] = dupes
// Dedupe the album name if necessary
if len(dupes) >= 2 {
// If this is the first dupe, then need to adjust the first one
if len(dupes) == 2 {
firstAlbum := dupes[0]
as._del(firstAlbum)
as._add(firstAlbum)
// undo add of firstAlbum to dupes
as.dupes[album.Title] = dupes
}
album.Title = addID(album.Title, album.ID)
}
// Store the new album
as.byID[album.ID] = album
as.byTitle[album.Title] = album
// Store the partial paths
dir, leaf := album.Title, ""
for dir != "" {
i := strings.LastIndex(dir, "/")
if i >= 0 {
dir, leaf = dir[:i], dir[i+1:]
} else {
dir, leaf = "", dir
}
dirs := as.path[dir]
found := false
for _, dir := range dirs {
if dir == leaf {
found = true
}
}
if !found {
as.path[dir] = append(as.path[dir], leaf)
}
}
}
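The partial-path bookkeeping above is easiest to see on a concrete case; this sketch matches the expectations asserted in the tests below (getDirs is defined at the end of this file):
as := newAlbums()
as.add(&api.Album{Title: "one/sub", ID: "1sub"})
dirs, _ := as.getDirs("")   // -> ["one"]
dirs, _ = as.getDirs("one") // -> ["sub"]
_ = dirs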
// del an album
func (as *albums) del(album *api.Album) {
as.mu.Lock()
as._del(album)
as.mu.Unlock()
}
// _del an album - call with lock held
func (as *albums) _del(album *api.Album) {
// We leave the entry in dupes so that deleting an album doesn't cause others to be renamed
// Remove from byID and byTitle
delete(as.byID, album.ID)
delete(as.byTitle, album.Title)
// Remove from paths
dir, leaf := album.Title, ""
for dir != "" {
// Can't delete if this dir exists anywhere in the path structure
if _, found := as.path[dir]; found {
break
}
i := strings.LastIndex(dir, "/")
if i >= 0 {
dir, leaf = dir[:i], dir[i+1:]
} else {
dir, leaf = "", dir
}
dirs := as.path[dir]
for i, dir := range dirs {
if dir == leaf {
dirs = append(dirs[:i], dirs[i+1:]...)
break
}
}
if len(dirs) == 0 {
delete(as.path, dir)
} else {
as.path[dir] = dirs
}
}
}
// get an album by title
func (as *albums) get(title string) (album *api.Album, ok bool) {
as.mu.Lock()
defer as.mu.Unlock()
album, ok = as.byTitle[title]
return album, ok
}
// getDirs gets directories below an album path
func (as *albums) getDirs(albumPath string) (dirs []string, ok bool) {
as.mu.Lock()
defer as.mu.Unlock()
dirs, ok = as.path[albumPath]
return dirs, ok
}

View File

@@ -1,311 +0,0 @@
package googlephotos
import (
"testing"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/stretchr/testify/assert"
)
func TestNewAlbums(t *testing.T) {
albums := newAlbums()
assert.NotNil(t, albums.dupes)
assert.NotNil(t, albums.byID)
assert.NotNil(t, albums.byTitle)
assert.NotNil(t, albums.path)
}
func TestAlbumsAdd(t *testing.T) {
albums := newAlbums()
assert.Equal(t, map[string][]*api.Album{}, albums.dupes)
assert.Equal(t, map[string]*api.Album{}, albums.byID)
assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
assert.Equal(t, map[string][]string{}, albums.path)
a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one"},
}, albums.path)
a2 := &api.Album{
Title: "two",
ID: "2",
}
albums.add(a2)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"two": a2,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two"},
}, albums.path)
// Add a duplicate
a2a := &api.Album{
Title: "two",
ID: "2a",
}
albums.add(a2a)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
"2a": a2a,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
}, albums.path)
// Add a sub directory
a1sub := &api.Album{
Title: "one/sub",
ID: "1sub",
}
albums.add(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
// Add a weird path
a0 := &api.Album{
Title: "/../././..////.",
ID: "0",
}
albums.add(a0)
assert.Equal(t, map[string][]*api.Album{
"{0}": []*api.Album{a0},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"0": a0,
"1": a1,
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"{0}": a0,
"one": a1,
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}", "{0}"},
"one": []string{"sub"},
}, albums.path)
}
func TestAlbumsDel(t *testing.T) {
albums := newAlbums()
a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)
a2 := &api.Album{
Title: "two",
ID: "2",
}
albums.add(a2)
// Add a duplicate
a2a := &api.Album{
Title: "two",
ID: "2a",
}
albums.add(a2a)
// Add a sub directory
a1sub := &api.Album{
Title: "one/sub",
ID: "1sub",
}
albums.add(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
albums.del(a1)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
albums.del(a2)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one/sub": a1sub,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2a}"},
"one": []string{"sub"},
}, albums.path)
albums.del(a2a)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one/sub": a1sub,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one"},
"one": []string{"sub"},
}, albums.path)
albums.del(a1sub)
assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{}, albums.byID)
assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
assert.Equal(t, map[string][]string{}, albums.path)
}
func TestAlbumsGet(t *testing.T) {
albums := newAlbums()
a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)
album, ok := albums.get("one")
assert.Equal(t, true, ok)
assert.Equal(t, a1, album)
album, ok = albums.get("notfound")
assert.Equal(t, false, ok)
assert.Nil(t, album)
}
func TestAlbumsGetDirs(t *testing.T) {
albums := newAlbums()
a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)
dirs, ok := albums.getDirs("")
assert.Equal(t, true, ok)
assert.Equal(t, []string{"one"}, dirs)
dirs, ok = albums.getDirs("notfound")
assert.Equal(t, false, ok)
assert.Nil(t, dirs)
}

View File

@@ -1,190 +0,0 @@
package api
import (
"fmt"
"time"
)
// ErrorDetails in the internals of the Error type
type ErrorDetails struct {
Code int `json:"code"`
Message string `json:"message"`
Status string `json:"status"`
}
// Error is returned on errors
type Error struct {
Details ErrorDetails `json:"error"`
}
// Error satisfies the error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s (%d %s)", e.Details.Message, e.Details.Code, e.Details.Status)
}
// Album of photos
type Album struct {
ID string `json:"id,omitempty"`
Title string `json:"title"`
ProductURL string `json:"productUrl,omitempty"`
MediaItemsCount string `json:"mediaItemsCount,omitempty"`
CoverPhotoBaseURL string `json:"coverPhotoBaseUrl,omitempty"`
CoverPhotoMediaItemID string `json:"coverPhotoMediaItemId,omitempty"`
IsWriteable bool `json:"isWriteable,omitempty"`
}
// ListAlbums is returned from albums.list and sharedAlbums.list
type ListAlbums struct {
Albums []Album `json:"albums"`
SharedAlbums []Album `json:"sharedAlbums"`
NextPageToken string `json:"nextPageToken"`
}
// CreateAlbum creates an Album
type CreateAlbum struct {
Album *Album `json:"album"`
}
// MediaItem is a photo or video
type MediaItem struct {
ID string `json:"id"`
ProductURL string `json:"productUrl"`
BaseURL string `json:"baseUrl"`
MimeType string `json:"mimeType"`
MediaMetadata struct {
CreationTime time.Time `json:"creationTime"`
Width string `json:"width"`
Height string `json:"height"`
Photo struct {
} `json:"photo"`
} `json:"mediaMetadata"`
Filename string `json:"filename"`
}
// MediaItems is returned from mediaitems.list, mediaitems.search
type MediaItems struct {
MediaItems []MediaItem `json:"mediaItems"`
NextPageToken string `json:"nextPageToken"`
}
// Content categories
// NONE Default content category. This category is ignored when any other category is used in the filter.
// LANDSCAPES Media items containing landscapes.
// RECEIPTS Media items containing receipts.
// CITYSCAPES Media items containing cityscapes.
// LANDMARKS Media items containing landmarks.
// SELFIES Media items that are selfies.
// PEOPLE Media items containing people.
// PETS Media items containing pets.
// WEDDINGS Media items from weddings.
// BIRTHDAYS Media items from birthdays.
// DOCUMENTS Media items containing documents.
// TRAVEL Media items taken during travel.
// ANIMALS Media items containing animals.
// FOOD Media items containing food.
// SPORT Media items from sporting events.
// NIGHT Media items taken at night.
// PERFORMANCES Media items from performances.
// WHITEBOARDS Media items containing whiteboards.
// SCREENSHOTS Media items that are screenshots.
// UTILITY Media items that are considered to be utility. These include, but aren't limited to documents, screenshots, whiteboards etc.
// ARTS Media items containing art.
// CRAFTS Media items containing crafts.
// FASHION Media items related to fashion.
// HOUSES Media items containing houses.
// GARDENS Media items containing gardens.
// FLOWERS Media items containing flowers.
// HOLIDAYS Media items taken of holidays.
// MediaTypes
// ALL_MEDIA Treated as if no filters are applied. All media types are included.
// VIDEO All media items that are considered videos. This also includes movies the user has created using the Google Photos app.
// PHOTO All media items that are considered photos. This includes .bmp, .gif, .ico, .jpg (and other spellings), .tiff, .webp and special photo types such as iOS live photos, Android motion photos, panoramas, photospheres.
// Features
// NONE Treated as if no filters are applied. All features are included.
// FAVORITES Media items that the user has marked as favorites in the Google Photos app.
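// The snippet below is an illustrative addition (not part of the original
// file): it combines the category and media type strings documented above
// into a Filters value, using the filter types defined later in this file.
var exampleTravelPhotos = Filters{
	ContentFilter: &ContentFilter{
		IncludedContentCategories: []string{"TRAVEL", "LANDSCAPES"},
	},
	MediaTypeFilter: &MediaTypeFilter{
		MediaTypes: []string{"PHOTO"},
	},
}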
// Date is used as part of SearchFilter
type Date struct {
Year int `json:"year,omitempty"`
Month int `json:"month,omitempty"`
Day int `json:"day,omitempty"`
}
// DateFilter is used to add date ranges to media item queries
type DateFilter struct {
Dates []Date `json:"dates,omitempty"`
Ranges []struct {
StartDate Date `json:"startDate,omitempty"`
EndDate Date `json:"endDate,omitempty"`
} `json:"ranges,omitempty"`
}
// ContentFilter is used to add content categories to media item queries
type ContentFilter struct {
IncludedContentCategories []string `json:"includedContentCategories,omitempty"`
ExcludedContentCategories []string `json:"excludedContentCategories,omitempty"`
}
// MediaTypeFilter is used to add media types to media item queries
type MediaTypeFilter struct {
MediaTypes []string `json:"mediaTypes,omitempty"`
}
// FeatureFilter is used to add features to media item queries
type FeatureFilter struct {
IncludedFeatures []string `json:"includedFeatures,omitempty"`
}
// Filters combines all the filter types for media item queries
type Filters struct {
DateFilter *DateFilter `json:"dateFilter,omitempty"`
ContentFilter *ContentFilter `json:"contentFilter,omitempty"`
MediaTypeFilter *MediaTypeFilter `json:"mediaTypeFilter,omitempty"`
FeatureFilter *FeatureFilter `json:"featureFilter,omitempty"`
IncludeArchivedMedia *bool `json:"includeArchivedMedia,omitempty"`
ExcludeNonAppCreatedData *bool `json:"excludeNonAppCreatedData,omitempty"`
}
// SearchFilter is used with mediaItems.search
type SearchFilter struct {
AlbumID string `json:"albumId,omitempty"`
PageSize int `json:"pageSize"`
PageToken string `json:"pageToken,omitempty"`
Filters *Filters `json:"filters,omitempty"`
}
// SimpleMediaItem is part of NewMediaItem
type SimpleMediaItem struct {
UploadToken string `json:"uploadToken"`
}
// NewMediaItem is a single media item for upload
type NewMediaItem struct {
Description string `json:"description"`
SimpleMediaItem SimpleMediaItem `json:"simpleMediaItem"`
}
// BatchCreateRequest creates media items from upload tokens
type BatchCreateRequest struct {
AlbumID string `json:"albumId,omitempty"`
NewMediaItems []NewMediaItem `json:"newMediaItems"`
}
// BatchCreateResponse is returned from BatchCreateRequest
type BatchCreateResponse struct {
NewMediaItemResults []struct {
UploadToken string `json:"uploadToken"`
Status struct {
Message string `json:"message"`
Code int `json:"code"`
} `json:"status"`
MediaItem MediaItem `json:"mediaItem"`
} `json:"newMediaItemResults"`
}
// BatchRemoveItems is for removing items from an album
type BatchRemoveItems struct {
MediaItemIds []string `json:"mediaItemIds"`
}
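// Illustrative addition (not in the original file): a complete SearchFilter
// as it would be POSTed to mediaItems:search, requesting a page of photos
// taken in June 2019. All of the types used here are defined above.
var exampleSearch = SearchFilter{
	PageSize: 100,
	Filters: &Filters{
		DateFilter: &DateFilter{
			Dates: []Date{{Year: 2019, Month: 6}},
		},
		MediaTypeFilter: &MediaTypeFilter{
			MediaTypes: []string{"PHOTO"},
		},
	},
}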

View File

@@ -1,970 +0,0 @@
// Package googlephotos provides an interface to Google Photos
package googlephotos
// FIXME Resumable uploads not implemented - rclone can't resume uploads in general
import (
"context"
"encoding/json"
"fmt"
"io"
golog "log"
"net/http"
"net/url"
"path"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
)
var (
errCantUpload = errors.New("can't upload files here")
errCantMkdir = errors.New("can't make directories here")
errCantRmdir = errors.New("can't remove this directory")
errAlbumDelete = errors.New("google photos API does not implement deleting albums")
errRemove = errors.New("google photos API only implements removing files from albums")
errOwnAlbums = errors.New("google photos API only allows uploading to albums rclone created")
)
const (
rcloneClientID = "202264815644-rt1o1c9evjaotbpbab10m83i8cnjk077.apps.googleusercontent.com"
rcloneEncryptedClientSecret = "kLJLretPefBgrDHosdml_nlF64HZ9mUcO85X5rdjYBPP8ChA-jr3Ow"
rootURL = "https://photoslibrary.googleapis.com/v1"
listChunks = 100 // chunk size to read directory listings
albumChunks = 50 // chunk size to read album listings
minSleep = 10 * time.Millisecond
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly"
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
)
var (
// Description of how to auth for this app
oauthConfig = &oauth2.Config{
Scopes: []string{
scopeReadWrite,
},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,
}
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "google photos",
Prefix: "gphotos",
Description: "Google Photos",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
return
}
// Fill in the scopes
if opt.ReadOnly {
oauthConfig.Scopes[0] = scopeReadOnly
} else {
oauthConfig.Scopes[0] = scopeReadWrite
}
// Do the oauth
err = oauthutil.Config("google photos", name, m, oauthConfig)
if err != nil {
golog.Fatalf("Failed to configure token: %v", err)
}
// Warn the user
fmt.Print(`
*** IMPORTANT: All media items uploaded to Google Photos with rclone
*** are stored in full resolution at original quality. These uploads
*** will count towards storage in your Google Account.
`)
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Google Application Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Google Application Client Secret\nLeave blank normally.",
}, {
Name: "read_only",
Default: false,
Help: `Set to make the Google Photos backend read only.
If you choose read only then rclone will only request read only access
to your photos, otherwise rclone will request full access.`,
}, {
Name: "read_size",
Default: false,
Help: `Set to read the size of media items.
Normally rclone does not read the size of media items since this takes
another transaction. This isn't necessary for syncing. However
rclone mount needs to know the size of files in advance of reading
them, so setting this flag when using rclone mount is recommended if
you want to read the media.`,
Advanced: true,
}},
})
}
// Options defines the configuration for this backend
type Options struct {
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
}
// Fs represents a remote storage server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the Google Photos server
pacer *fs.Pacer // To pace the API calls
startTime time.Time // time Fs was started - used for datestamps
albumsMu sync.Mutex // protect albums (but not contents)
albums map[bool]*albums // albums, shared or not
uploadedMu sync.Mutex // to protect the below
uploaded dirtree.DirTree // record of uploaded items
createMu sync.Mutex // held when creating albums to prevent dupes
}
// Object describes a storage object
//
// Will definitely have info but maybe not meta
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
url string // download path
id string // ID of this object
bytes int64 // Bytes in the object
modTime time.Time // Modified time of the object
mimeType string
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Google Photos path %q", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// dirTime returns the time to set a directory to
func (f *Fs) dirTime() time.Time {
return f.startTime
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
body, err := rest.ReadBody(resp)
if err != nil {
body = nil
}
var e = api.Error{
Details: api.ErrorDetails{
Code: resp.StatusCode,
Message: string(body),
Status: resp.Status,
},
}
if body != nil {
_ = json.Unmarshal(body, &e)
}
return &e
}
// NewFs constructs an Fs from the path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
}
root = strings.Trim(path.Clean(root), "/")
if root == "." || root == "/" {
root = ""
}
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
startTime: time.Now(),
albums: map[bool]*albums{},
uploaded: dirtree.New(),
}
f.features = (&fs.Features{
ReadMimeType: true,
}).Fill(f)
f.srv.SetErrorHandler(errorHandler)
_, _, pattern := patterns.match(f.root, "", true)
if pattern != nil && pattern.isFile {
oldRoot := f.root
var leaf string
f.root, leaf = path.Split(f.root)
f.root = strings.TrimRight(f.root, "/")
_, err := f.NewObject(context.TODO(), leaf)
if err == nil {
return f, fs.ErrorIsFile
}
f.root = oldRoot
}
return f, nil
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.MediaItem) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
if info != nil {
o.setMetaData(info)
} else {
err := o.readMetaData(ctx) // reads info and meta, returning an error
if err != nil {
return nil, err
}
}
return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
defer log.Trace(f, "remote=%q", remote)("")
return f.newObjectWithInfo(ctx, remote, nil)
}
// addID adds the ID to name
func addID(name string, ID string) string {
idStr := "{" + ID + "}"
if name == "" {
return idStr
}
return name + " " + idStr
}
// addFileID adds the ID to the fileName passed in
func addFileID(fileName string, ID string) string {
ext := path.Ext(fileName)
base := fileName[:len(fileName)-len(ext)]
return addID(base, ID) + ext
}
var idRe = regexp.MustCompile(`\{([A-Za-z0-9_-]{55,})\}`)
// findID finds an ID in string if one is there or ""
func findID(name string) string {
match := idRe.FindStringSubmatch(name)
if match == nil {
return ""
}
return match[1]
}
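// exampleIDRoundTrip is an illustrative addition (not in the original file)
// showing how addFileID and findID cooperate: an ID of 55 or more characters
// survives the round trip, while shorter braced strings are ignored by idRe.
func exampleIDRoundTrip(id string) bool {
	name := addFileID("IMG_1234.jpg", id) // "IMG_1234 {<id>}.jpg"
	return findID(name) == id             // true only for 55+ character IDs
}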
// list the albums into an internal cache
// FIXME cache invalidation
func (f *Fs) listAlbums(shared bool) (all *albums, err error) {
f.albumsMu.Lock()
defer f.albumsMu.Unlock()
all, ok := f.albums[shared]
if ok && all != nil {
return all, nil
}
opts := rest.Opts{
Method: "GET",
Path: "/albums",
Parameters: url.Values{},
}
if shared {
opts.Path = "/sharedAlbums"
}
all = newAlbums()
opts.Parameters.Set("pageSize", strconv.Itoa(albumChunks))
lastID := ""
for {
var result api.ListAlbums
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't list albums")
}
newAlbums := result.Albums
if shared {
newAlbums = result.SharedAlbums
}
if len(newAlbums) > 0 && newAlbums[0].ID == lastID {
// skip first if ID duplicated from last page
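// (editor's note: presumably the API can repeat the final item of one
// page at the start of the next, hence this guard)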
newAlbums = newAlbums[1:]
}
if len(newAlbums) > 0 {
lastID = newAlbums[len(newAlbums)-1].ID
}
for i := range newAlbums {
all.add(&newAlbums[i])
}
if result.NextPageToken == "" {
break
}
opts.Parameters.Set("pageToken", result.NextPageToken)
}
f.albums[shared] = all
return all, nil
}
// listFn is called from list to handle an object.
type listFn func(remote string, object *api.MediaItem, isDirectory bool) error
// list the objects into the function supplied
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(filter api.SearchFilter, fn listFn) (err error) {
opts := rest.Opts{
Method: "POST",
Path: "/mediaItems:search",
}
filter.PageSize = listChunks
filter.PageToken = ""
lastID := ""
for {
var result api.MediaItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &filter, &result)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't list files")
}
items := result.MediaItems
if len(items) > 0 && items[0].ID == lastID {
// skip first if ID duplicated from last page
items = items[1:]
}
if len(items) > 0 {
lastID = items[len(items)-1].ID
}
for i := range items {
item := &result.MediaItems[i]
remote := item.Filename
remote = strings.Replace(remote, "/", "", -1)
err = fn(remote, item, false)
if err != nil {
return err
}
}
if result.NextPageToken == "" {
break
}
filter.PageToken = result.NextPageToken
}
return nil
}
// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *api.MediaItem, isDirectory bool) (fs.DirEntry, error) {
if isDirectory {
d := fs.NewDir(remote, f.dirTime())
return d, nil
}
o := &Object{
fs: f,
remote: remote,
}
o.setMetaData(item)
return o, nil
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {
// List the objects
err = f.list(filter, func(remote string, item *api.MediaItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, prefix+remote, item, isDirectory)
if err != nil {
return err
}
if entry != nil {
entries = append(entries, entry)
}
return nil
})
if err != nil {
return nil, err
}
// Dedupe the file names
dupes := map[string]int{}
for _, entry := range entries {
o, ok := entry.(*Object)
if ok {
dupes[o.remote]++
}
}
for _, entry := range entries {
o, ok := entry.(*Object)
if ok {
duplicated := dupes[o.remote] > 1
if duplicated || o.remote == "" {
o.remote = addFileID(o.remote, o.id)
}
}
}
return entries, err
}
// listUploads lists a single directory from the uploads
func (f *Fs) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
f.uploadedMu.Lock()
entries, ok := f.uploaded[dir]
f.uploadedMu.Unlock()
if !ok && dir != "" {
return nil, fs.ErrorDirNotFound
}
return entries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
match, prefix, pattern := patterns.match(f.root, dir, false)
if pattern == nil || pattern.isFile {
return nil, fs.ErrorDirNotFound
}
if pattern.toEntries != nil {
return pattern.toEntries(ctx, f, prefix, match)
}
return nil, fs.ErrorDirNotFound
}
// Put the object into the remote
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
defer log.Trace(f, "src=%+v", src)("")
// Temporary Object under construction
o := &Object{
fs: f,
remote: src.Remote(),
}
return o, o.Update(ctx, in, src, options...)
}
// createAlbum creates the album
func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Album, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/albums",
Parameters: url.Values{},
}
var request = api.CreateAlbum{
Album: &api.Album{
Title: albumTitle,
},
}
var result api.Album
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, request, &result)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create album")
}
f.albums[false].add(&result)
return &result, nil
}
// getOrCreateAlbum gets an existing album or creates a new one
//
// It does the creation with the lock held to avoid duplicates
func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *api.Album, err error) {
f.createMu.Lock()
defer f.createMu.Unlock()
albums, err := f.listAlbums(false)
if err != nil {
return nil, err
}
album, ok := albums.get(albumTitle)
if ok {
return album, nil
}
return f.createAlbum(ctx, albumTitle)
}
// Mkdir creates the album if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
match, prefix, pattern := patterns.match(f.root, dir, false)
if pattern == nil {
return fs.ErrorDirNotFound
}
if !pattern.canMkdir {
return errCantMkdir
}
if pattern.isUpload {
f.uploadedMu.Lock()
d := fs.NewDir(strings.Trim(prefix, "/"), f.dirTime())
f.uploaded.AddEntry(d)
f.uploadedMu.Unlock()
return nil
}
albumTitle := match[1]
_, err = f.getOrCreateAlbum(ctx, albumTitle)
return err
}
// Rmdir removes the album or upload directory
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
defer log.Trace(f, "dir=%q")("err=%v", &err)
match, _, pattern := patterns.match(f.root, dir, false)
if pattern == nil {
return fs.ErrorDirNotFound
}
if !pattern.canMkdir {
return errCantRmdir
}
if pattern.isUpload {
f.uploadedMu.Lock()
err = f.uploaded.Prune(map[string]bool{
dir: true,
})
f.uploadedMu.Unlock()
return err
}
albumTitle := match[1]
allAlbums, err := f.listAlbums(false)
if err != nil {
return err
}
album, ok := allAlbums.get(albumTitle)
if !ok {
return fs.ErrorDirNotFound
}
_ = album
return errAlbumDelete
}
// Precision returns the precision
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Hash returns an empty string and hash.ErrUnsupported as Google Photos doesn't expose hashes
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
defer log.Trace(o, "")("")
if !o.fs.opt.ReadSize || o.bytes >= 0 {
return o.bytes
}
ctx := context.TODO()
err := o.readMetaData(ctx)
if err != nil {
fs.Debugf(o, "Size: Failed to read metadata: %v", err)
return -1
}
var resp *http.Response
opts := rest.Opts{
Method: "HEAD",
RootURL: o.downloadURL(),
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
fs.Debugf(o, "Reading size failed: %v", err)
} else {
lengthStr := resp.Header.Get("Content-Length")
length, err := strconv.ParseInt(lengthStr, 10, 64)
if err != nil {
fs.Debugf(o, "Reading size failed to parse Content_length %q: %v", lengthStr, err)
} else {
o.bytes = length
}
}
return o.bytes
}
// setMetaData sets the fs data from an api.MediaItem
func (o *Object) setMetaData(info *api.MediaItem) {
o.url = info.BaseURL
o.id = info.ID
o.bytes = -1 // FIXME
o.mimeType = info.MimeType
o.modTime = info.MediaMetadata.CreationTime
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
if !o.modTime.IsZero() && o.url != "" {
return nil
}
dir, fileName := path.Split(o.remote)
dir = strings.Trim(dir, "/")
_, _, pattern := patterns.match(o.fs.root, o.remote, true)
if pattern == nil {
return fs.ErrorObjectNotFound
}
if !pattern.isFile {
return fs.ErrorNotAFile
}
// If have ID fetch it directly
if id := findID(fileName); id != "" {
opts := rest.Opts{
Method: "GET",
Path: "/mediaItems/" + id,
}
var item api.MediaItem
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, nil, &item)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't get media item")
}
o.setMetaData(&item)
return nil
}
// Otherwise list the directory the file is in
entries, err := o.fs.List(ctx, dir)
if err != nil {
if err == fs.ErrorDirNotFound {
return fs.ErrorObjectNotFound
}
return err
}
// and find the file in the directory
for _, entry := range entries {
if entry.Remote() == o.remote {
if newO, ok := entry.(*Object); ok {
*o = *newO
return nil
}
}
}
return fs.ErrorObjectNotFound
}
// ModTime returns the modification time of the object
//
// It reads the creation time of the media item from its metadata; if that
// can't be read it returns the current time
func (o *Object) ModTime(ctx context.Context) time.Time {
defer log.Trace(o, "")("")
err := o.readMetaData(ctx)
if err != nil {
fs.Debugf(o, "ModTime: Failed to read metadata: %v", err)
return time.Now()
}
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
return fs.ErrorCantSetModTime
}
// Storable returns a boolean as to whether this object is storable
func (o *Object) Storable() bool {
return true
}
// downloadURL returns the URL for a full bytes download for the object
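// (editor's note: "=d" asks the API for the original bytes rather than a
// resized preview, and "=dv" requests the original video stream)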
func (o *Object) downloadURL() string {
url := o.url + "=d"
if strings.HasPrefix(o.mimeType, "video/") {
url += "v"
}
return url
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
defer log.Trace(o, "")("")
err = o.readMetaData(ctx)
if err != nil {
fs.Debugf(o, "Open: Failed to read metadata: %v", err)
return nil, err
}
var resp *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: o.downloadURL(),
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
return resp.Body, err
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
defer log.Trace(o, "src=%+v", src)("err=%v", &err)
match, _, pattern := patterns.match(o.fs.root, o.remote, true)
if pattern == nil || !pattern.isFile || !pattern.canUpload {
return errCantUpload
}
var (
albumID string
fileName string
)
if pattern.isUpload {
fileName = match[1]
} else {
var albumTitle string
albumTitle, fileName = match[1], match[2]
album, err := o.fs.getOrCreateAlbum(ctx, albumTitle)
if err != nil {
return err
}
if !album.IsWriteable {
return errOwnAlbums
}
albumID = album.ID
}
// Upload the media item in exchange for an UploadToken
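// (editor's note: this is the two-phase upload protocol visible in the code
// below - the raw bytes POSTed to /uploads yield only an opaque token, and
// the item does not appear in the library until mediaItems:batchCreate
// exchanges that token for a MediaItem)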
opts := rest.Opts{
Method: "POST",
Path: "/uploads",
ExtraHeaders: map[string]string{
"X-Goog-Upload-File-Name": fileName,
"X-Goog-Upload-Protocol": "raw",
},
Body: in,
}
var token []byte
var resp *http.Response
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.Call(&opts)
if err != nil {
if resp != nil { // guard: resp can be nil if the call failed early
_ = resp.Body.Close()
}
return shouldRetry(resp, err)
}
token, err = rest.ReadBody(resp)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't upload file")
}
uploadToken := strings.TrimSpace(string(token))
if uploadToken == "" {
return errors.New("empty upload token")
}
// Create the media item from an UploadToken, optionally adding to an album
opts = rest.Opts{
Method: "POST",
Path: "/mediaItems:batchCreate",
}
var request = api.BatchCreateRequest{
AlbumID: albumID,
NewMediaItems: []api.NewMediaItem{
{
SimpleMediaItem: api.SimpleMediaItem{
UploadToken: uploadToken,
},
},
},
}
var result api.BatchCreateResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, request, &result)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to create media item")
}
if len(result.NewMediaItemResults) != 1 {
return errors.New("bad response to BatchCreate wrong number of items")
}
mediaItemResult := result.NewMediaItemResults[0]
if mediaItemResult.Status.Code != 0 {
return errors.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
}
o.setMetaData(&mediaItemResult.MediaItem)
// Add upload to internal storage
if pattern.isUpload {
o.fs.uploaded.AddEntry(o)
}
return nil
}
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
match, _, pattern := patterns.match(o.fs.root, o.remote, true)
if pattern == nil || !pattern.isFile || !pattern.canUpload || pattern.isUpload {
return errRemove
}
albumTitle, fileName := match[1], match[2]
album, ok := o.fs.albums[false].get(albumTitle)
if !ok {
return errors.Errorf("couldn't file %q in album %q for delete", fileName, albumTitle)
}
opts := rest.Opts{
Method: "POST",
Path: "/albums/" + album.ID + ":batchRemoveMediaItems",
NoResponse: true,
}
var request = api.BatchRemoveItems{
MediaItemIds: []string{o.id},
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't delete item from album")
}
return nil
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
return o.mimeType
}
// ID of an Object if known, "" otherwise
func (o *Object) ID() string {
return o.id
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
)

View File

@@ -1,306 +0,0 @@
package googlephotos
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"path"
"testing"
"time"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
// We have two different files here as Google Photos will uniq
// them otherwise, which confuses the tests as the filename is
// unexpected.
fileNameAlbum = "rclone-test-image1.jpg"
fileNameUpload = "rclone-test-image2.jpg"
)
// Wrapper to override the remote for an object
type overrideRemoteObject struct {
fs.Object
remote string
}
// Remote returns the overridden remote name
func (o *overrideRemoteObject) Remote() string {
return o.remote
}
func TestIntegration(t *testing.T) {
ctx := context.Background()
fstest.Initialise()
// Create Fs
if *fstest.RemoteName == "" {
*fstest.RemoteName = "TestGooglePhotos:"
}
f, err := fs.NewFs(*fstest.RemoteName)
if err == fs.ErrorNotFoundInConfigFile {
t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
}
require.NoError(t, err)
// Create local Fs pointing at testfiles
localFs, err := fs.NewFs("testfiles")
require.NoError(t, err)
t.Run("CreateAlbum", func(t *testing.T) {
albumName := "album/rclone-test-" + fstest.RandomString(24)
err = f.Mkdir(ctx, albumName)
require.NoError(t, err)
remote := albumName + "/" + fileNameAlbum
t.Run("PutFile", func(t *testing.T) {
srcObj, err := localFs.NewObject(ctx, fileNameAlbum)
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()
remoteWithID := addFileID(remote, dstObj.(*Object).id)
t.Run("ObjectFs", func(t *testing.T) {
assert.Equal(t, f, dstObj.Fs())
})
t.Run("ObjectString", func(t *testing.T) {
assert.Equal(t, remote, dstObj.String())
assert.Equal(t, "<nil>", (*Object)(nil).String())
})
t.Run("ObjectHash", func(t *testing.T) {
h, err := dstObj.Hash(ctx, hash.MD5)
assert.Equal(t, "", h)
assert.Equal(t, hash.ErrUnsupported, err)
})
t.Run("ObjectSize", func(t *testing.T) {
assert.Equal(t, int64(-1), dstObj.Size())
f.(*Fs).opt.ReadSize = true
defer func() {
f.(*Fs).opt.ReadSize = false
}()
size := dstObj.Size()
assert.True(t, size > 1000, fmt.Sprintf("Size too small %d", size))
})
t.Run("ObjectSetModTime", func(t *testing.T) {
err := dstObj.SetModTime(ctx, time.Now())
assert.Equal(t, fs.ErrorCantSetModTime, err)
})
t.Run("ObjectStorable", func(t *testing.T) {
assert.True(t, dstObj.Storable())
})
t.Run("ObjectOpen", func(t *testing.T) {
in, err := dstObj.Open(ctx)
require.NoError(t, err)
buf, err := ioutil.ReadAll(in)
require.NoError(t, err)
require.NoError(t, in.Close())
assert.True(t, len(buf) > 1000)
contentType := http.DetectContentType(buf[:512])
assert.Equal(t, "image/jpeg", contentType)
})
t.Run("CheckFileInAlbum", func(t *testing.T) {
entries, err := f.List(ctx, albumName)
require.NoError(t, err)
assert.Equal(t, 1, len(entries))
assert.Equal(t, remote, entries[0].Remote())
assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
})
// Check it is there in the year/month/day hierarchy
// 2013-07-26 is the creation date of the photo
checkPresent := func(t *testing.T, objPath string) {
entries, err := f.List(ctx, objPath)
require.NoError(t, err)
found := false
for _, entry := range entries {
leaf := path.Base(entry.Remote())
if leaf == fileNameAlbum || leaf == remoteWithID {
found = true
}
}
assert.True(t, found, fmt.Sprintf("didn't find %q in %q", fileNameAlbum, objPath))
}
t.Run("CheckInByYear", func(t *testing.T) {
checkPresent(t, "media/by-year/2013")
})
t.Run("CheckInByMonth", func(t *testing.T) {
checkPresent(t, "media/by-month/2013/2013-07")
})
t.Run("CheckInByDay", func(t *testing.T) {
checkPresent(t, "media/by-day/2013/2013-07-26")
})
t.Run("NewObject", func(t *testing.T) {
o, err := f.NewObject(ctx, remote)
require.NoError(t, err)
require.Equal(t, remote, o.Remote())
})
t.Run("NewObjectWithID", func(t *testing.T) {
o, err := f.NewObject(ctx, remoteWithID)
require.NoError(t, err)
require.Equal(t, remoteWithID, o.Remote())
})
t.Run("NewFsIsFile", func(t *testing.T) {
fNew, err := fs.NewFs(*fstest.RemoteName + remote)
assert.Equal(t, fs.ErrorIsFile, err)
leaf := path.Base(remote)
o, err := fNew.NewObject(ctx, leaf)
require.NoError(t, err)
require.Equal(t, leaf, o.Remote())
})
t.Run("RemoveFileFromAlbum", func(t *testing.T) {
err = dstObj.Remove(ctx)
require.NoError(t, err)
time.Sleep(time.Second)
// Check album empty
entries, err := f.List(ctx, albumName)
require.NoError(t, err)
assert.Equal(t, 0, len(entries))
})
})
// remove the album
err = f.Rmdir(ctx, albumName)
require.Error(t, err) // FIXME doesn't work yet
})
t.Run("UploadMkdir", func(t *testing.T) {
assert.NoError(t, f.Mkdir(ctx, "upload/dir"))
assert.NoError(t, f.Mkdir(ctx, "upload/dir/subdir"))
t.Run("List", func(t *testing.T) {
entries, err := f.List(ctx, "upload")
require.NoError(t, err)
assert.Equal(t, 1, len(entries))
assert.Equal(t, "upload/dir", entries[0].Remote())
entries, err = f.List(ctx, "upload/dir")
require.NoError(t, err)
assert.Equal(t, 1, len(entries))
assert.Equal(t, "upload/dir/subdir", entries[0].Remote())
})
t.Run("Rmdir", func(t *testing.T) {
assert.NoError(t, f.Rmdir(ctx, "upload/dir/subdir"))
assert.NoError(t, f.Rmdir(ctx, "upload/dir"))
})
t.Run("ListEmpty", func(t *testing.T) {
entries, err := f.List(ctx, "upload")
require.NoError(t, err)
assert.Equal(t, 0, len(entries))
_, err = f.List(ctx, "upload/dir")
assert.Equal(t, fs.ErrorDirNotFound, err)
})
})
t.Run("Upload", func(t *testing.T) {
uploadDir := "upload/dir/subdir"
remote := path.Join(uploadDir, fileNameUpload)
srcObj, err := localFs.NewObject(ctx, fileNameUpload)
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()
remoteWithID := addFileID(remote, dstObj.(*Object).id)
t.Run("List", func(t *testing.T) {
entries, err := f.List(ctx, uploadDir)
require.NoError(t, err)
require.Equal(t, 1, len(entries))
assert.Equal(t, remote, entries[0].Remote())
assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
})
t.Run("NewObject", func(t *testing.T) {
o, err := f.NewObject(ctx, remote)
require.NoError(t, err)
require.Equal(t, remote, o.Remote())
})
t.Run("NewObjectWithID", func(t *testing.T) {
o, err := f.NewObject(ctx, remoteWithID)
require.NoError(t, err)
require.Equal(t, remoteWithID, o.Remote())
})
})
t.Run("Name", func(t *testing.T) {
assert.Equal(t, (*fstest.RemoteName)[:len(*fstest.RemoteName)-1], f.Name())
})
t.Run("Root", func(t *testing.T) {
assert.Equal(t, "", f.Root())
})
t.Run("String", func(t *testing.T) {
assert.Equal(t, `Google Photos path ""`, f.String())
})
t.Run("Features", func(t *testing.T) {
features := f.Features()
assert.False(t, features.CaseInsensitive)
assert.True(t, features.ReadMimeType)
})
t.Run("Precision", func(t *testing.T) {
assert.Equal(t, fs.ModTimeNotSupported, f.Precision())
})
t.Run("Hashes", func(t *testing.T) {
assert.Equal(t, hash.Set(hash.None), f.Hashes())
})
}
func TestAddID(t *testing.T) {
assert.Equal(t, "potato {123}", addID("potato", "123"))
assert.Equal(t, "{123}", addID("", "123"))
}
func TestFileAddID(t *testing.T) {
assert.Equal(t, "potato {123}.txt", addFileID("potato.txt", "123"))
assert.Equal(t, "potato {123}", addFileID("potato", "123"))
assert.Equal(t, "{123}", addFileID("", "123"))
}
func TestFindID(t *testing.T) {
assert.Equal(t, "", findID("potato"))
ID := "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
assert.Equal(t, ID, findID("potato {"+ID+"}.txt"))
ID = ID[1:]
assert.Equal(t, "", findID("potato {"+ID+"}.txt"))
}

View File

@@ -1,335 +0,0 @@
// Store the parsing of file patterns
package googlephotos
import (
"context"
"fmt"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
)
// lister describes the subset of the interfaces on Fs needed for the
// file pattern parsing
type lister interface {
listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error)
listAlbums(shared bool) (all *albums, err error)
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
dirTime() time.Time
}
// dirPattern describes a single directory pattern
type dirPattern struct {
re string // match for the path
match *regexp.Regexp // compiled match
canUpload bool // true if can upload here
canMkdir bool // true if can make a directory here
isFile bool // true if this is a file
isUpload bool // true if this is the upload directory
// function to turn a match into DirEntries
toEntries func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error)
}
// dirPatterns is a slice of all the directory patterns
type dirPatterns []dirPattern
// patterns describes the layout of the google photos backend file system.
//
// NB no trailing / on paths
var patterns = dirPatterns{
{
re: `^$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
return fs.DirEntries{
fs.NewDir(prefix+"media", f.dirTime()),
fs.NewDir(prefix+"album", f.dirTime()),
fs.NewDir(prefix+"shared-album", f.dirTime()),
fs.NewDir(prefix+"upload", f.dirTime()),
}, nil
},
},
{
re: `^upload(?:/(.*))?$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
return f.listUploads(ctx, match[0])
},
canUpload: true,
canMkdir: true,
isUpload: true,
},
{
re: `^upload/(.*)$`,
isFile: true,
canUpload: true,
isUpload: true,
},
{
re: `^media$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
return fs.DirEntries{
fs.NewDir(prefix+"all", f.dirTime()),
fs.NewDir(prefix+"by-year", f.dirTime()),
fs.NewDir(prefix+"by-month", f.dirTime()),
fs.NewDir(prefix+"by-day", f.dirTime()),
}, nil
},
},
{
re: `^media/all$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
return f.listDir(ctx, prefix, api.SearchFilter{})
},
},
{
re: `^media/all/([^/]+)$`,
isFile: true,
},
{
re: `^media/by-year$`,
toEntries: years,
},
{
re: `^media/by-year/(\d{4})$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
filter, err := yearMonthDayFilter(ctx, f, match)
if err != nil {
return nil, err
}
return f.listDir(ctx, prefix, filter)
},
},
{
re: `^media/by-year/(\d{4})/([^/]+)$`,
isFile: true,
},
{
re: `^media/by-month$`,
toEntries: years,
},
{
re: `^media/by-month/(\d{4})$`,
toEntries: months,
},
{
re: `^media/by-month/\d{4}/(\d{4})-(\d{2})$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
filter, err := yearMonthDayFilter(ctx, f, match)
if err != nil {
return nil, err
}
return f.listDir(ctx, prefix, filter)
},
},
{
re: `^media/by-month/\d{4}/(\d{4})-(\d{2})/([^/]+)$`,
isFile: true,
},
{
re: `^media/by-day$`,
toEntries: years,
},
{
re: `^media/by-day/(\d{4})$`,
toEntries: days,
},
{
re: `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
filter, err := yearMonthDayFilter(ctx, f, match)
if err != nil {
return nil, err
}
return f.listDir(ctx, prefix, filter)
},
},
{
re: `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})/([^/]+)$`,
isFile: true,
},
{
re: `^album$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return albumsToEntries(ctx, f, false, prefix, "")
},
},
{
re: `^album/(.+)$`,
canMkdir: true,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return albumsToEntries(ctx, f, false, prefix, match[1])
},
},
{
re: `^album/(.+?)/([^/]+)$`,
canUpload: true,
isFile: true,
},
{
re: `^shared-album$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return albumsToEntries(ctx, f, true, prefix, "")
},
},
{
re: `^shared-album/(.+)$`,
toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
return albumsToEntries(ctx, f, true, prefix, match[1])
},
},
{
re: `^shared-album/(.+?)/([^/]+)$`,
isFile: true,
},
}.mustCompile()
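// (editor's note: match below scans this table in order and returns the
// first entry whose isFile flag and compiled regexp both match the path)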
// mustCompile compiles the regexps in the dirPatterns
func (ds dirPatterns) mustCompile() dirPatterns {
for i := range ds {
pattern := &ds[i]
pattern.match = regexp.MustCompile(pattern.re)
}
return ds
}
// match looks up the path passed in within the matching structure and
// returns the parameters and a pointer to the matching pattern, or nil.
func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []string, prefix string, pattern *dirPattern) {
itemPath = strings.Trim(itemPath, "/")
absPath := path.Join(root, itemPath)
prefix = strings.Trim(absPath[len(root):], "/")
if prefix != "" {
prefix += "/"
}
for i := range ds {
pattern = &ds[i]
if pattern.isFile != isFile {
continue
}
match = pattern.match.FindStringSubmatch(absPath)
if match != nil {
return
}
}
return nil, "", nil
}
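// exampleMatch is an illustrative addition (not in the original file). With
// root "" and itemPath "media/by-month/2019/2019-06", the table above yields
// match ["media/by-month/2019/2019-06", "2019", "06"], the prefix
// "media/by-month/2019/2019-06/" and the by-month pattern whose toEntries
// lists that month's media.
func exampleMatch() bool {
	match, prefix, pattern := patterns.match("", "media/by-month/2019/2019-06", false)
	return len(match) == 3 && prefix == "media/by-month/2019/2019-06/" && pattern != nil
}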
// Return the years from 2000 to today
// FIXME make configurable?
func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
currentYear := f.dirTime().Year()
for year := 2000; year <= currentYear; year++ {
entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), f.dirTime()))
}
return entries, nil
}
// Return the months in a given year
func months(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
year := match[1]
for month := 1; month <= 12; month++ {
entries = append(entries, fs.NewDir(fmt.Sprintf("%s%s-%02d", prefix, year, month), f.dirTime()))
}
return entries, nil
}
// Return the days in a given year
func days(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
year := match[1]
current, err := time.Parse("2006", year)
if err != nil {
return nil, errors.Errorf("bad year %q", match[1])
}
currentYear := current.Year()
for current.Year() == currentYear {
entries = append(entries, fs.NewDir(prefix+current.Format("2006-01-02"), f.dirTime()))
current = current.AddDate(0, 0, 1)
}
return entries, nil
}
// This creates a search filter on year/month/day as provided
func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
year, err := strconv.Atoi(match[1])
if err != nil || year < 1000 || year > 3000 {
return sf, errors.Errorf("bad year %q", match[1])
}
sf = api.SearchFilter{
Filters: &api.Filters{
DateFilter: &api.DateFilter{
Dates: []api.Date{
{
Year: year,
},
},
},
},
}
if len(match) >= 3 {
month, err := strconv.Atoi(match[2])
if err != nil || month < 1 || month > 12 {
return sf, errors.Errorf("bad month %q", match[2])
}
sf.Filters.DateFilter.Dates[0].Month = month
}
if len(match) >= 4 {
day, err := strconv.Atoi(match[3])
if err != nil || day < 1 || day > 31 {
return sf, errors.Errorf("bad day %q", match[3])
}
sf.Filters.DateFilter.Dates[0].Day = day
}
return sf, nil
}
// Turns an albumPath into entries
//
// These can either be synthetic directory entries if the album path
// is a prefix of another album, or actual files, or a combination of
// the two.
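// For example (editor's illustration, mirroring the pattern tests): with
// albums titled "sub" and "sub/one", listing albumPath "sub" returns a
// synthetic directory entry "one" plus the media items of album "sub" itself.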
func albumsToEntries(ctx context.Context, f lister, shared bool, prefix string, albumPath string) (entries fs.DirEntries, err error) {
albums, err := f.listAlbums(shared)
if err != nil {
return nil, err
}
// Put in the directories
dirs, foundAlbumPath := albums.getDirs(albumPath)
if foundAlbumPath {
for _, dir := range dirs {
d := fs.NewDir(prefix+dir, f.dirTime())
dirPath := path.Join(albumPath, dir)
// if this dir is an album add more special stuff
album, ok := albums.get(dirPath)
if ok {
count, err := strconv.ParseInt(album.MediaItemsCount, 10, 64)
if err != nil {
fs.Debugf(f, "Error reading media count: %v", err)
}
d.SetID(album.ID).SetItems(count)
}
entries = append(entries, d)
}
}
// if this is an album then return a filter to list it
album, foundAlbum := albums.get(albumPath)
if foundAlbum {
filter := api.SearchFilter{AlbumID: album.ID}
newEntries, err := f.listDir(ctx, prefix, filter)
if err != nil {
return nil, err
}
entries = append(entries, newEntries...)
}
if !foundAlbumPath && !foundAlbum && albumPath != "" {
return nil, fs.ErrorDirNotFound
}
return entries, nil
}

View File

@@ -1,495 +0,0 @@
package googlephotos
import (
"context"
"fmt"
"testing"
"time"
"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/mockobject"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// time for directories
var startTime = fstest.Time("2019-06-24T15:53:05.999999999Z")
// mock Fs for testing patterns
type testLister struct {
t *testing.T
albums *albums
names []string
uploaded dirtree.DirTree
}
// newTestLister makes a mock for testing
func newTestLister(t *testing.T) *testLister {
return &testLister{
t: t,
albums: newAlbums(),
uploaded: dirtree.New(),
}
}
// mock listDir for testing
func (f *testLister) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {
for _, name := range f.names {
entries = append(entries, mockobject.New(prefix+name))
}
return entries, nil
}
// mock listAlbums for testing
func (f *testLister) listAlbums(shared bool) (all *albums, err error) {
return f.albums, nil
}
// mock listUploads for testing
func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entries, _ = f.uploaded[dir]
return entries, nil
}
// mock dirTime for testing
func (f *testLister) dirTime() time.Time {
return startTime
}
func TestPatternMatch(t *testing.T) {
for testNumber, test := range []struct {
// input
root string
itemPath string
isFile bool
// expected output
wantMatch []string
wantPrefix string
wantPattern *dirPattern
}{
{
root: "",
itemPath: "",
isFile: false,
wantMatch: []string{""},
wantPrefix: "",
wantPattern: &patterns[0],
},
{
root: "",
itemPath: "",
isFile: true,
wantMatch: nil,
wantPrefix: "",
wantPattern: nil,
},
{
root: "upload",
itemPath: "",
isFile: false,
wantMatch: []string{"upload", ""},
wantPrefix: "",
wantPattern: &patterns[1],
},
{
root: "upload/dir",
itemPath: "",
isFile: false,
wantMatch: []string{"upload/dir", "dir"},
wantPrefix: "",
wantPattern: &patterns[1],
},
{
root: "upload/file.jpg",
itemPath: "",
isFile: true,
wantMatch: []string{"upload/file.jpg", "file.jpg"},
wantPrefix: "",
wantPattern: &patterns[2],
},
{
root: "media",
itemPath: "",
isFile: false,
wantMatch: []string{"media"},
wantPrefix: "",
wantPattern: &patterns[3],
},
{
root: "",
itemPath: "media",
isFile: false,
wantMatch: []string{"media"},
wantPrefix: "media/",
wantPattern: &patterns[3],
},
{
root: "media/all",
itemPath: "",
isFile: false,
wantMatch: []string{"media/all"},
wantPrefix: "",
wantPattern: &patterns[4],
},
{
root: "media",
itemPath: "all",
isFile: false,
wantMatch: []string{"media/all"},
wantPrefix: "all/",
wantPattern: &patterns[4],
},
{
root: "media/all",
itemPath: "file.jpg",
isFile: true,
wantMatch: []string{"media/all/file.jpg", "file.jpg"},
wantPrefix: "file.jpg/",
wantPattern: &patterns[5],
},
} {
t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q,isFile=%v", testNumber, test.root, test.itemPath, test.isFile), func(t *testing.T) {
gotMatch, gotPrefix, gotPattern := patterns.match(test.root, test.itemPath, test.isFile)
assert.Equal(t, test.wantMatch, gotMatch)
assert.Equal(t, test.wantPrefix, gotPrefix)
assert.Equal(t, test.wantPattern, gotPattern)
})
}
}
func TestPatternMatchToEntries(t *testing.T) {
ctx := context.Background()
f := newTestLister(t)
f.names = []string{"file.jpg"}
f.albums.add(&api.Album{
ID: "1",
Title: "sub/one",
})
f.albums.add(&api.Album{
ID: "2",
Title: "sub",
})
f.uploaded.AddEntry(mockobject.New("upload/file1.jpg"))
f.uploaded.AddEntry(mockobject.New("upload/dir/file2.jpg"))
for testNumber, test := range []struct {
// input
root string
itemPath string
// expected output
wantMatch []string
wantPrefix string
remotes []string
}{
{
root: "",
itemPath: "",
wantMatch: []string{""},
wantPrefix: "",
remotes: []string{"media/", "album/", "shared-album/", "upload/"},
},
{
root: "upload",
itemPath: "",
wantMatch: []string{"upload", ""},
wantPrefix: "",
remotes: []string{"upload/file1.jpg", "upload/dir/"},
},
{
root: "upload",
itemPath: "dir",
wantMatch: []string{"upload/dir", "dir"},
wantPrefix: "dir/",
remotes: []string{"upload/dir/file2.jpg"},
},
{
root: "media",
itemPath: "",
wantMatch: []string{"media"},
wantPrefix: "",
remotes: []string{"all/", "by-year/", "by-month/", "by-day/"},
},
{
root: "media/all",
itemPath: "",
wantMatch: []string{"media/all"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "media",
itemPath: "all",
wantMatch: []string{"media/all"},
wantPrefix: "all/",
remotes: []string{"all/file.jpg"},
},
{
root: "media/by-year",
itemPath: "",
wantMatch: []string{"media/by-year"},
wantPrefix: "",
remotes: []string{"2000/", "2001/", "2002/", "2003/"},
},
{
root: "media/by-year/2000",
itemPath: "",
wantMatch: []string{"media/by-year/2000", "2000"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "media/by-month",
itemPath: "",
wantMatch: []string{"media/by-month"},
wantPrefix: "",
remotes: []string{"2000/", "2001/", "2002/", "2003/"},
},
{
root: "media/by-month/2001",
itemPath: "",
wantMatch: []string{"media/by-month/2001", "2001"},
wantPrefix: "",
remotes: []string{"2001-01/", "2001-02/", "2001-03/", "2001-04/"},
},
{
root: "media/by-month/2001/2001-01",
itemPath: "",
wantMatch: []string{"media/by-month/2001/2001-01", "2001", "01"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "media/by-day",
itemPath: "",
wantMatch: []string{"media/by-day"},
wantPrefix: "",
remotes: []string{"2000/", "2001/", "2002/", "2003/"},
},
{
root: "media/by-day/2001",
itemPath: "",
wantMatch: []string{"media/by-day/2001", "2001"},
wantPrefix: "",
remotes: []string{"2001-01-01/", "2001-01-02/", "2001-01-03/", "2001-01-04/"},
},
{
root: "media/by-day/2001/2001-01-02",
itemPath: "",
wantMatch: []string{"media/by-day/2001/2001-01-02", "2001", "01", "02"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "album",
itemPath: "",
wantMatch: []string{"album"},
wantPrefix: "",
remotes: []string{"sub/"},
},
{
root: "album/sub",
itemPath: "",
wantMatch: []string{"album/sub", "sub"},
wantPrefix: "",
remotes: []string{"one/", "file.jpg"},
},
{
root: "album/sub/one",
itemPath: "",
wantMatch: []string{"album/sub/one", "sub/one"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
{
root: "shared-album",
itemPath: "",
wantMatch: []string{"shared-album"},
wantPrefix: "",
remotes: []string{"sub/"},
},
{
root: "shared-album/sub",
itemPath: "",
wantMatch: []string{"shared-album/sub", "sub"},
wantPrefix: "",
remotes: []string{"one/", "file.jpg"},
},
{
root: "shared-album/sub/one",
itemPath: "",
wantMatch: []string{"shared-album/sub/one", "sub/one"},
wantPrefix: "",
remotes: []string{"file.jpg"},
},
} {
t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q", testNumber, test.root, test.itemPath), func(t *testing.T) {
			match, prefix, pattern := patterns.match(test.root, test.itemPath, false)
			assert.Equal(t, test.wantMatch, match)
			assert.Equal(t, test.wantPrefix, prefix)
			assert.NotNil(t, pattern)
			assert.NotNil(t, pattern.toEntries)

			entries, err := pattern.toEntries(ctx, f, prefix, match)
			assert.NoError(t, err)
			var remotes = []string{}
			for _, entry := range entries {
				remote := entry.Remote()
				if _, isDir := entry.(fs.Directory); isDir {
					remote += "/"
				}
				remotes = append(remotes, remote)
				if len(remotes) >= 4 {
					break // only test first 4 entries
				}
			}
			assert.Equal(t, test.remotes, remotes)
		})
	}
}

func TestPatternYears(t *testing.T) {
	f := newTestLister(t)
	entries, err := years(context.Background(), f, "potato/", nil)
	require.NoError(t, err)

	year := 2000
	for _, entry := range entries {
		assert.Equal(t, "potato/"+fmt.Sprint(year), entry.Remote())
		year++
	}
}

func TestPatternMonths(t *testing.T) {
	f := newTestLister(t)
	entries, err := months(context.Background(), f, "potato/", []string{"", "2020"})
	require.NoError(t, err)

	assert.Equal(t, 12, len(entries))
	for i, entry := range entries {
		assert.Equal(t, fmt.Sprintf("potato/2020-%02d", i+1), entry.Remote())
	}
}

func TestPatternDays(t *testing.T) {
	f := newTestLister(t)
	entries, err := days(context.Background(), f, "potato/", []string{"", "2020"})
	require.NoError(t, err)

	assert.Equal(t, 366, len(entries))
	assert.Equal(t, "potato/2020-01-01", entries[0].Remote())
	assert.Equal(t, "potato/2020-12-31", entries[len(entries)-1].Remote())
}

func TestPatternYearMonthDayFilter(t *testing.T) {
	ctx := context.Background()
	f := newTestLister(t)

	// Years
	sf, err := yearMonthDayFilter(ctx, f, []string{"", "2000"})
	require.NoError(t, err)
	assert.Equal(t, api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Year: 2000,
					},
				},
			},
		},
	}, sf)

	_, err = yearMonthDayFilter(ctx, f, []string{"", "potato"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "999"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "4000"})
	require.Error(t, err)

	// Months
	sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01"})
	require.NoError(t, err)
	assert.Equal(t, api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Month: 1,
						Year:  2000,
					},
				},
			},
		},
	}, sf)

	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "potato"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "0"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "13"})
	require.Error(t, err)

	// Days
	sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "02"})
	require.NoError(t, err)
	assert.Equal(t, api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Day:   2,
						Month: 1,
						Year:  2000,
					},
				},
			},
		},
	}, sf)

	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "potato"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "0"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "32"})
	require.Error(t, err)
}

func TestPatternAlbumsToEntries(t *testing.T) {
	f := newTestLister(t)
	ctx := context.Background()

	_, err := albumsToEntries(ctx, f, false, "potato/", "sub")
	assert.Equal(t, fs.ErrorDirNotFound, err)

	f.albums.add(&api.Album{
		ID:    "1",
		Title: "sub/one",
	})

	entries, err := albumsToEntries(ctx, f, false, "potato/", "sub")
	assert.NoError(t, err)
	assert.Equal(t, 1, len(entries))
	assert.Equal(t, "potato/one", entries[0].Remote())
	_, ok := entries[0].(fs.Directory)
	assert.Equal(t, true, ok)

	f.albums.add(&api.Album{
		ID:    "1",
		Title: "sub",
	})
	f.names = []string{"file.jpg"}

	entries, err = albumsToEntries(ctx, f, false, "potato/", "sub")
	assert.NoError(t, err)
	assert.Equal(t, 2, len(entries))
	assert.Equal(t, "potato/one", entries[0].Remote())
	_, ok = entries[0].(fs.Directory)
	assert.Equal(t, true, ok)
	assert.Equal(t, "potato/file.jpg", entries[1].Remote())
	_, ok = entries[1].(fs.Object)
	assert.Equal(t, true, ok)
}

Binary file not shown.

Binary file not shown.

View File

@@ -5,7 +5,6 @@
 package http

 import (
-	"context"
 	"io"
 	"mime"
 	"net/http"
@@ -15,13 +14,13 @@ import (
 	"strings"
 	"time"

+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
+	"github.com/ncw/rclone/fs/fshttp"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/lib/rest"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/fshttp"
-	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/lib/rest"
 	"golang.org/x/net/html"
 )
@@ -208,7 +207,7 @@ func (f *Fs) Precision() time.Duration {
 }

 // NewObject creates a new remote http file object
-func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+func (f *Fs) NewObject(remote string) (fs.Object, error) {
 	o := &Object{
 		fs:     f,
 		remote: remote,
@@ -360,7 +359,7 @@ func (f *Fs) readDir(dir string) (names []string, err error) {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 	if !strings.HasSuffix(dir, "/") && dir != "" {
 		dir += "/"
 	}
@@ -400,12 +399,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // May create the object even if it returns an error - if so
 // will return the object and the error, otherwise will return
 // nil and the error
-func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	return nil, errorReadOnly
 }

 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	return nil, errorReadOnly
 }
@@ -428,7 +427,7 @@ func (o *Object) Remote() string {
 }

 // Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
-func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
+func (o *Object) Hash(r hash.Type) (string, error) {
 	return "", hash.ErrUnsupported
 }
@@ -438,7 +437,7 @@ func (o *Object) Size() int64 {
 }

 // ModTime returns the modification time of the remote http file
-func (o *Object) ModTime(ctx context.Context) time.Time {
+func (o *Object) ModTime() time.Time {
 	return o.modTime
 }
@@ -481,7 +480,7 @@ func (o *Object) stat() error {
 // SetModTime sets the modification and access time to the specified time
 //
 // it also updates the info field
-func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
+func (o *Object) SetModTime(modTime time.Time) error {
 	return errorReadOnly
 }
@@ -491,7 +490,7 @@ func (o *Object) Storable() bool {
 }

 // Open a remote http file object for reading. Seek is supported
-func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	url := o.url()
 	req, err := http.NewRequest("GET", url, nil)
 	if err != nil {
@@ -518,27 +517,27 @@ func (f *Fs) Hashes() hash.Set {
 }

 // Mkdir makes the root directory of the Fs object
-func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+func (f *Fs) Mkdir(dir string) error {
 	return errorReadOnly
 }

 // Remove a remote http file object
-func (o *Object) Remove(ctx context.Context) error {
+func (o *Object) Remove() error {
 	return errorReadOnly
 }

 // Rmdir removes the root directory of the Fs object
-func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+func (f *Fs) Rmdir(dir string) error {
 	return errorReadOnly
 }

 // Update in to the object with the modTime given of the given size
-func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 	return errorReadOnly
 }

 // MimeType of an Object if known, "" otherwise
-func (o *Object) MimeType(ctx context.Context) string {
+func (o *Object) MimeType() string {
 	return o.contentType
 }

View File

@@ -1,7 +1,8 @@
+// +build go1.8
+
 package http

 import (
-	"context"
 	"fmt"
 	"io/ioutil"
 	"net/http"
@@ -13,11 +14,11 @@ import (
 	"testing"
 	"time"

-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fstest"
-	"github.com/rclone/rclone/lib/rest"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fstest"
+	"github.com/ncw/rclone/lib/rest"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -65,7 +66,7 @@ func prepare(t *testing.T) (fs.Fs, func()) {
 }

 func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
-	entries, err := f.List(context.Background(), "")
+	entries, err := f.List("")
 	require.NoError(t, err)

 	sort.Sort(entries)
@@ -121,7 +122,7 @@ func TestListSubDir(t *testing.T) {
 	f, tidy := prepare(t)
 	defer tidy()

-	entries, err := f.List(context.Background(), "three")
+	entries, err := f.List("three")
 	require.NoError(t, err)

 	sort.Sort(entries)
@@ -139,7 +140,7 @@ func TestNewObject(t *testing.T) {
 	f, tidy := prepare(t)
 	defer tidy()

-	o, err := f.NewObject(context.Background(), "four/under four.txt")
+	o, err := f.NewObject("four/under four.txt")
 	require.NoError(t, err)

 	assert.Equal(t, "four/under four.txt", o.Remote())
@@ -149,7 +150,7 @@ func TestNewObject(t *testing.T) {
 	// Test the time is correct on the object
-	tObj := o.ModTime(context.Background())
+	tObj := o.ModTime()

 	fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
 	require.NoError(t, err)
@@ -159,7 +160,7 @@ func TestNewObject(t *testing.T) {
 	assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))

 	// check object not found
-	o, err = f.NewObject(context.Background(), "not found.txt")
+	o, err = f.NewObject("not found.txt")
 	assert.Nil(t, o)
 	assert.Equal(t, fs.ErrorObjectNotFound, err)
 }
@@ -168,11 +169,11 @@ func TestOpen(t *testing.T) {
 	f, tidy := prepare(t)
 	defer tidy()

-	o, err := f.NewObject(context.Background(), "four/under four.txt")
+	o, err := f.NewObject("four/under four.txt")
 	require.NoError(t, err)

 	// Test normal read
-	fd, err := o.Open(context.Background())
+	fd, err := o.Open()
 	require.NoError(t, err)
 	data, err := ioutil.ReadAll(fd)
 	require.NoError(t, err)
@@ -180,7 +181,7 @@ func TestOpen(t *testing.T) {
 	assert.Equal(t, "beetroot\n", string(data))

 	// Test with range request
-	fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
+	fd, err = o.Open(&fs.RangeOption{Start: 1, End: 5})
 	require.NoError(t, err)
 	data, err = ioutil.ReadAll(fd)
 	require.NoError(t, err)
@@ -192,12 +193,12 @@ func TestMimeType(t *testing.T) {
 	f, tidy := prepare(t)
 	defer tidy()

-	o, err := f.NewObject(context.Background(), "four/under four.txt")
+	o, err := f.NewObject("four/under four.txt")
 	require.NoError(t, err)

 	do, ok := o.(fs.MimeTyper)
 	require.True(t, ok)
-	assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
+	assert.Equal(t, "text/plain; charset=utf-8", do.MimeType())
 }

 func TestIsAFileRoot(t *testing.T) {
@@ -217,7 +218,7 @@ func TestIsAFileSubDir(t *testing.T) {
 	f, err := NewFs(remoteName, "three/underthree.txt", m)
 	assert.Equal(t, err, fs.ErrorIsFile)

-	entries, err := f.List(context.Background(), "")
+	entries, err := f.List("")
 	require.NoError(t, err)

 	sort.Sort(entries)

View File

@@ -24,7 +24,7 @@
 <tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="timer-test">timer-test</a></td><td align="right">09-May-2017 17:05 </td><td align="right">1.5M</td><td>&nbsp;</td></tr>
 <tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="words-to-regexp.pl">words-to-regexp.pl</a></td><td align="right">01-Mar-2005 20:43 </td><td align="right">6.0K</td><td>&nbsp;</td></tr>
 <tr><th colspan="5"><hr></th></tr>
-<!-- some extras from https://github.com/rclone/rclone/issues/1573 -->
+<!-- some extras from https://github.com/ncw/rclone/issues/1573 -->
 <tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20100%25%20better.mp3">Now 100% better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td>&nbsp;</td></tr>
 <tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20better.mp3">Now better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td>&nbsp;</td></tr>

View File

@@ -4,8 +4,8 @@ import (
 	"net/http"
 	"time"

+	"github.com/ncw/rclone/fs"
 	"github.com/ncw/swift"
-	"github.com/rclone/rclone/fs"
 )

 // auth is an authenticator for swift

View File

@@ -15,16 +15,16 @@ import (
 	"strings"
 	"time"

+	"github.com/ncw/rclone/backend/swift"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
+	"github.com/ncw/rclone/fs/config/obscure"
+	"github.com/ncw/rclone/fs/fshttp"
+	"github.com/ncw/rclone/lib/oauthutil"
 	swiftLib "github.com/ncw/swift"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/backend/swift"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/config/obscure"
-	"github.com/rclone/rclone/fs/fshttp"
-	"github.com/rclone/rclone/lib/oauthutil"
 	"golang.org/x/oauth2"
 )

View File

@@ -4,8 +4,8 @@ package hubic_test
 import (
 	"testing"

-	"github.com/rclone/rclone/backend/hubic"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/backend/hubic"
+	"github.com/ncw/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote
@@ -13,7 +13,5 @@ func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestHubic:",
 		NilObject:  (*hubic.Object)(nil),
-		SkipFsCheckWrap:     true,
-		SkipObjectCheckWrap: true,
 	})
 }

View File

@@ -314,9 +314,3 @@ type UploadResponse struct {
 	Deleted interface{} `json:"deleted"`
 	Mime    string      `json:"mime"`
 }
-
-// DeviceRegistrationResponse is the response to registering a device
-type DeviceRegistrationResponse struct {
-	ClientID     string `json:"client_id"`
-	ClientSecret string `json:"client_secret"`
-}

View File

@@ -2,14 +2,12 @@ package jottacloud

 import (
 	"bytes"
-	"context"
 	"crypto/md5"
 	"encoding/hex"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"log"
-	"math/rand"
 	"net/http"
 	"net/url"
 	"os"
@@ -18,21 +16,21 @@ import (
 	"strings"
 	"time"

+	"github.com/ncw/rclone/backend/jottacloud/api"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/accounting"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
+	"github.com/ncw/rclone/fs/config/obscure"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/fshttp"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/fs/walk"
+	"github.com/ncw/rclone/lib/oauthutil"
+	"github.com/ncw/rclone/lib/pacer"
+	"github.com/ncw/rclone/lib/rest"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/backend/jottacloud/api"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/accounting"
-	"github.com/rclone/rclone/fs/config"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/config/obscure"
-	"github.com/rclone/rclone/fs/fserrors"
-	"github.com/rclone/rclone/fs/fshttp"
-	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/walk"
-	"github.com/rclone/rclone/lib/oauthutil"
-	"github.com/rclone/rclone/lib/pacer"
-	"github.com/rclone/rclone/lib/rest"
 	"golang.org/x/oauth2"
 )
@@ -42,21 +40,15 @@
 	maxSleep      = 2 * time.Second
 	decayConstant = 2 // bigger for slower decay, exponential
 	defaultDevice = "Jotta"
-	defaultMountpoint = "Archive"
+	defaultMountpoint = "Sync" // nolint
 	rootURL  = "https://www.jottacloud.com/jfs/"
 	apiURL   = "https://api.jottacloud.com/files/v1/"
 	baseURL  = "https://www.jottacloud.com/"
 	tokenURL = "https://api.jottacloud.com/auth/v1/token"
-	registerURL = "https://api.jottacloud.com/auth/v1/register"
 	cachePrefix = "rclone-jcmd5-"
 	rcloneClientID              = "nibfk8biu12ju7hpqomr8b1e40"
 	rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
 	configUsername = "user"
-	configClientID     = "client_id"
-	configClientSecret = "client_secret"
-	configDevice       = "device"
-	configMountpoint   = "mountpoint"
-	charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
 )
@@ -66,13 +58,14 @@ var (
 			AuthURL:  tokenURL,
 			TokenURL: tokenURL,
 		},
+		ClientID:     rcloneClientID,
+		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL: oauthutil.RedirectLocalhostURL,
 	}
 )

 // Register with Fs
 func init() {
-	// needs to be done early so we can use oauth during config
 	fs.Register(&fs.RegInfo{
 		Name:        "jottacloud",
 		Description: "JottaCloud",
@@ -86,62 +79,14 @@ func init() {
 			}
 			}
-			srv := rest.NewClient(fshttp.NewClient(fs.Config))
-
-			// ask if we should create a device specifc token: https://github.com/rclone/rclone/issues/2995
-			fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
-			if config.Confirm() {
-				// random generator to generate random device names
-				seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
-				randonDeviceNamePartLength := 21
-				randomDeviceNamePart := make([]byte, randonDeviceNamePartLength)
-				for i := range randomDeviceNamePart {
-					randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
-				}
-				randomDeviceName := "rclone-" + string(randomDeviceNamePart)
-				fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)
-				values := url.Values{}
-				values.Set("device_id", randomDeviceName)
-				// all information comes from https://github.com/ttyridal/aiojotta/wiki/Jotta-protocol-3.-Authentication#token-authentication
-				opts := rest.Opts{
-					Method:       "POST",
-					RootURL:      registerURL,
-					ContentType:  "application/x-www-form-urlencoded",
-					ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
-					Parameters:   values,
-				}
-				var deviceRegistration api.DeviceRegistrationResponse
-				_, err := srv.CallJSON(&opts, nil, &deviceRegistration)
-				if err != nil {
-					log.Fatalf("Failed to register device: %v", err)
-				}
-				m.Set(configClientID, deviceRegistration.ClientID)
-				m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
-				fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
-			}
-
-			clientID, ok := m.Get(configClientID)
-			if !ok {
-				clientID = rcloneClientID
-			}
-			clientSecret, ok := m.Get(configClientSecret)
-			if !ok {
-				clientSecret = rcloneEncryptedClientSecret
-			}
-			oauthConfig.ClientID = clientID
-			oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
-
 			username, ok := m.Get(configUsername)
 			if !ok {
 				log.Fatalf("No username defined")
 			}
-			password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")
+			password := config.GetPassword("Your Jottacloud password is only required during config and will not be stored.")

 			// prepare out token request with username and password
+			srv := rest.NewClient(fshttp.NewClient(fs.Config))
 			values := url.Values{}
 			values.Set("grant_type", "PASSWORD")
 			values.Set("password", password)
@@ -161,7 +106,7 @@ func init() {
 			// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
 			if resp != nil {
 				if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
-					fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
+					fmt.Printf("This account has 2 factor authentication enabled you will receive a verification code via SMS.\n")
 					fmt.Printf("Enter verification code> ")
 					authCode := config.ReadLine()
 					authCode = strings.Replace(authCode, "-", "", -1) // the sms received contains a pair of 3 digit numbers seperated by '-' but wants a single 6 digit number
@@ -184,49 +129,23 @@
 			// finally save them in the config
 			err = oauthutil.PutToken(name, m, &token, true)
 			if err != nil {
-				log.Fatalf("Error while saving token: %s", err)
+				log.Fatalf("Error while setting token: %s", err)
 			}
-
-			fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
-			if config.Confirm() {
-				oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
-				if err != nil {
-					log.Fatalf("Failed to load oAuthClient: %s", err)
-				}
-				srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
-
-				acc, err := getAccountInfo(srv, username)
-				if err != nil {
-					log.Fatalf("Error getting devices: %s", err)
-				}
-				fmt.Printf("Please select the device to use. Normally this will be Jotta\n")
-				var deviceNames []string
-				for i := range acc.Devices {
-					deviceNames = append(deviceNames, acc.Devices[i].Name)
-				}
-				result := config.Choose("Devices", deviceNames, nil, false)
-				m.Set(configDevice, result)
-
-				dev, err := getDeviceInfo(srv, path.Join(username, result))
-				if err != nil {
-					log.Fatalf("Error getting Mountpoint: %s", err)
-				}
-				if len(dev.MountPoints) == 0 {
-					log.Fatalf("No Mountpoints found for this device.")
-				}
-				fmt.Printf("Please select the mountpoint to user. Normally this will be Archive\n")
-				var mountpointNames []string
-				for i := range dev.MountPoints {
-					mountpointNames = append(mountpointNames, dev.MountPoints[i].Name)
-				}
-				result = config.Choose("Mountpoints", mountpointNames, nil, false)
-				m.Set(configMountpoint, result)
-			}
 		},
 		Options: []fs.Option{{
 			Name: configUsername,
 			Help: "User Name:",
+		}, {
+			Name:     "mountpoint",
+			Help:     "The mountpoint to use.",
+			Required: true,
+			Examples: []fs.OptionExample{{
+				Value: "Sync",
+				Help:  "Will be synced by the official client.",
+			}, {
+				Value: "Archive",
+				Help:  "Archive",
+			}},
 		}, {
 			Name: "md5_memory_limit",
 			Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
@@ -254,7 +173,6 @@
 // Options defines the configuration for this backend
 type Options struct {
 	User string `config:"user"`
-	Device string `config:"device"`
 	Mountpoint string `config:"mountpoint"`
 	MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
 	HardDelete bool `config:"hard_delete"`
@@ -362,31 +280,18 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
 	return &result, nil
 }

-// getAccountInfo queries general information about the account.
-// Takes rest.Client and username as parameter to be easily usable
-// during config
-func getAccountInfo(srv *rest.Client, username string) (info *api.AccountInfo, err error) {
+// getAccountInfo retrieves account information
+func (f *Fs) getAccountInfo() (info *api.AccountInfo, err error) {
 	opts := rest.Opts{
 		Method: "GET",
-		Path:   urlPathEscape(username),
+		Path:   urlPathEscape(f.user),
 	}

-	_, err = srv.CallXML(&opts, nil, &info)
-	if err != nil {
-		return nil, err
-	}
-
-	return info, nil
-}
-
-// getDeviceInfo queries Information about a jottacloud device
-func getDeviceInfo(srv *rest.Client, path string) (info *api.JottaDevice, err error) {
-	opts := rest.Opts{
-		Method: "GET",
-		Path:   urlPathEscape(path),
-	}
-
-	_, err = srv.CallXML(&opts, nil, &info)
+	var resp *http.Response
+	err = f.pacer.Call(func() (bool, error) {
+		resp, err = f.srv.CallXML(&opts, nil, &info)
+		return shouldRetry(resp, err)
+	})
 	if err != nil {
 		return nil, err
 	}
@@ -395,18 +300,12 @@ func getDeviceInfo(srv *rest.Client, path string) (info *api.JottaDevice, err er
 }

 // setEndpointUrl reads the account id and generates the API endpoint URL
-func (f *Fs) setEndpointURL() (err error) {
-	info, err := getAccountInfo(f.srv, f.user)
+func (f *Fs) setEndpointURL(mountpoint string) (err error) {
+	info, err := f.getAccountInfo()
 	if err != nil {
 		return errors.Wrap(err, "failed to get endpoint url")
 	}
-	if f.opt.Device == "" {
-		f.opt.Device = defaultDevice
-	}
-	if f.opt.Mountpoint == "" {
-		f.opt.Mountpoint = defaultMountpoint
-	}
-	f.endpointURL = urlPathEscape(path.Join(info.Username, f.opt.Device, f.opt.Mountpoint))
+	f.endpointURL = urlPathEscape(path.Join(info.Username, defaultDevice, mountpoint))
 	return nil
 }
@@ -482,16 +381,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	rootIsDir := strings.HasSuffix(root, "/")
 	root = parsePath(root)

-	clientID, ok := m.Get(configClientID)
-	if !ok {
-		clientID = rcloneClientID
-	}
-	clientSecret, ok := m.Get(configClientSecret)
-	if !ok {
-		clientSecret = rcloneEncryptedClientSecret
-	}
-	oauthConfig.ClientID = clientID
-	oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
+	// add jottacloud to the long list of sites that don't follow the oauth spec correctly
+	oauth2.RegisterBrokenAuthHeaderProvider("https://www.jottacloud.com/")

 	// the oauth client for the api servers needs
 	// a filter to fix the grant_type issues (see above)
@@ -531,7 +422,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return err
 	})

-	err = f.setEndpointURL()
+	err = f.setEndpointURL(opt.Mountpoint)
 	if err != nil {
 		return nil, errors.Wrap(err, "couldn't get account info")
 	}
@@ -543,7 +434,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		if f.root == "." {
 			f.root = ""
 		}
-		_, err := f.NewObject(context.TODO(), remote)
+		_, err := f.NewObject(remote)
 		if err != nil {
 			if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
 				// File doesn't exist so return old f
@@ -581,7 +472,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, e
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+func (f *Fs) NewObject(remote string) (fs.Object, error) {
 	return f.newObjectWithInfo(remote, nil)
 }
@@ -618,7 +509,7 @@ func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 	//fmt.Printf("List: %s\n", f.filePath(dir))
 	opts := rest.Opts{
 		Method: "GET",
@@ -735,7 +626,7 @@ func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, f
 //
 // Don't implement this unless you have a more efficient way
 // of listing recursively that doing a directory traversal.
-func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
+func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 	opts := rest.Opts{
 		Method: "GET",
 		Path:   f.filePath(dir),
@@ -788,17 +679,14 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	if f.opt.Device != "Jotta" {
-		return nil, errors.New("upload not supported for devices other than Jotta")
-	}
-	o := f.createObject(src.Remote(), src.ModTime(ctx), src.Size())
-	return o, o.Update(ctx, in, src, options...)
+func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	o := f.createObject(src.Remote(), src.ModTime(), src.Size())
+	return o, o.Update(in, src, options...)
 }

 // mkParentDir makes the parent of the native path dirPath if
 // necessary and any directories above that
-func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {
+func (f *Fs) mkParentDir(dirPath string) error {
 	// defer log.Trace(dirPath, "")("")
 	// chop off trailing / if it exists
 	if strings.HasSuffix(dirPath, "/") {
@@ -808,25 +696,25 @@ func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {
 	if parent == "." {
 		parent = ""
 	}
-	return f.Mkdir(ctx, parent)
+	return f.Mkdir(parent)
 }

 // Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+func (f *Fs) Mkdir(dir string) error {
 	_, err := f.CreateDir(dir)
 	return err
 }

 // purgeCheck removes the root directory, if check is set then it
 // refuses to do so if it has anything in
-func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error) {
+func (f *Fs) purgeCheck(dir string, check bool) (err error) {
 	root := path.Join(f.root, dir)
 	if root == "" {
 		return errors.New("can't purge root directory")
 	}

 	// check that the directory exists
-	entries, err := f.List(ctx, dir)
+	entries, err := f.List(dir)
 	if err != nil {
 		return err
 	}
@@ -866,8 +754,8 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
 // Rmdir deletes the root folder
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-	return f.purgeCheck(ctx, dir, true)
+func (f *Fs) Rmdir(dir string) error {
+	return f.purgeCheck(dir, true)
 }

 // Precision return the precision of this Fs
@@ -880,8 +768,8 @@ func (f *Fs) Precision() time.Duration {
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge(ctx context.Context) error {
-	return f.purgeCheck(ctx, "", false)
+func (f *Fs) Purge() error {
+	return f.purgeCheck("", false)
 }

 // copyOrMoves copies or moves directories or files depending on the method parameter
@@ -914,14 +802,14 @@ func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err erro
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantMove
 	}

-	err := f.mkParentDir(ctx, remote)
+	err := f.mkParentDir(remote)
 	if err != nil {
 		return nil, err
 	}
@@ -944,14 +832,14 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't move - not same remote type")
 		return nil, fs.ErrorCantMove
 	}

-	err := f.mkParentDir(ctx, remote)
+	err := f.mkParentDir(remote)
 	if err != nil {
 		return nil, err
 	}
@@ -973,7 +861,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 	srcFs, ok := src.(*Fs)
 	if !ok {
 		fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -990,7 +878,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	//fmt.Printf("Move src: %s (FullPath %s), dst: %s (FullPath: %s)\n", srcRemote, srcPath, dstRemote, dstPath)

 	var err error
-	_, err = f.List(ctx, dstRemote)
+	_, err = f.List(dstRemote)
 	if err == fs.ErrorDirNotFound {
 		// OK
 	} else if err != nil {
@@ -1008,7 +896,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 }

 // PublicLink generates a public link to the remote path (usually readable by anyone)
-func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
+func (f *Fs) PublicLink(remote string) (link string, err error) {
 	opts := rest.Opts{
 		Method: "GET",
 		Path:   f.filePath(remote),
@@ -1054,8 +942,8 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
 }

 // About gets quota information
-func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
-	info, err := getAccountInfo(f.srv, f.user)
+func (f *Fs) About() (*fs.Usage, error) {
+	info, err := f.getAccountInfo()
 	if err != nil {
 		return nil, err
 	}
@@ -1096,7 +984,7 @@ func (o *Object) Remote() string {
 }

 // Hash returns the MD5 of an object returning a lowercase hex string
-func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
+func (o *Object) Hash(t hash.Type) (string, error) {
 	if t != hash.MD5 {
 		return "", hash.ErrUnsupported
 	}
@@ -1114,7 +1002,7 @@ func (o *Object) Size() int64 {
 }

 // MimeType of an Object if known, "" otherwise
-func (o *Object) MimeType(ctx context.Context) string {
+func (o *Object) MimeType() string {
 	return o.mimeType
 }
@@ -1146,7 +1034,7 @@ func (o *Object) readMetaData(force bool) (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime(ctx context.Context) time.Time {
+func (o *Object) ModTime() time.Time {
 	err := o.readMetaData(false)
 	if err != nil {
 		fs.Logf(o, "Failed to read metadata: %v", err)
@@ -1156,7 +1044,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
 }

 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
+func (o *Object) SetModTime(modTime time.Time) error {
 	return fs.ErrorCantSetModTime
 }
@@ -1166,7 +1054,7 @@ func (o *Object) Storable() bool {
 }

 // Open an object for read
-func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	fs.FixRangeOption(options, o.size)
 	var resp *http.Response
 	opts := rest.Opts{
@@ -1250,9 +1138,9 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
 // If existing is set then it updates the object rather than creating a new one
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 	size := src.Size()
-	md5String, err := src.Hash(ctx, hash.MD5)
+	md5String, err := src.Hash(hash.MD5)
 	if err != nil || md5String == "" {
 		// unwrap the accounting from the input, we use wrap to put it
 		// back on after the buffering
@@ -1275,7 +1163,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		Path:         "allocate",
 		ExtraHeaders: make(map[string]string),
 	}
-	fileDate := api.Time(src.ModTime(ctx)).APIString()
+	fileDate := api.Time(src.ModTime()).APIString()

 	// the allocate request
 	var request = api.AllocateFileRequest{
@@ -1339,7 +1227,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 }

 // Remove an object
-func (o *Object) Remove(ctx context.Context) error {
+func (o *Object) Remove() error {
 	opts := rest.Opts{
 		Method: "POST",
 		Path:   o.filePath(),

View File

@@ -6,7 +6,7 @@ import (
 	"io"
 	"testing"

-	"github.com/rclone/rclone/lib/readers"
+	"github.com/ncw/rclone/lib/readers"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )

View File

@@ -4,8 +4,8 @@ package jottacloud_test
 import (
 	"testing"

-	"github.com/rclone/rclone/backend/jottacloud"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/backend/jottacloud"
+	"github.com/ncw/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote

View File

@@ -1,7 +1,6 @@
 package koofr

 import (
-	"context"
 	"encoding/base64"
 	"errors"
 	"fmt"
@@ -11,11 +10,11 @@ import (
 	"strings"
 	"time"

-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/config/obscure"
-	"github.com/rclone/rclone/fs/hash"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
+	"github.com/ncw/rclone/fs/config/obscure"
+	"github.com/ncw/rclone/fs/hash"

 	httpclient "github.com/koofr/go-httpclient"
 	koofrclient "github.com/koofr/go-koofrclient"
@@ -40,12 +39,6 @@ func init() {
 			Required: false,
 			Default:  "",
 			Advanced: true,
-		}, {
-			Name:     "setmtime",
-			Help:     "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
-			Default:  true,
-			Required: true,
-			Advanced: true,
 		}, {
 			Name: "user",
 			Help: "Your Koofr user name",
@@ -66,7 +59,6 @@ type Options struct {
 	MountID  string `config:"mountid"`
 	User     string `config:"user"`
 	Password string `config:"password"`
-	SetMTime bool   `config:"setmtime"`
 }

 // A Fs is a representation of a remote Koofr Fs
@@ -113,7 +105,7 @@ func (o *Object) Remote() string {
 }

 // ModTime returns the modification time of the Object
-func (o *Object) ModTime(ctx context.Context) time.Time {
+func (o *Object) ModTime() time.Time {
 	return time.Unix(o.info.Modified/1000, (o.info.Modified%1000)*1000*1000)
 }
@@ -128,7 +120,7 @@ func (o *Object) Fs() fs.Info {
 }

 // Hash returns an MD5 hash of the Object
-func (o *Object) Hash(ctx context.Context, typ hash.Type) (string, error) {
+func (o *Object) Hash(typ hash.Type) (string, error) {
 	if typ == hash.MD5 {
 		return o.info.Hash, nil
 	}
@@ -146,12 +138,12 @@ func (o *Object) Storable() bool {
 }

 // SetModTime is not supported
-func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
-	return fs.ErrorCantSetModTimeWithoutDelete
+func (o *Object) SetModTime(mtime time.Time) error {
+	return nil
 }

 // Open opens the Object for reading
-func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
+func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
 	var sOff, eOff int64 = 0, -1

 	for _, option := range options {
@@ -185,13 +177,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
 }

 // Update updates the Object contents
-func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000
-	putopts := &koofrclient.PutOptions{
-		ForceOverwrite:             true,
-		NoRename:                   true,
-		OverwriteIgnoreNonExisting: true,
-		SetModified:                &mtime,
+func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+	putopts := &koofrclient.PutFilter{
+		ForceOverwrite:    true,
+		NoRename:          true,
+		IgnoreNonExisting: true,
 	}
 	fullPath := o.fullPath()
 	dirPath := dir(fullPath)
@@ -200,7 +190,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	if err != nil {
 		return err
 	}
-	info, err := o.fs.client.FilesPutWithOptions(o.fs.mountID, dirPath, name, in, putopts)
+	info, err := o.fs.client.FilesPutOptions(o.fs.mountID, dirPath, name, in, putopts)
 	if err != nil {
 		return err
 	}
@@ -209,7 +199,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 }

 // Remove deletes the remote Object
-func (o *Object) Remove(ctx context.Context) error {
+func (o *Object) Remove() error {
 	return o.fs.client.FilesDelete(o.fs.mountID, o.fullPath())
 }
@@ -235,11 +225,8 @@ func (f *Fs) Features() *fs.Features {

 // Precision denotes that setting modification times is not supported
 func (f *Fs) Precision() time.Duration {
-	if !f.opt.SetMTime {
 	return fs.ModTimeNotSupported
 }
-	return time.Millisecond
-}

 // Hashes returns a set of hashes are Provided by the Fs
 func (f *Fs) Hashes() hash.Set {
@@ -310,7 +297,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 }

 // List returns a list of items in a directory
-func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 	files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
 	if err != nil {
 		return nil, translateErrorsDir(err)
@@ -331,7 +318,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 }

 // NewObject creates a new remote Object for a given remote path
-func (f *Fs) NewObject(ctx context.Context, remote string) (obj fs.Object, err error) {
+func (f *Fs) NewObject(remote string) (obj fs.Object, err error) {
 	info, err := f.client.FilesInfo(f.mountID, f.fullPath(remote))
 	if err != nil {
 		return nil, translateErrorsObject(err)
@@ -347,13 +334,11 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (obj fs.Object, err e
 }

 // Put updates a remote Object
-func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) {
-	mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000
-	putopts := &koofrclient.PutOptions{
-		ForceOverwrite:             true,
-		NoRename:                   true,
-		OverwriteIgnoreNonExisting: true,
-		SetModified:                &mtime,
+func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) {
+	putopts := &koofrclient.PutFilter{
+		ForceOverwrite:    true,
+		NoRename:          true,
+		IgnoreNonExisting: true,
 	}
 	fullPath := f.fullPath(src.Remote())
 	dirPath := dir(fullPath)
@@ -362,7 +347,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 	if err != nil {
 		return nil, err
 	}
-	info, err := f.client.FilesPutWithOptions(f.mountID, dirPath, name, in, putopts)
+	info, err := f.client.FilesPutOptions(f.mountID, dirPath, name, in, putopts)
 	if err != nil {
 		return nil, translateErrorsObject(err)
 	}
@@ -374,8 +359,8 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 }

 // PutStream updates a remote Object with a stream of unknown size
-func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.Put(ctx, in, src, options...)
+func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	return f.Put(in, src, options...)
 }

 // isBadRequest is a predicate which holds true iff the error returned was
@@ -451,13 +436,13 @@ func (f *Fs) mkdir(fullPath string) error {
 // Mkdir creates a directory at the given remote path. Creates ancestors if
 // necessary
-func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+func (f *Fs) Mkdir(dir string) error {
 	fullPath := f.fullPath(dir)
 	return f.mkdir(fullPath)
 }

 // Rmdir removes an (empty) directory at the given remote path
-func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+func (f *Fs) Rmdir(dir string) error {
 	files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
 	if err != nil {
 		return translateErrorsDir(err)
@@ -473,25 +458,24 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 }

 // Copy copies a remote Object to the given path
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 	dstFullPath := f.fullPath(remote)
 	dstDir := dir(dstFullPath)
 	err := f.mkdir(dstDir)
 	if err != nil {
 		return nil, fs.ErrorCantCopy
 	}
-	mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000
 	err = f.client.FilesCopy((src.(*Object)).fs.mountID,
 		(src.(*Object)).fs.fullPath((src.(*Object)).remote),
-		f.mountID, dstFullPath, koofrclient.CopyOptions{SetModified: &mtime})
+		f.mountID, dstFullPath)
 	if err != nil {
 		return nil, fs.ErrorCantCopy
 	}
-	return f.NewObject(ctx, remote)
+	return f.NewObject(remote)
 }

 // Move moves a remote Object to the given path
-func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 	srcObj := src.(*Object)
 	dstFullPath := f.fullPath(remote)
 	dstDir := dir(dstFullPath)
@@ -504,11 +488,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	if err != nil {
 		return nil, fs.ErrorCantMove
 	}
-	return f.NewObject(ctx, remote)
+	return f.NewObject(remote)
 }

 // DirMove moves a remote directory to the given path
-func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 	srcFs := src.(*Fs)
 	srcFullPath := srcFs.fullPath(srcRemote)
 	dstFullPath := f.fullPath(dstRemote)
@@ -528,7 +512,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 }

 // About reports space usage (with a MB precision)
-func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
+func (f *Fs) About() (*fs.Usage, error) {
 	mount, err := f.client.MountsDetails(f.mountID)
 	if err != nil {
 		return nil, err
@@ -544,7 +528,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 }

 // Purge purges the complete Fs
-func (f *Fs) Purge(ctx context.Context) error {
+func (f *Fs) Purge() error {
 	err := translateErrorsDir(f.client.FilesDelete(f.mountID, f.fullPath("")))
 	return err
 }
@@ -596,7 +580,7 @@ func createLink(c *koofrclient.KoofrClient, mountID string, path string) (*link,
 }

 // PublicLink creates a public link to the remote path
-func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
+func (f *Fs) PublicLink(remote string) (string, error) {
 	linkData, err := createLink(f.client, f.mountID, f.fullPath(remote))
 	if err != nil {
 		return "", translateErrorsDir(err)

View File

@@ -3,7 +3,7 @@ package koofr_test
 import (
 	"testing"

-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote

View File

@@ -3,15 +3,14 @@
package local package local
import ( import (
"context"
"syscall" "syscall"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
) )
// About gets quota information // About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { func (f *Fs) About() (*fs.Usage, error) {
var s syscall.Statfs_t var s syscall.Statfs_t
err := syscall.Statfs(f.root, &s) err := syscall.Statfs(f.root, &s)
if err != nil { if err != nil {
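
The Unix About implementation above fills its usage figures from a syscall.Statfs_t, converting block counts into bytes. A sketch of that arithmetic, assuming the Linux statfs(2) field layout; the usage struct is illustrative, not rclone's fs.Usage:

// Sketch: deriving byte counts from syscall.Statfs_t, assuming the
// Linux statfs(2) field layout. usage is illustrative, not fs.Usage.
package example

import "syscall"

type usage struct {
	total, used, free int64 // bytes
}

func diskUsage(path string) (usage, error) {
	var s syscall.Statfs_t
	if err := syscall.Statfs(path, &s); err != nil {
		return usage{}, err
	}
	bs := int64(s.Bsize)
	return usage{
		total: int64(s.Blocks) * bs,                    // filesystem size
		used:  (int64(s.Blocks) - int64(s.Bfree)) * bs, // blocks in use
		free:  int64(s.Bavail) * bs,                    // available to non-root
	}, nil
}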


@@ -3,18 +3,17 @@
package local package local
import ( import (
"context"
"syscall" "syscall"
"unsafe" "unsafe"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
) )
var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW") var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
// About gets quota information // About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { func (f *Fs) About() (*fs.Usage, error) {
var available, total, free int64 var available, total, free int64
_, _, e1 := getFreeDiskSpace.Call( _, _, e1 := getFreeDiskSpace.Call(
uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))), uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),
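
The Windows variant gets the same numbers from GetDiskFreeSpaceExW, loaded lazily from kernel32.dll as shown. A hedged sketch of the complete call; the out-parameter order (available to caller, total, free) matches the variable declaration in the diff:

// Sketch (Windows-only): the full GetDiskFreeSpaceExW call. The three
// out-parameters are 64-bit byte counts defined by the Windows API.
package example

import (
	"syscall"
	"unsafe"
)

var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")

func diskSpace(root string) (available, total, free int64, err error) {
	ret, _, e1 := getFreeDiskSpace.Call(
		uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(root))),
		uintptr(unsafe.Pointer(&available)), // free bytes available to the caller
		uintptr(unsafe.Pointer(&total)),     // total bytes on the volume
		uintptr(unsafe.Pointer(&free)),      // total free bytes
	)
	if ret == 0 { // the API returns zero on failure
		return 0, 0, 0, e1
	}
	return available, total, free, nil
}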


@@ -3,7 +3,6 @@ package local
import ( import (
"bytes" "bytes"
"context"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
@@ -17,21 +16,20 @@ import (
"time" "time"
"unicode/utf8" "unicode/utf8"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/file"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers"
) )
// Constants // Constants
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
const linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link const linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
const useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
// Register with Fs // Register with Fs
func init() { func init() {
@@ -86,7 +84,7 @@ are being uploaded and aborts with a message which starts "can't copy
- source file is being updated" if the file changes during upload. - source file is being updated" if the file changes during upload.
However on some file systems this modification time check may fail (eg However on some file systems this modification time check may fail (eg
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this [Glusterfs #2206](https://github.com/ncw/rclone/issues/2206)) so this
check can be disabled with this flag.`, check can be disabled with this flag.`,
Default: false, Default: false,
Advanced: true, Advanced: true,
@@ -97,24 +95,6 @@ check can be disabled with this flag.`,
NoPrefix: true, NoPrefix: true,
ShortOpt: "x", ShortOpt: "x",
Advanced: true, Advanced: true,
}, {
Name: "case_sensitive",
Help: `Force the filesystem to report itself as case sensitive.
Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
Default: false,
Advanced: true,
}, {
Name: "case_insensitive",
Help: `Force the filesystem to report itself as case insensitive.
Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
Default: false,
Advanced: true,
}}, }},
} }
fs.Register(fsi) fs.Register(fsi)
@@ -129,8 +109,6 @@ type Options struct {
NoCheckUpdated bool `config:"no_check_updated"` NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"` NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"` OneFileSystem bool `config:"one_file_system"`
CaseSensitive bool `config:"case_sensitive"`
CaseInsensitive bool `config:"case_insensitive"`
} }
// Fs represents a local filesystem rooted at root // Fs represents a local filesystem rooted at root
@@ -249,12 +227,6 @@ func (f *Fs) Features() *fs.Features {
// caseInsensitive returns whether the remote is case insensitive or not // caseInsensitive returns whether the remote is case insensitive or not
func (f *Fs) caseInsensitive() bool { func (f *Fs) caseInsensitive() bool {
if f.opt.CaseSensitive {
return false
}
if f.opt.CaseInsensitive {
return true
}
// FIXME not entirely accurate since you can have case // FIXME not entirely accurate since you can have case
// sensitive Fses on darwin and case insensitive Fses on linux. // sensitive Fses on darwin and case insensitive Fses on linux.
// Should probably check but that would involve creating a // Should probably check but that would involve creating a
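
The two new case_sensitive/case_insensitive options short-circuit that platform guess. A sketch of the resulting decision order (explicit overrides first, then the OS default); the generated flag names shown in the comment are assumptions based on rclone's usual backend-prefix convention:

// Sketch: case sensitivity resolution with explicit user overrides.
package example

import "runtime"

// isCaseInsensitive mirrors the new option handling; the generated
// flags would presumably be --local-case-sensitive and
// --local-case-insensitive (assumed names).
func isCaseInsensitive(caseSensitive, caseInsensitive bool) bool {
	if caseSensitive { // explicit override wins
		return false
	}
	if caseInsensitive {
		return true
	}
	// Fall back to the platform default, which is only an
	// approximation (see the FIXME in the code above).
	return runtime.GOOS == "windows" || runtime.GOOS == "darwin"
}
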
@@ -330,7 +302,7 @@ func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Obj
// NewObject finds the Object at remote. If it can't be found // NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound. // it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, "", nil) return f.newObjectWithInfo(remote, "", nil)
} }
@@ -343,7 +315,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// //
// This should return ErrDirNotFound if the directory isn't // This should return ErrDirNotFound if the directory isn't
// found. // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
dir = f.dirNames.Load(dir) dir = f.dirNames.Load(dir)
fsDirPath := f.cleanPath(filepath.Join(f.root, dir)) fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
@@ -355,14 +327,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fd, err := os.Open(fsDirPath) fd, err := os.Open(fsDirPath)
if err != nil { if err != nil {
isPerm := os.IsPermission(err) return nil, errors.Wrapf(err, "failed to open directory %q", dir)
err = errors.Wrapf(err, "failed to open directory %q", dir)
fs.Errorf(dir, "%v", err)
if isPerm {
accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
err = nil // ignore error but fail sync
}
return nil, err
} }
defer func() { defer func() {
cerr := fd.Close() cerr := fd.Close()
@@ -372,38 +337,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}() }()
for { for {
var fis []os.FileInfo fis, err := fd.Readdir(1024)
if useReadDir {
// Windows and Plan9 read the directory entries with the stat information in,
// which shouldn't fail because of unreadable entries.
fis, err = fd.Readdir(1024)
if err == io.EOF && len(fis) == 0 { if err == io.EOF && len(fis) == 0 {
break break
} }
} else {
// For other OSes we read the names only (which shouldn't fail) then stat the
// individual entries ourselves so we can log errors but not fail the directory read.
var names []string
names, err = fd.Readdirnames(1024)
if err == io.EOF && len(names) == 0 {
break
}
if err == nil {
for _, name := range names {
namepath := filepath.Join(fsDirPath, name)
fi, fierr := os.Lstat(namepath)
if fierr != nil {
err = errors.Wrapf(err, "failed to read directory %q", namepath)
fs.Errorf(dir, "%v", fierr)
accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue
}
fis = append(fis, fi)
}
}
}
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to read directory entry") return nil, errors.Wrapf(err, "failed to read directory %q", dir)
} }
for _, fi := range fis { for _, fi := range fis {
@@ -418,7 +357,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Skip bad symlinks // Skip bad symlinks
err = fserrors.NoRetryError(errors.Wrap(err, "symlink")) err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
fs.Errorf(newRemote, "Listing error: %v", err) fs.Errorf(newRemote, "Listing error: %v", err)
accounting.Stats(ctx).Error(err) accounting.Stats.Error(err)
continue continue
} }
if err != nil { if err != nil {
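
The rewritten listing loop reads names first and stats each entry itself, so one unreadable entry is logged and skipped instead of aborting the whole directory. A condensed sketch of that pattern using only the standard library (rclone additionally routes the error into accounting so the sync still fails):

// Sketch: listing a directory without failing on individual entries.
package example

import (
	"io"
	"log"
	"os"
	"path/filepath"
)

func listResilient(dirPath string) ([]os.FileInfo, error) {
	fd, err := os.Open(dirPath)
	if err != nil {
		return nil, err
	}
	defer fd.Close()
	var fis []os.FileInfo
	for {
		names, err := fd.Readdirnames(1024) // names only: cannot fail per entry
		if err == io.EOF && len(names) == 0 {
			break
		}
		if err != nil {
			return nil, err
		}
		for _, name := range names {
			fi, statErr := os.Lstat(filepath.Join(dirPath, name))
			if statErr != nil {
				log.Printf("skipping %q: %v", name, statErr) // log and carry on
				continue
			}
			fis = append(fis, fi)
		}
	}
	return fis, nil
}
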
@@ -508,11 +447,11 @@ func (m *mapper) Save(in, out string) string {
} }
// Put the Object to the local filesystem // Put the Object to the local filesystem
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote() remote := src.Remote()
// Temporary Object under construction - info filled in by Update() // Temporary Object under construction - info filled in by Update()
o := f.newObject(remote, "") o := f.newObject(remote, "")
err := o.Update(ctx, in, src, options...) err := o.Update(in, src, options...)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -520,12 +459,12 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
} }
// PutStream uploads to the remote path with the modTime given of indeterminate size // PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...) return f.Put(in, src, options...)
} }
// Mkdir creates the directory if it doesn't exist // Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error { func (f *Fs) Mkdir(dir string) error {
// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go // FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
root := f.cleanPath(filepath.Join(f.root, dir)) root := f.cleanPath(filepath.Join(f.root, dir))
err := os.MkdirAll(root, 0777) err := os.MkdirAll(root, 0777)
@@ -545,7 +484,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// Rmdir removes the directory // Rmdir removes the directory
// //
// If it isn't empty it will return an error // If it isn't empty it will return an error
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(dir string) error {
root := f.cleanPath(filepath.Join(f.root, dir)) root := f.cleanPath(filepath.Join(f.root, dir))
return os.Remove(root) return os.Remove(root)
} }
@@ -601,7 +540,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {
} }
// If it matches - have found the precision // If it matches - have found the precision
// fmt.Println("compare", fi.ModTime(ctx), t) // fmt.Println("compare", fi.ModTime(), t)
if fi.ModTime().Equal(t) { if fi.ModTime().Equal(t) {
// fmt.Println("Precision detected as", duration) // fmt.Println("Precision detected as", duration)
return duration return duration
@@ -615,7 +554,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {
// Optional interface: Only implement this if you have a way of // Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the // deleting all the files quicker than just running Remove() on the
// result of List() // result of List()
func (f *Fs) Purge(ctx context.Context) error { func (f *Fs) Purge() error {
fi, err := f.lstat(f.root) fi, err := f.lstat(f.root)
if err != nil { if err != nil {
return err return err
@@ -635,7 +574,7 @@ func (f *Fs) Purge(ctx context.Context) error {
// Will only be called if src.Fs().Name() == f.Name() // Will only be called if src.Fs().Name() == f.Name()
// //
// If it isn't possible then return fs.ErrorCantMove // If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object) srcObj, ok := src.(*Object)
if !ok { if !ok {
fs.Debugf(src, "Can't move - not same remote type") fs.Debugf(src, "Can't move - not same remote type")
@@ -694,7 +633,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// If it isn't possible then return fs.ErrorCantDirMove // If it isn't possible then return fs.ErrorCantDirMove
// //
// If destination exists then return fs.ErrorDirExists // If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs) srcFs, ok := src.(*Fs)
if !ok { if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type") fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -759,7 +698,7 @@ func (o *Object) Remote() string {
} }
// Hash returns the requested hash of a file as a lowercase hex string // Hash returns the requested hash of a file as a lowercase hex string
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) { func (o *Object) Hash(r hash.Type) (string, error) {
// Check that the underlying file hasn't changed // Check that the underlying file hasn't changed
oldtime := o.modTime oldtime := o.modTime
oldsize := o.size oldsize := o.size
@@ -770,10 +709,9 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
o.fs.objectHashesMu.Lock() o.fs.objectHashesMu.Lock()
hashes := o.hashes hashes := o.hashes
hashValue, hashFound := o.hashes[r]
o.fs.objectHashesMu.Unlock() o.fs.objectHashesMu.Unlock()
if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil || !hashFound { if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil {
var in io.ReadCloser var in io.ReadCloser
if !o.translatedLink { if !o.translatedLink {
@@ -784,7 +722,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
if err != nil { if err != nil {
return "", errors.Wrap(err, "hash: failed to open") return "", errors.Wrap(err, "hash: failed to open")
} }
hashes, err = hash.StreamTypes(in, hash.NewHashSet(r)) hashes, err = hash.Stream(in)
closeErr := in.Close() closeErr := in.Close()
if err != nil { if err != nil {
return "", errors.Wrap(err, "hash: failed to read") return "", errors.Wrap(err, "hash: failed to read")
@@ -792,16 +730,11 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
if closeErr != nil { if closeErr != nil {
return "", errors.Wrap(closeErr, "hash: failed to close") return "", errors.Wrap(closeErr, "hash: failed to close")
} }
hashValue = hashes[r]
o.fs.objectHashesMu.Lock() o.fs.objectHashesMu.Lock()
if o.hashes == nil {
o.hashes = hashes o.hashes = hashes
} else {
o.hashes[r] = hashValue
}
o.fs.objectHashesMu.Unlock() o.fs.objectHashesMu.Unlock()
} }
return hashValue, nil return hashes[r], nil
} }
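
Hash now caches one computed value per hash type and recomputes only when the size or modtime changes. A minimal sketch of the per-type lazy cache; the compute callback stands in for streaming the file through a hasher, and the invalidation check is omitted:

// Sketch: caching one computed hash per hash type under a mutex.
package example

import "sync"

type hashType int

type object struct {
	mu     sync.Mutex
	hashes map[hashType]string
}

// hashFor returns the cached value for t, computing it at most once
// per type. compute stands in for hashing the file contents.
func (o *object) hashFor(t hashType, compute func() (string, error)) (string, error) {
	o.mu.Lock()
	v, ok := o.hashes[t]
	o.mu.Unlock()
	if ok {
		return v, nil
	}
	v, err := compute() // hash the contents outside the lock
	if err != nil {
		return "", err
	}
	o.mu.Lock()
	if o.hashes == nil {
		o.hashes = map[hashType]string{}
	}
	o.hashes[t] = v
	o.mu.Unlock()
	return v, nil
}
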
// Size returns the size of an object in bytes // Size returns the size of an object in bytes
@@ -810,12 +743,12 @@ func (o *Object) Size() int64 {
} }
// ModTime returns the modification time of the object // ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time { func (o *Object) ModTime() time.Time {
return o.modTime return o.modTime
} }
// SetModTime sets the modification time of the local fs object // SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { func (o *Object) SetModTime(modTime time.Time) error {
var err error var err error
if o.translatedLink { if o.translatedLink {
err = lChtimes(o.path, modTime, modTime) err = lChtimes(o.path, modTime, modTime)
@@ -911,7 +844,7 @@ func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err
} }
// Open an object for read // Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset, limit int64 = 0, -1 var offset, limit int64 = 0, -1
hashes := hash.Supported hashes := hash.Supported
for _, option := range options { for _, option := range options {
@@ -975,7 +908,7 @@ func (nwc nopWriterCloser) Close() error {
} }
// Update the object from in with modTime and size // Update the object from in with modTime and size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
var out io.WriteCloser var out io.WriteCloser
hashes := hash.Supported hashes := hash.Supported
@@ -1056,7 +989,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
o.fs.objectHashesMu.Unlock() o.fs.objectHashesMu.Unlock()
// Set the mtime // Set the mtime
err = o.SetModTime(ctx, src.ModTime(ctx)) err = o.SetModTime(src.ModTime())
if err != nil { if err != nil {
return err return err
} }
@@ -1065,36 +998,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.lstat() return o.lstat()
} }
// OpenWriterAt opens the file with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
// Temporary Object under construction
o := f.newObject(remote, "")
err := o.mkdirAll()
if err != nil {
return nil, err
}
if o.translatedLink {
return nil, errors.New("can't open a symlink for random writing")
}
out, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return nil, err
}
// Pre-allocate the file for performance reasons
err = preAllocate(size, out)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
return out, nil
}
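
OpenWriterAt exists so that several goroutines can each write a different range of one file, which is what makes multi-threaded downloads possible. A hedged usage sketch; writerAtCloser stands in for fs.WriterAtCloser, and per the io.WriterAt contract parallel WriteAt calls are allowed when the ranges do not overlap:

// Sketch: writing several ranges of one file in parallel via WriteAt.
package example

import (
	"io"
	"sync"
)

type writerAtCloser interface {
	io.WriterAt
	io.Closer
}

// writeChunks writes each chunk at its own offset concurrently.
func writeChunks(w writerAtCloser, chunks map[int64][]byte) error {
	var wg sync.WaitGroup
	errs := make(chan error, len(chunks))
	for off, data := range chunks {
		wg.Add(1)
		go func(off int64, data []byte) {
			defer wg.Done()
			if _, err := w.WriteAt(data, off); err != nil {
				errs <- err
			}
		}(off, data)
	}
	wg.Wait()
	close(errs)
	if err := <-errs; err != nil { // nil if no goroutine failed
		_ = w.Close() // best-effort cleanup
		return err
	}
	return w.Close()
}
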
// setMetadata sets the file info from the os.FileInfo passed in // setMetadata sets the file info from the os.FileInfo passed in
func (o *Object) setMetadata(info os.FileInfo) { func (o *Object) setMetadata(info os.FileInfo) {
// Don't overwrite the info if we don't need to // Don't overwrite the info if we don't need to
@@ -1120,7 +1023,7 @@ func (o *Object) lstat() error {
} }
// Remove an object // Remove an object
func (o *Object) Remove(ctx context.Context) error { func (o *Object) Remove() error {
return remove(o.path) return remove(o.path)
} }
@@ -1241,6 +1144,5 @@ var (
_ fs.PutStreamer = &Fs{} _ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{} _ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{} _ fs.DirMover = &Fs{}
_ fs.OpenWriterAter = &Fs{}
_ fs.Object = &Object{} _ fs.Object = &Object{}
) )
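
The var block above is the standard Go compile-time check that *Fs and *Object still satisfy the optional interfaces after a refactor like this one. The pattern in isolation:

// Sketch: zero-cost compile-time interface conformance checks.
package example

import "io"

type file struct{}

func (f *file) Read(p []byte) (int, error) { return 0, io.EOF }

// This fails to compile if *file ever stops implementing io.Reader,
// which is exactly how the var block above guards the Fs refactor.
var _ io.Reader = (*file)(nil)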


@@ -1,7 +1,6 @@
package local package local
import ( import (
"context"
"io/ioutil" "io/ioutil"
"os" "os"
"path" "path"
@@ -10,12 +9,12 @@ import (
"testing" "testing"
"time" "time"
"github.com/rclone/rclone/fs" "github.com/ncw/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap" "github.com/ncw/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/hash" "github.com/ncw/rclone/fs/hash"
"github.com/rclone/rclone/fstest" "github.com/ncw/rclone/fstest"
"github.com/rclone/rclone/lib/file" "github.com/ncw/rclone/lib/file"
"github.com/rclone/rclone/lib/readers" "github.com/ncw/rclone/lib/readers"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@@ -84,7 +83,6 @@ func TestUpdatingCheck(t *testing.T) {
} }
func TestSymlink(t *testing.T) { func TestSymlink(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t) r := fstest.NewRun(t)
defer r.Finalise() defer r.Finalise()
f := r.Flocal.(*Fs) f := r.Flocal.(*Fs)
@@ -133,7 +131,7 @@ func TestSymlink(t *testing.T) {
// Create a symlink // Create a symlink
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z") modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false) file3 := r.WriteObjectTo(r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
if runtime.GOOS == "windows" { if runtime.GOOS == "windows" {
file3.Size = 0 // symlinks are 0 length under Windows file3.Size = 0 // symlinks are 0 length under Windows
} }
@@ -152,7 +150,7 @@ func TestSymlink(t *testing.T) {
assert.Equal(t, "file.txt", linkText) assert.Equal(t, "file.txt", linkText)
// Check that NewObject gets the correct object // Check that NewObject gets the correct object
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix) o, err := r.Flocal.NewObject("symlink2.txt" + linkSuffix)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote()) assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
if runtime.GOOS != "windows" { if runtime.GOOS != "windows" {
@@ -160,11 +158,11 @@ func TestSymlink(t *testing.T) {
} }
// Check that NewObject doesn't see the non suffixed version // Check that NewObject doesn't see the non suffixed version
_, err = r.Flocal.NewObject(ctx, "symlink2.txt") _, err = r.Flocal.NewObject("symlink2.txt")
require.Equal(t, fs.ErrorObjectNotFound, err) require.Equal(t, fs.ErrorObjectNotFound, err)
// Check reading the object // Check reading the object
in, err := o.Open(ctx) in, err := o.Open()
require.NoError(t, err) require.NoError(t, err)
contents, err := ioutil.ReadAll(in) contents, err := ioutil.ReadAll(in)
require.NoError(t, err) require.NoError(t, err)
@@ -172,7 +170,7 @@ func TestSymlink(t *testing.T) {
require.NoError(t, in.Close()) require.NoError(t, in.Close())
// Check reading the object with range // Check reading the object with range
in, err = o.Open(ctx, &fs.RangeOption{Start: 2, End: 5}) in, err = o.Open(&fs.RangeOption{Start: 2, End: 5})
require.NoError(t, err) require.NoError(t, err)
contents, err = ioutil.ReadAll(in) contents, err = ioutil.ReadAll(in)
require.NoError(t, err) require.NoError(t, err)
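
The test above exercises the linkSuffix translation: with link translation enabled, the local backend represents a symlink as a text object named <name>.rclonelink whose contents are the link target (hence reading "file.txt" back). A sketch of the naming rule in isolation:

// Sketch: the ".rclonelink" name translation for symlinks.
package example

import "strings"

const linkSuffix = ".rclonelink"

// toTranslated maps an on-disk symlink name to its object name; the
// object's contents are the link target, not the file it points at.
func toTranslated(name string) string { return name + linkSuffix }

// fromTranslated reports whether name is a translated link and, if so,
// returns the original symlink name.
func fromTranslated(name string) (string, bool) {
	if strings.HasSuffix(name, linkSuffix) {
		return strings.TrimSuffix(name, linkSuffix), true
	}
	return name, false
}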


@@ -4,8 +4,8 @@ package local_test
import ( import (
"testing" "testing"
"github.com/rclone/rclone/backend/local" "github.com/ncw/rclone/backend/local"
"github.com/rclone/rclone/fstest/fstests" "github.com/ncw/rclone/fstest/fstests"
) )
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote


@@ -4,40 +4,16 @@ package local
import ( import (
"os" "os"
"sync/atomic"
"github.com/rclone/rclone/fs"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
var (
fallocFlags = [...]uint32{
unix.FALLOC_FL_KEEP_SIZE, // Default
unix.FALLOC_FL_KEEP_SIZE | unix.FALLOC_FL_PUNCH_HOLE, // for ZFS #3066
}
fallocFlagsIndex int32
)
// preAllocate the file for performance reasons // preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error { func preAllocate(size int64, out *os.File) error {
if size <= 0 { if size <= 0 {
return nil return nil
} }
index := atomic.LoadInt32(&fallocFlagsIndex) err := unix.Fallocate(int(out.Fd()), unix.FALLOC_FL_KEEP_SIZE, 0, size)
again:
if index >= int32(len(fallocFlags)) {
return nil // Fallocate is disabled
}
flags := fallocFlags[index]
err := unix.Fallocate(int(out.Fd()), flags, 0, size)
if err == unix.ENOTSUP {
// Try the next flags combination
index++
atomic.StoreInt32(&fallocFlagsIndex, index)
fs.Debugf(nil, "preAllocate: got error on fallocate, trying combination %d/%d: %v", index, len(fallocFlags), err)
goto again
}
// FIXME could be doing something here // FIXME could be doing something here
// if err == unix.ENOSPC { // if err == unix.ENOSPC {
// log.Printf("No space") // log.Printf("No space")
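
The fallocate wrapper above remembers, via an atomic index, which flag combination the filesystem accepts, so the ENOTSUP probe for filesystems like ZFS (#3066) happens once per process rather than once per file. A hedged sketch of how a caller such as OpenWriterAt would use it; preAllocate is stubbed here so the example is self-contained:

// Sketch: pre-allocating before a large sequential write.
package example

import "os"

// preAllocate stands in for the platform-specific helper shown above.
func preAllocate(size int64, out *os.File) error { return nil }

// createForWrite opens a fresh file and asks the kernel to reserve
// space for it up front, mirroring how OpenWriterAt uses preAllocate.
func createForWrite(path string, size int64) (*os.File, error) {
	out, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		return nil, err
	}
	// Best effort: a pre-allocation failure only costs performance,
	// so it is ignored rather than returned.
	_ = preAllocate(size, out)
	return out, nil
}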


@@ -8,7 +8,7 @@ import (
"os" "os"
"syscall" "syscall"
"github.com/rclone/rclone/fs" "github.com/ncw/rclone/fs"
) )
// readDevice turns a valid os.FileInfo into a device number, // readDevice turns a valid os.FileInfo into a device number,


@@ -7,7 +7,7 @@ import (
"syscall" "syscall"
"time" "time"
"github.com/rclone/rclone/fs" "github.com/ncw/rclone/fs"
) )
const ( const (

Some files were not shown because too many files have changed in this diff.