Mirror of https://github.com/rclone/rclone.git (synced 2026-01-08 19:43:58 +00:00)

Compare commits: fix-mega-b...v1.43.1 (7 commits)
Commits:

- 307b3442a5
- a9ee9f8872
- fa60290596
- 9eec3df300
- 0039e231c8
- 1550888bc3
- 0c11eec70e
.appveyor.yml (new file, 46 lines)
@@ -0,0 +1,46 @@
version: "{build}"

os: Windows Server 2012 R2

clone_folder: c:\gopath\src\github.com\ncw\rclone

environment:
  GOPATH: C:\gopath
  CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
  ORIGPATH: '%PATH%'
  NOCCPATH: C:\MinGW\bin;%GOPATH%\bin;%PATH%
  PATHCC64: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%NOCCPATH%
  PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
  PATH: '%PATHCC64%'
  RCLONE_CONFIG_PASS:
    secure: HbzxSy9zQ8NYWN9NNPf6ALQO9Q0mwRNqwehsLcOEHy0=

install:
  - choco install winfsp -y
  - choco install zip -y
  - copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe

build_script:
  - echo %PATH%
  - echo %GOPATH%
  - go version
  - go env
  - go install
  - go build
  - make log_since_last_release > %TEMP%\git-log.txt
  - make version > %TEMP%\version
  - set /p RCLONE_VERSION=<%TEMP%\version
  - set PATH=%PATHCC32%
  - go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/386" -cgo -tags cmount %RCLONE_VERSION%
  - set PATH=%PATHCC64%
  - go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/amd64" -cgo -no-clean -tags cmount %RCLONE_VERSION%

test_script:
  - make GOTAGS=cmount quicktest

artifacts:
  - path: rclone.exe
  - path: build/*-v*.zip

deploy_script:
  - IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
.circleci/config.yml (new file, 34 lines)
@@ -0,0 +1,34 @@
version: 2

jobs:
  build:
    machine: true

    working_directory: ~/.go_workspace/src/github.com/ncw/rclone

    steps:
      - checkout

      - run:
          name: Cross-compile rclone
          command: |
            docker pull billziss/xgo-cgofuse
            go get -v github.com/karalabe/xgo
            xgo \
                --image=billziss/xgo-cgofuse \
                --targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
                -tags cmount \
                .
            xgo \
                --targets=android/*,ios/* \
                .

      - run:
          name: Prepare artifacts
          command: |
            mkdir -p /tmp/rclone.dist
            cp -R rclone-* /tmp/rclone.dist

      - store_artifacts:
          path: /tmp/rclone.dist
.gitattributes (file removed, 7 lines)
@@ -1,7 +0,0 @@
# Ignore generated files in GitHub language statistics and diffs
/MANUAL.* linguist-generated=true
/rclone.1 linguist-generated=true

# Don't fiddle with the line endings of test data
**/testdata/** -text
**/test/** -text
.github/ISSUE_TEMPLATE.md (file removed, 31 lines)
@@ -1,31 +0,0 @@
<!--

Welcome :-) We understand you are having a problem with rclone; we want to help you with that!

If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:

https://forum.rclone.org/

instead of filing an issue for a quick response.

If you are reporting a bug or asking for a new feature then please use one of the templates here:

https://github.com/rclone/rclone/issues/new

otherwise fill in the form below.

Thank you

The Rclone Developers

-->

#### Output of `rclone version`

#### Describe the issue
.github/ISSUE_TEMPLATE/Feature.md (file removed, 36 lines)
@@ -1,36 +0,0 @@
---
name: Feature request
about: Suggest a new feature or enhancement for rclone
---

<!--

Welcome :-)

So you've got an idea to improve rclone? We love that! You'll be glad to hear we've incorporated hundreds of ideas from contributors already.

Here is a checklist of things to do:

1. Please search the old issues first for your idea and +1 or comment on an existing issue if possible.
2. Discuss on the forum first: https://forum.rclone.org/
3. Make a feature request issue (this is the right place!).
4. Be prepared to get involved making the feature :-)

Looking forward to your great idea!

The Rclone Developers

-->

#### What is your current rclone version (output from `rclone version`)?

#### What problem are you are trying to solve?

#### How do you think rclone should be changed to solve that?
.github/PULL_REQUEST_TEMPLATE.md (file removed, 29 lines)
@@ -1,29 +0,0 @@
<!--
Thank you very much for contributing code or documentation to rclone! Please
fill out the following questions to make it easier for us to review your
changes.

You do not need to check all the boxes below all at once, feel free to take
your time and add more commits. If you're done and ready for review, please
check the last box.
-->

#### What is the purpose of this change?

<!--
Describe the changes here
-->

#### Was the change discussed in an issue or in the forum before?

<!--
Link issues and relevant forum posts here.
-->

#### Checklist

- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
- [ ] I have added tests for all changes in this PR if appropriate.
- [ ] I have added documentation for the changes if appropriate.
- [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).
- [ ] I'm done, this Pull Request is ready for review :-)
.github/workflows/build.yml (file removed, 250 lines)
@@ -1,250 +0,0 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable build.yml" -*-

name: build

# Trigger the workflow on push or pull request
on:
  push:
    branches:
      - '*'
    tags:
      - '*'
  pull_request:

jobs:
  build:
    timeout-minutes: 60
    strategy:
      fail-fast: false
      matrix:
        job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'modules_race', 'go1.10', 'go1.11', 'go1.12']

        include:
          - job_name: linux
            os: ubuntu-latest
            go: '1.13.x'
            modules: 'off'
            gotags: cmount
            build_flags: '-include "^linux/"'
            check: true
            quicktest: true
            deploy: true

          - job_name: mac
            os: macOS-latest
            go: '1.13.x'
            modules: 'off'
            gotags: ''  # cmount doesn't work on osx travis for some reason
            build_flags: '-include "^darwin/amd64" -cgo'
            quicktest: true
            racequicktest: true
            deploy: true

          - job_name: windows_amd64
            os: windows-latest
            go: '1.13.x'
            modules: 'off'
            gotags: cmount
            build_flags: '-include "^windows/amd64" -cgo'
            quicktest: true
            racequicktest: true
            deploy: true

          - job_name: windows_386
            os: windows-latest
            go: '1.13.x'
            modules: 'off'
            gotags: cmount
            goarch: '386'
            cgo: '1'
            build_flags: '-include "^windows/386" -cgo'
            quicktest: true
            deploy: true

          - job_name: other_os
            os: ubuntu-latest
            go: '1.13.x'
            modules: 'off'
            build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
            compile_all: true
            deploy: true

          - job_name: modules_race
            os: ubuntu-latest
            go: '1.13.x'
            modules: 'on'
            quicktest: true
            racequicktest: true

          - job_name: go1.10
            os: ubuntu-latest
            go: '1.10.x'
            modules: 'off'
            quicktest: true

          - job_name: go1.11
            os: ubuntu-latest
            go: '1.11.x'
            modules: 'off'
            quicktest: true

          - job_name: go1.12
            os: ubuntu-latest
            go: '1.12.x'
            modules: 'off'
            quicktest: true

    name: ${{ matrix.job_name }}

    runs-on: ${{ matrix.os }}

    steps:
      - name: Checkout
        uses: actions/checkout@master
        with:
          path: ./src/github.com/${{ github.repository }}

      - name: Install Go
        uses: actions/setup-go@v1
        with:
          go-version: ${{ matrix.go }}

      - name: Set environment variables
        shell: bash
        run: |
          echo '::set-env name=GOPATH::${{ runner.workspace }}'
          echo '::add-path::${{ runner.workspace }}/bin'
          echo '::set-env name=GO111MODULE::${{ matrix.modules }}'
          echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
          echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
          if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
          if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi

      - name: Install Libraries on Linux
        shell: bash
        run: |
          sudo modprobe fuse
          sudo chmod 666 /dev/fuse
          sudo chown root:$USER /etc/fuse.conf
          sudo apt-get install fuse libfuse-dev rpm pkg-config
        if: matrix.os == 'ubuntu-latest'

      - name: Install Libraries on macOS
        shell: bash
        run: |
          brew update
          brew cask install osxfuse
        if: matrix.os == 'macOS-latest'

      - name: Install Libraries on Windows
        shell: powershell
        run: |
          $ProgressPreference = 'SilentlyContinue'
          choco install -y winfsp zip
          Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
          if ($env:GOARCH -eq "386") {
            choco install -y mingw --forcex86 --force
            Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
          }
          # Copy mingw32-make.exe to make.exe so the same command line
          # can be used on Windows as on macOS and Linux
          $path = (get-command mingw32-make.exe).Path
          Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
        if: matrix.os == 'windows-latest'

      - name: Print Go version and environment
        shell: bash
        run: |
          printf "Using go at: $(which go)\n"
          printf "Go version: $(go version)\n"
          printf "\n\nGo environment:\n\n"
          go env
          printf "\n\nRclone environment:\n\n"
          make vars
          printf "\n\nSystem environment:\n\n"
          env

      - name: Run tests
        shell: bash
        run: |
          make
          make quicktest
        if: matrix.quicktest

      - name: Race test
        shell: bash
        run: |
          make racequicktest
        if: matrix.racequicktest

      - name: Code quality test
        shell: bash
        run: |
          make build_dep
          make check
        if: matrix.check

      - name: Compile all architectures test
        shell: bash
        run: |
          make
          make compile_all
        if: matrix.compile_all

      - name: Deploy built binaries
        shell: bash
        run: |
          if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep ; fi
          make travis_beta
        env:
          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
        # working-directory: '$(modulePath)'
        if: matrix.deploy && github.head_ref == ''

  xgo:
    timeout-minutes: 60
    name: "xgo cross compile"
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@master
        with:
          path: ./src/github.com/${{ github.repository }}

      - name: Set environment variables
        shell: bash
        run: |
          echo '::set-env name=GOPATH::${{ runner.workspace }}'
          echo '::add-path::${{ runner.workspace }}/bin'

      - name: Cross-compile rclone
        run: |
          docker pull billziss/xgo-cgofuse
          go get -v github.com/karalabe/xgo
          xgo \
              -image=billziss/xgo-cgofuse \
              -targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
              -tags cmount \
              -dest build \
              .
          xgo \
              -image=billziss/xgo-cgofuse \
              -targets=android/*,ios/* \
              -dest build \
              .

      - name: Build rclone
        run: |
          docker pull golang
          docker run --rm -v "$PWD":/usr/src/rclone -w /usr/src/rclone golang go build -mod=vendor -v

      - name: Upload artifacts
        run: |
          make circleci_upload
        env:
          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
        if: github.head_ref == ''
.gitignore (3 changed lines)
@@ -5,6 +5,3 @@ build
 docs/public
 rclone.iml
 .idea
-.history
-*.test
-*.log
.golangci.yml (file removed, 26 lines)
@@ -1,26 +0,0 @@
# golangci-lint configuration options

linters:
  enable:
    - deadcode
    - errcheck
    - goimports
    - golint
    - ineffassign
    - structcheck
    - varcheck
    - govet
    - unconvert
    #- prealloc
    #- maligned
  disable-all: true

issues:
  # Enable some lints excluded by default
  exclude-use-default: false

  # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
  max-per-linter: 0

  # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
  max-same-issues: 0
.gometalinter.json (new file, 14 lines)
@@ -0,0 +1,14 @@
{
    "Enable": [
        "deadcode",
        "errcheck",
        "goimports",
        "golint",
        "ineffassign",
        "structcheck",
        "varcheck",
        "vet"
    ],
    "EnableGC": true,
    "Vendor": true
}
.travis.yml (new file, 51 lines)
@@ -0,0 +1,51 @@
language: go
sudo: required
dist: trusty
os:
  - linux
go:
  - 1.7.x
  - 1.8.x
  - 1.9.x
  - 1.10.x
  - 1.11.x
  - tip
before_install:
  - if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi
  - if [[ $TRAVIS_OS_NAME == osx ]]; then brew update && brew tap caskroom/cask && brew cask install osxfuse ; fi
install:
  - git fetch --unshallow --tags
  - make vars
  - make build_dep
script:
  - make check
  - make quicktest
  - make compile_all
env:
  global:
    - GOTAGS=cmount
    - secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
    - secure: AMjrMAksDy3QwqGqnvtUg8FL/GNVgNqTqhntLF9HSU0njHhX6YurGGnfKdD9vNHlajPQOewvmBjwNLcDWGn2WObdvmh9Ohep0EmOjZ63kliaRaSSQueSd8y0idfqMQAxep0SObOYbEDVmQh0RCAE9wOVKRaPgw98XvgqWGDq5Tw=
    - secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
addons:
  apt:
    packages:
      - fuse
      - libfuse-dev
      - rpm
      - pkg-config
matrix:
  allow_failures:
    - go: tip
  include:
    - os: osx
      go: 1.11.x
      env: GOTAGS=""
deploy:
  provider: script
  script: make travis_beta
  skip_cleanup: true
  on:
    all_branches: true
    go: 1.11.x
    condition: $TRAVIS_PULL_REQUEST == false
CONTRIBUTING.md (141 changed lines)

@@ -21,20 +21,20 @@ with the [latest beta of rclone](https://beta.rclone.org/):
 ## Submitting a pull request ##

 If you find a bug that you'd like to fix, or a new feature that you'd
-like to implement then please submit a pull request via GitHub.
+like to implement then please submit a pull request via Github.

 If it is a big feature then make an issue first so it can be discussed.

 You'll need a Go environment set up with GOPATH set. See [the Go
 getting started docs](https://golang.org/doc/install) for more info.

-First in your web browser press the fork button on [rclone's GitHub
-page](https://github.com/rclone/rclone).
+First in your web browser press the fork button on [rclone's Github
+page](https://github.com/ncw/rclone).

 Now in your terminal

-    go get -u github.com/rclone/rclone
-    cd $GOPATH/src/github.com/rclone/rclone
+    go get -u github.com/ncw/rclone
+    cd $GOPATH/src/github.com/ncw/rclone
     git remote rename origin upstream
     git remote add origin git@github.com:YOURUSER/rclone.git
@@ -64,31 +64,22 @@ packages which you can install with

 Make sure you

-* Add [documentation](#writing-documentation) for a new feature.
-* Follow the [commit message guidelines](#commit-messages).
-* Add [unit tests](#testing) for a new feature
+* Add documentation for a new feature (see below for where)
+* Add unit tests for a new feature
 * squash commits down to one per feature
-* rebase to master with `git rebase master`
+* rebase to master `git rebase master`

 When you are done with that

     git push origin my-new-feature

-Go to the GitHub website and click [Create pull
+Go to the Github website and click [Create pull
 request](https://help.github.com/articles/creating-a-pull-request/).

 You patch will get reviewed and you might get asked to fix some stuff.

 If so, then make the changes in the same branch, squash the commits,
-rebase it to master then push it to GitHub with `--force`.
-
-## Enabling CI for your fork ##
-
-The CI config files for rclone have taken care of forks of the project, so you can enable CI for your fork repo easily.
-
-rclone currently uses [Travis CI](https://travis-ci.org/), [AppVeyor](https://ci.appveyor.com/), and
-[Circle CI](https://circleci.com/) to build the project. To enable them for your fork, simply go into their
-websites, find your fork of rclone, and enable building there.
+rebase it to master then push it to Github with `--force`.

 ## Testing ##
@@ -118,24 +109,17 @@ but they can be run against any of the remotes.

     cd fs/sync
     go test -v -remote TestDrive:
-    go test -v -remote TestDrive: -fast-list
+    go test -v -remote TestDrive: -subdir

     cd fs/operations
     go test -v -remote TestDrive:

-If you want to use the integration test framework to run these tests
-all together with an HTML report and test retries then from the
-project root:
-
-    go install github.com/rclone/rclone/fstest/test_all
-    test_all -backend drive
-
 If you want to run all the integration tests against all the remotes,
 then change into the project root and run

     make test

-This command is run daily on the integration test server. You can
+This command is run daily on the the integration test server. You can
 find the results at https://pub.rclone.org/integration-tests/

 ## Code Organisation ##
@@ -189,14 +173,10 @@ with modules beneath.

 If you are adding a new feature then please update the documentation.

-If you add a new general flag (not for a backend), then document it in
+If you add a new flag, then if it is a general flag, document it in
 `docs/content/docs.md` - the flags there are supposed to be in
-alphabetical order.
-
-If you add a new backend option/flag, then it should be documented in
-the source file in the `Help:` field. The first line of this is used
-for the flag help, the remainder is shown to the user in `rclone
-config` and is added to the docs with `make backenddocs`.
+alphabetical order. If it is a remote specific flag, then document it
+in `docs/content/remote.md`.

 The only documentation you need to edit are the `docs/content/*.md`
 files. The MANUAL.*, rclone.1, web site etc are all auto generated
@@ -215,20 +195,14 @@ file.
 ## Commit messages ##

 Please make the first line of your commit message a summary of the
-change that a user (not a developer) of rclone would like to read, and
-prefix it with the directory of the change followed by a colon. The
-changelog gets made by looking at just these first lines so make it
-good!
+change, and prefix it with the directory of the change followed by a
+colon. The changelog gets made by looking at just these first lines
+so make it good!

 If you have more to say about the commit, then enter a blank line and
 carry on the description. Remember to say why the change was needed -
 the commit itself shows what was changed.

-Writing more is better than less. Comparing the behaviour before the
-change to that after the change is very useful. Imagine you are
-writing to yourself in 12 months time when you've forgotten everything
-about what you just did and you need to get up to speed quickly.
-
 If the change fixes an issue then write `Fixes #1234` in the commit
 message. This can be on the subject line if it will fit. If you
 don't want to close the associated issue just put `#1234` and the
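To make the convention concrete, a commit message in the house style described above might look like this (an invented example, not a commit from the rclone history):

    s3: fix upload of zero length files

    Before this change zero length files were skipped by the uploader.
    After this change they are uploaded like any other file.

    Fixes #1234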
@@ -276,8 +250,9 @@ To add a dependency `github.com/ncw/new_dependency` see the
 instructions below. These will fetch the dependency, add it to
 `go.mod` and `go.sum` and vendor it for older go versions.

-    GO111MODULE=on go get github.com/ncw/new_dependency
-    GO111MODULE=on go mod vendor
+    export GO111MODULE=on
+    go get github.com/ncw/new_dependency
+    go mod vendor

 You can add constraints on that package when doing `go get` (see the
 go docs linked above), but don't unless you really need to.
@@ -292,8 +267,9 @@ in `vendor`.

 If you need to update a dependency then run

-    GO111MODULE=on go get -u github.com/pkg/errors
-    GO111MODULE=on go mod vendor
+    export GO111MODULE=on
+    go get -u github.com/pkg/errors
+    go mod vendor

 Check in in a single commit as above.
@@ -341,12 +317,6 @@ Getting going
 * Add your remote to the imports in `backend/all/all.go`
 * HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
 * Try to implement as many optional methods as possible as it makes the remote more usable.
-* Use fs/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
-  * `go install -tags noencode`
-  * `rclone purge -v TestRemote:rclone-info`
-  * `rclone info -vv --write-json remote.json TestRemote:rclone-info`
-  * `go run cmd/info/internal/build_csv/main.go -o remote.csv remote.json`
-  * open `remote.csv` in a spreadsheet and examine

 Unit tests
@@ -356,71 +326,26 @@ Unit tests

 Integration tests

-* Add your backend to `fstest/test_all/config.yaml`
-* Once you've done that then you can use the integration test framework from the project root:
-  * go install ./...
-  * test_all -backend remote
-
-Or if you want to run the integration tests manually:
-
+* Add your fs to `fstest/test_all/test_all.go`
 * Make sure integration tests pass with
   * `cd fs/operations`
   * `go test -v -remote TestRemote:`
   * `cd fs/sync`
   * `go test -v -remote TestRemote:`
-* If your remote defines `ListR` check with this also
+* If you are making a bucket based remote, then check with this also
+  * `go test -v -remote TestRemote: -subdir`
+* And if your remote defines `ListR` this also
   * `go test -v -remote TestRemote: -fast-list`

 See the [testing](#testing) section for more information on integration tests.

-Add your fs to the docs - you'll need to pick an icon for it from
-[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
-alphabetical order of full name of remote (eg `drive` is ordered as
-`Google Drive`) but with the local file system last.
+Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.

-* `README.md` - main GitHub page
-* `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
-  * make sure this has the `autogenerated options` comments in (see your reference backend docs)
-  * update them with `make backenddocs` - revert any changes in other backends
+* `README.md` - main Github page
+* `docs/content/remote.md` - main docs page
 * `docs/content/overview.md` - overview docs
 * `docs/content/docs.md` - list of remotes in config section
 * `docs/content/about.md` - front page of rclone.org
 * `docs/layouts/chrome/navbar.html` - add it to the website navigation
 * `bin/make_manual.py` - add the page to the `docs` constant
+* `cmd/cmd.go` - the main help for rclone

-Once you've written the docs, run `make serve` and check they look OK
-in the web browser and the links (internal and external) all work.
-
-## Writing a plugin ##
-
-New features (backends, commands) can also be added "out-of-tree", through Go plugins.
-Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
-This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
-
-Usage
-
-- Naming
-  - Plugins names must have the pattern `librcloneplugin_KIND_NAME.so`.
-  - `KIND` should be one of `backend`, `command` or `bundle`.
-  - Example: A plugin with backend support for PiFS would be called
-    `librcloneplugin_backend_pifs.so`.
-- Loading
-  - Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
-  - Supported on rclone v1.50 or greater.
-  - All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
-  - If this variable doesn't exist, plugin support is disabled.
-  - Plugins must be compiled against the exact version of rclone to work.
-    (The rclone used during building the plugin must be the same as the source of rclone)
-
-Building
-
-To turn your existing additions into a Go plugin, move them to an external repository
-and change the top-level package name to `main`.
-
-Check `rclone --version` and make sure that the plugin's rclone dependency and host Go version match.
-
-Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
-
-[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
-
-[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)
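As a rough sketch of the plugin workflow described in the removed section above (the plugin name, backend, and directory here are illustrative assumptions, not part of the original text):

    # In an external repository whose top-level package is `main`:
    go build -buildmode=plugin -o librcloneplugin_backend_pifs.so .

    # Make the plugin visible to rclone (v1.50+, macOS/Linux only)
    mkdir -p ~/.rclone-plugins
    cp librcloneplugin_backend_pifs.so ~/.rclone-plugins/
    export RCLONE_PLUGIN_PATH=~/.rclone-plugins
    rclone version   # plugins in $RCLONE_PLUGIN_PATH are loaded at startup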
Dockerfile (file removed, 22 lines)
@@ -1,22 +0,0 @@
FROM golang AS builder

COPY . /go/src/github.com/rclone/rclone/
WORKDIR /go/src/github.com/rclone/rclone/

RUN make quicktest
RUN \
  CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
  make
RUN ./rclone version

# Begin final image
FROM alpine:latest

RUN apk --no-cache add ca-certificates fuse

COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/

ENTRYPOINT [ "rclone" ]

WORKDIR /data
ENV XDG_CONFIG_HOME=/config
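A minimal usage sketch for the multi-stage Dockerfile above (the image tag is an arbitrary choice for illustration):

    docker build -t rclone:local .
    docker run --rm rclone:local version   # ENTRYPOINT is rclone, so this runs `rclone version`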
@@ -1,17 +1,14 @@
----
-name: Bug report
-about: Report a problem with rclone
----
-
 <!--

-Welcome :-) We understand you are having a problem with rclone; we want to help you with that!
+Hi!

-If you've just got a question or aren't sure if you've found a bug then please use the rclone forum:
+We understand you are having a problem with rclone or have an idea for an improvement - we want to help you with that!
+
+If you've just got a question or aren't sure if you've found a bug then please use the rclone forum

 https://forum.rclone.org/

-instead of filing an issue for a quick response.
+instead of filing an issue. We'll reply quickly and it won't increase our massive issue backlog.

 If you think you might have found a bug, please can you try to replicate it with the latest beta?

@@ -19,7 +16,9 @@ If you think you might have found a bug, please can you try to replicate it with

 If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)

-Thank you
+If you have an idea for an improvement, then please search the old issues first and if you don't find your idea, make a new issue.
+
+Thanks

 The Rclone Developers

@@ -28,23 +27,17 @@ The Rclone Developers
 #### What is the problem you are having with rclone?

-#### What is your rclone version (output from `rclone version`)
+#### What is your rclone version (eg output from `rclone -V`)

 #### Which OS you are using and how many bits (eg Windows 7, 64 bit)

 #### Which cloud storage system are you using? (eg Google Drive)

 #### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)

 #### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
MAINTAINERS.md
@@ -1,17 +1,12 @@
 # Maintainers guide for rclone #

-Current active maintainers of rclone are:
+Current active maintainers of rclone are

-| Name | GitHub ID | Specific Responsibilities |
-| :--------------- | :---------- | :-------------------------- |
-| Nick Craig-Wood | @ncw | overall project health |
-| Stefan Breunig | @breunigs | |
-| Ishuah Kariuki | @ishuah | |
-| Remus Bunduc | @remusb | cache backend |
-| Fabian Möller | @B4dM4n | |
-| Alex Chen | @Cnly | onedrive backend |
-| Sandeep Ummadi | @sandeepkru | azureblob backend |
-| Sebastian Bünger | @buengese | jottacloud & yandex backends |
+* Nick Craig-Wood @ncw
+* Stefan Breunig @breunigs
+* Ishuah Kariuki @ishuah
+* Remus Bunduc @remusb - cache subsystem maintainer
+* Fabian Möller @B4dM4n

 **This is a work in progress Draft**

@@ -51,7 +46,7 @@ The milestones have these meanings:
 * Help wanted - blue sky stuff that might get moved up, or someone could help with
 * Known bugs - bugs waiting on external factors or we aren't going to fix for the moment

-Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
+Tickets [with no milestone](https://github.com/ncw/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.

 ## Closing Tickets ##

@@ -61,7 +56,7 @@ Close tickets as soon as you can - make sure they are tagged with a release. Po

 Try to process pull requests promptly!

-Merging pull requests on GitHub itself works quite well now-a-days so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
+Merging pull requests on Github itself works quite well now-a-days so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.

 After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.
MANUAL.html (11197 changed lines) - diff suppressed because it is too large
MANUAL.txt (12180 changed lines) - diff suppressed because it is too large
Makefile (149 changed lines)

@@ -1,107 +1,113 @@
 SHELL = bash
-# Branch we are working on
-BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD))
-# Tag of the current commit, if any. If this is not "" then we are building a release
-RELEASE_TAG := $(shell git tag -l --points-at HEAD)
-# Version of last release (may not be on this branch)
-VERSION := $(shell cat VERSION)
-# Last tag on this branch
+BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD))
 LAST_TAG := $(shell git describe --tags --abbrev=0)
-# If we are working on a release, override branch to master
-ifdef RELEASE_TAG
+ifeq ($(BRANCH),$(LAST_TAG))
 	BRANCH := master
 endif
 TAG_BRANCH := -$(BRANCH)
 BRANCH_PATH := branch/
-# If building HEAD or master then unset TAG_BRANCH and BRANCH_PATH
 ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
 	TAG_BRANCH :=
 	BRANCH_PATH :=
 endif
-# Make version suffix -DDD-gCCCCCCCC (D=commits since last relase, C=Commit) or blank
-VERSION_SUFFIX := $(shell git describe --abbrev=8 --tags | perl -lpe 's/^v\d+\.\d+\.\d+//; s/^-(\d+)/"-".sprintf("%03d",$$1)/e;')
-# TAG is current version + number of commits since last release + branch
-TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)
-NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
-ifndef RELEASE_TAG
+TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
+NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
+ifneq ($(TAG),$(LAST_TAG))
 	TAG := $(TAG)-beta
 endif
 GO_VERSION := $(shell go version)
 GO_FILES := $(shell go list ./... | grep -v /vendor/ )
-ifdef BETA_SUBDIR
-	BETA_SUBDIR := /$(BETA_SUBDIR)
-endif
-BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
+# Run full tests if go >= go1.11
+FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 11)')
+BETA_PATH := $(BRANCH_PATH)$(TAG)
 BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
 BETA_UPLOAD_ROOT := memstore:beta-rclone-org
 BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
 # Pass in GOTAGS=xyz on the make command line to set build tags
 ifdef GOTAGS
 BUILDTAGS=-tags "$(GOTAGS)"
-LINTTAGS=--build-tags "$(GOTAGS)"
 endif

-.PHONY: rclone test_all vars version
+.PHONY: rclone vars version

 rclone:
-	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
-	mkdir -p `go env GOPATH`/bin/
-	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/
-
-test_all:
-	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
+	touch fs/version.go
+	go install -v --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
+	cp -av `go env GOPATH`/bin/rclone .

 vars:
 	@echo SHELL="'$(SHELL)'"
 	@echo BRANCH="'$(BRANCH)'"
 	@echo TAG="'$(TAG)'"
-	@echo VERSION="'$(VERSION)'"
-	@echo NEXT_VERSION="'$(NEXT_VERSION)'"
+	@echo LAST_TAG="'$(LAST_TAG)'"
+	@echo NEW_TAG="'$(NEW_TAG)'"
 	@echo GO_VERSION="'$(GO_VERSION)'"
+	@echo FULL_TESTS="'$(FULL_TESTS)'"
 	@echo BETA_URL="'$(BETA_URL)'"

 version:
 	@echo '$(TAG)'

 # Full suite of integration tests
-test: rclone test_all
-	-test_all 2>&1 | tee test_all.log
-	@echo "Written logs in test_all.log"
+test: rclone
+	go install github.com/ncw/rclone/fstest/test_all
+	-go test -v -count 1 $(BUILDTAGS) $(GO_FILES) 2>&1 | tee test.log
+	-test_all github.com/ncw/rclone/fs/operations github.com/ncw/rclone/fs/sync 2>&1 | tee fs/test_all.log
+	@echo "Written logs in test.log and fs/test_all.log"

 # Quick test
 quicktest:
 	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
+ifdef FULL_TESTS
 racequicktest:
 	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
+endif

 # Do source code quality checks
 check: rclone
-	@echo "-- START CODE QUALITY REPORT -------------------------------"
-	@golangci-lint run $(LINTTAGS) ./...
-	@echo "-- END CODE QUALITY REPORT ---------------------------------"
+ifdef FULL_TESTS
+	go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
+	errcheck $(BUILDTAGS) ./...
+	find . -name \*.go | grep -v /vendor/ | xargs goimports -d | grep . ; test $$? -eq 1
+	go list ./... | xargs -n1 golint | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
+else
+	@echo Skipping source quality tests as version of go too old
+endif
+
+gometalinter_install:
+	go get -u github.com/alecthomas/gometalinter
+	gometalinter --install --update
+
+# We aren't using gometalinter as the default linter yet because
+# 1. it doesn't support build tags: https://github.com/alecthomas/gometalinter/issues/275
+# 2. can't get -printfuncs working with the vet linter
+gometalinter:
+	gometalinter ./...

 # Get the build dependencies
 build_dep:
-	go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
+ifdef FULL_TESTS
+	go get -u github.com/kisielk/errcheck
+	go get -u golang.org/x/tools/cmd/goimports
+	go get -u github.com/golang/lint/golint
+endif

 # Get the release dependencies
 release_dep:
-	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
-	go run bin/get-github-release.go -extract github-release aktau/github-release 'linux-amd64-github-release.tar.bz2'
+	go get -u github.com/goreleaser/nfpm/...
+	go get -u github.com/aktau/github-release

 # Update dependencies
 update:
 	GO111MODULE=on go get -u ./...
-	GO111MODULE=on go mod tidy
-	GO111MODULE=on go mod vendor
+	GO111MODULE=on go tidy
+	GO111MODULE=on go vendor

 doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs

 rclone.1: MANUAL.md
 	pandoc -s --from markdown --to man MANUAL.md -o rclone.1

-MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs backenddocs
+MANUAL.md: bin/make_manual.py docs/content/*.md commanddocs
 	./bin/make_manual.py

 MANUAL.html: MANUAL.md
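For orientation, both sides of this hunk derive a beta tag from the git state, so the version target prints something of this shape (an illustrative value, not taken from a real checkout):

    $ make version
    v1.43.1-036-g12345678-beta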
@@ -111,10 +117,7 @@ MANUAL.txt: MANUAL.md
 	pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt

 commanddocs: rclone
-	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
-
-backenddocs: rclone bin/make_backend_docs.py
-	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
+	rclone gendocs docs/content/commands/

 rcdocs: rclone
 	bin/make_rc_docs.sh
@@ -149,8 +152,8 @@ check_sign:
 	cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c

 upload:
-	rclone -P copy build/ memstore:downloads-rclone-org/$(TAG)
-	rclone lsf build --files-only --include '*.{zip,deb,rpm}' --include version.txt | xargs -i bash -c 'i={}; j="$$i"; [[ $$i =~ (.*)(-v[0-9\.]+-)(.*) ]] && j=$${BASH_REMATCH[1]}-current-$${BASH_REMATCH[3]}; rclone copyto -v "memstore:downloads-rclone-org/$(TAG)/$$i" "memstore:downloads-rclone-org/$$j"'
+	rclone -v copy --exclude '*current*' build/ memstore:downloads-rclone-org/$(TAG)
+	rclone -v copy --include '*current*' --include version.txt build/ memstore:downloads-rclone-org

 upload_github:
 	./bin/upload-github $(TAG)
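The removed `rclone lsf | xargs` one-liner above rewrites each versioned artifact name to a "-current-" alias before copying it. A hedged illustration of the rename it performs (the file name is invented for the example):

    i='rclone-v1.50.0-linux-amd64.zip'
    [[ $i =~ (.*)(-v[0-9\.]+-)(.*) ]] && echo "${BASH_REMATCH[1]}-current-${BASH_REMATCH[3]}"
    # prints: rclone-current-linux-amd64.zip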
@@ -167,7 +170,11 @@ log_since_last_release:
 	git log $(LAST_TAG)..

 compile_all:
-	go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
+ifdef FULL_TESTS
+	go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)
+else
+	@echo Skipping compile all as version of go too old
+endif

 appveyor_upload:
 	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
@@ -176,52 +183,50 @@ ifndef BRANCH_PATH
|
|||||||
endif
|
endif
|
||||||
@echo Beta release ready at $(BETA_URL)
|
@echo Beta release ready at $(BETA_URL)
|
||||||
|
|
||||||
circleci_upload:
|
BUILD_FLAGS := -exclude "^(windows|darwin)/"
|
||||||
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
|
ifeq ($(TRAVIS_OS_NAME),osx)
|
||||||
ifndef BRANCH_PATH
|
BUILD_FLAGS := -include "^darwin/" -cgo
|
||||||
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
|
|
||||||
endif
|
endif
|
||||||
@echo Beta release ready at $(BETA_URL)/testbuilds
|
|
||||||
|
|
||||||
travis_beta:
|
travis_beta:
|
||||||
ifeq (linux,$(filter linux,$(subst Linux,linux,$(TRAVIS_OS_NAME) $(AGENT_OS))))
|
ifeq ($(TRAVIS_OS_NAME),linux)
|
||||||
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
|
-	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
 endif
 	git log $(LAST_TAG).. > /tmp/git-log.txt
-	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
+	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)
 	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
 ifndef BRANCH_PATH
-	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
+	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
 endif
 	@echo Beta release ready at $(BETA_URL)
 
 # Fetch the binary builds from travis and appveyor
 fetch_binaries:
-	rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
+	rclone -v sync $(BETA_UPLOAD) build/
 
 serve: website
 	cd docs && hugo server -v -w
 
 tag: doc
-	@echo "Old tag is $(VERSION)"
-	@echo "New tag is $(NEXT_VERSION)"
-	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)\"\n" | gofmt > fs/version.go
-	echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
-	echo "$(NEXT_VERSION)" > VERSION
-	git tag -s -m "Version $(NEXT_VERSION)" $(NEXT_VERSION)
-	bin/make_changelog.py $(LAST_TAG) $(NEXT_VERSION) > docs/content/changelog.md.new
+	@echo "Old tag is $(LAST_TAG)"
+	@echo "New tag is $(NEW_TAG)"
+	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
+	echo -n "$(NEW_TAG)" > docs/layouts/partials/version.html
+	git tag -s -m "Version $(NEW_TAG)" $(NEW_TAG)
+	bin/make_changelog.py $(LAST_TAG) $(NEW_TAG) > docs/content/changelog.md.new
 	mv docs/content/changelog.md.new docs/content/changelog.md
 	@echo "Edit the new changelog in docs/content/changelog.md"
 	@echo "Then commit all the changes"
-	@echo git commit -m \"Version $(NEXT_VERSION)\" -a -v
+	@echo git commit -m \"Version $(NEW_TAG)\" -a -v
 	@echo "And finally run make retag before make cross etc"
 
 retag:
-	git tag -f -s -m "Version $(VERSION)" $(VERSION)
+	git tag -f -s -m "Version $(LAST_TAG)" $(LAST_TAG)
 
 startdev:
-	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(VERSION)-DEV\"\n" | gofmt > fs/version.go
-	git commit -m "Start $(VERSION)-DEV development" fs/version.go
+	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(LAST_TAG)-DEV\"\n" | gofmt > fs/version.go
+	git commit -m "Start $(LAST_TAG)-DEV development" fs/version.go
 
 winzip:
 	zip -9 rclone-$(TAG).zip rclone.exe
 
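Aside: the `tag` and `startdev` recipes above write `fs/version.go` by piping a literal through gofmt. For a hypothetical tag `v1.50.0` (a made-up example version, not one from this compare) the generated file would look like:

```go
package fs

// Version of rclone
var Version = "v1.50.0"
```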
127 README.md
@@ -1,107 +1,62 @@
-[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
+[](https://rclone.org/)
 
 [Website](https://rclone.org) |
 [Documentation](https://rclone.org/docs/) |
-[Download](https://rclone.org/downloads/) |
 [Contributing](CONTRIBUTING.md) |
 [Changelog](https://rclone.org/changelog/) |
 [Installation](https://rclone.org/install/) |
 [Forum](https://forum.rclone.org/)
+[G+](https://google.com/+RcloneOrg)
 
-[](https://travis-ci.org/rclone/rclone)
-[](https://ci.appveyor.com/project/rclone/rclone)
-[](https://dev.azure.com/rclone/rclone/_build/latest?definitionId=2&branchName=master)
-[](https://circleci.com/gh/rclone/rclone/tree/master)
-[](https://goreportcard.com/report/github.com/rclone/rclone)
-[](https://godoc.org/github.com/rclone/rclone)
-[](https://hub.docker.com/r/rclone/rclone)
+[](https://travis-ci.org/ncw/rclone)
+[](https://ci.appveyor.com/project/ncw/rclone)
+[](https://circleci.com/gh/ncw/rclone/tree/master)
+[](https://godoc.org/github.com/ncw/rclone)
 
-# Rclone
+Rclone is a command line program to sync files and directories to and from
 
-Rclone *("rsync for cloud storage")* is a command line program to sync files and directories to and from different cloud storage providers.
+* Amazon Drive ([See note](https://rclone.org/amazonclouddrive/#status))
+* Amazon S3 / Dreamhost / Ceph / Minio / Wasabi
+* Backblaze B2
+* Box
+* Dropbox
+* FTP
+* Google Cloud Storage
+* Google Drive
+* HTTP
+* Hubic
+* Jottacloud
+* Mega
+* Microsoft Azure Blob Storage
+* Microsoft OneDrive
+* OpenDrive
+* Openstack Swift / Rackspace cloud files / Memset Memstore / OVH / Oracle Cloud Storage
+* pCloud
+* QingStor
+* SFTP
+* Webdav / Owncloud / Nextcloud
+* Yandex Disk
+* The local filesystem
 
-## Storage providers
+Features
 
-* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
-* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
-* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
-* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
-* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
-* Box [:page_facing_up:](https://rclone.org/box/)
-* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
-* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
-* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
-* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
-* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
-* FTP [:page_facing_up:](https://rclone.org/ftp/)
-* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
-* Google Drive [:page_facing_up:](https://rclone.org/drive/)
-* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
-* HTTP [:page_facing_up:](https://rclone.org/http/)
-* Hubic [:page_facing_up:](https://rclone.org/hubic/)
-* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
-* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
-* Koofr [:page_facing_up:](https://rclone.org/koofr/)
-* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
-* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
-* Mega [:page_facing_up:](https://rclone.org/mega/)
-* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
-* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
-* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
-* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
-* OVH [:page_facing_up:](https://rclone.org/swift/)
-* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
-* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
-* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
-* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
-* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
-* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
-* put.io [:page_facing_up:](https://rclone.org/putio/)
-* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
-* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
-* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
-* SFTP [:page_facing_up:](https://rclone.org/sftp/)
-* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
-* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
-* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
-* The local filesystem [:page_facing_up:](https://rclone.org/local/)
-
-Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
-
-## Features
-
-* MD5/SHA-1 hashes checked at all times for file integrity
+* MD5/SHA1 hashes checked at all times for file integrity
 * Timestamps preserved on files
 * Partial syncs supported on a whole file basis
-* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
-* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
-* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
-* Can sync to and from network, e.g. two different cloud accounts
-* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
-* Optional encryption ([Crypt](https://rclone.org/crypt/))
-* Optional cache ([Cache](https://rclone.org/cache/))
-* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
-* Multi-threaded downloads to local disk
-* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
+* Copy mode to just copy new/changed files
+* Sync (one way) mode to make a directory identical
+* Check mode to check for file hash equality
+* Can sync to and from network, eg two different cloud accounts
+* Optional encryption (Crypt)
+* Optional FUSE mount
 
-## Installation & documentation
-
-Please see the [rclone website](https://rclone.org/) for:
-
-* [Installation](https://rclone.org/install/)
-* [Documentation & configuration](https://rclone.org/docs/)
-* [Changelog](https://rclone.org/changelog/)
-* [FAQ](https://rclone.org/faq/)
-* [Storage providers](https://rclone.org/overview/)
-* [Forum](https://forum.rclone.org/)
-* ...and more
-
-## Downloads
-
-* https://rclone.org/downloads/
-
+See the home page for installation, usage, documentation, changelog
+and configuration walkthroughs.
+
+* https://rclone.org/
+
 License
 -------
 
 This is free software under the terms of MIT the license (check the
-[COPYING file](/COPYING) included in this package).
+COPYING file included in this package).
82 RELEASE.md
@@ -1,14 +1,8 @@
-# Release
-
-This file describes how to make the various kinds of releases
-
-## Extra required software for making a release
+Extra required software for making a release
 
 * [github-release](https://github.com/aktau/github-release) for uploading packages
 * pandoc for making the html and man pages
 
-## Making a release
+Making a release
 
 * git status - make sure everything is checked in
 * Check travis & appveyor builds are green
 * make check
@@ -17,7 +11,7 @@ This file describes how to make the various kinds of releases
 * edit docs/content/changelog.md
 * make doc
 * git status - to check for new man pages - git add them
-* git commit -a -v -m "Version v1.XX.0"
+* git commit -a -v -m "Version v1.XX"
 * make retag
 * git push --tags origin master
 * # Wait for the appveyor and travis builds to complete then...
@@ -32,78 +26,8 @@ This file describes how to make the various kinds of releases
 * # announce with forum post, twitter post, G+ post
 
 Early in the next release cycle update the vendored dependencies
 
 * Review any pinned packages in go.mod and remove if possible
 * make update
 * git status
 * git add new files
 * git commit -a -v
-
-If `make update` fails with errors like this:
-
-```
-# github.com/cpuguy83/go-md2man/md2man
-../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
-../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
-```
-
-Can be fixed with
-
-* GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
-* GO111MODULE=on go mod tidy
-* GO111MODULE=on go mod vendor
-
-## Making a point release
-
-If rclone needs a point release due to some horrendous bug:
-
-First make the release branch. If this is a second point release then
-this will be done already.
-
-* BASE_TAG=v1.XX # eg v1.49
-* NEW_TAG=${BASE_TAG}.Y # eg v1.49.1
-* echo $BASE_TAG $NEW_TAG # v1.49 v1.49.1
-* git branch ${BASE_TAG} ${BASE_TAG}-fixes
-
-Now
-
-* git co ${BASE_TAG}-fixes
-* git cherry-pick any fixes
-* Test (see above)
-* make NEXT_VERSION=${NEW_TAG} tag
-* edit docs/content/changelog.md
-* make TAG=${NEW_TAG} doc
-* git commit -a -v -m "Version ${NEW_TAG}"
-* git tag -d ${NEW_TAG}
-* git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
-* git push --tags -u origin ${BASE_TAG}-fixes
-* Wait for builds to complete
-* make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
-* make TAG=${NEW_TAG} tarball
-* make TAG=${NEW_TAG} sign_upload
-* make TAG=${NEW_TAG} check_sign
-* make TAG=${NEW_TAG} upload
-* make TAG=${NEW_TAG} upload_website
-* make TAG=${NEW_TAG} upload_github
-* NB this overwrites the current beta so we need to do this
-* git co master
-* make LAST_TAG=${NEW_TAG} startdev
-* # cherry pick the changes to the changelog and VERSION
-* git checkout ${BASE_TAG}-fixes VERSION docs/content/changelog.md
-* git commit --amend
-* git push
-* Announce!
-
-## Making a manual build of docker
-
-The rclone docker image should autobuild on docker hub. If it doesn't
-or needs to be updated then rebuild like this.
-
-```
-docker build -t rclone/rclone:1.49.1 -t rclone/rclone:1.49 -t rclone/rclone:1 -t rclone/rclone:latest .
-docker push rclone/rclone:1.49.1
-docker push rclone/rclone:1.49
-docker push rclone/rclone:1
-docker push rclone/rclone:latest
-```
-
@@ -2,19 +2,20 @@ package alias
 
 import (
 	"errors"
+	"path"
+	"path/filepath"
 	"strings"
 
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/fspath"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 )
 
 // Register with Fs
 func init() {
 	fsi := &fs.RegInfo{
 		Name: "alias",
-		Description: "Alias for an existing remote",
+		Description: "Alias for a existing remote",
 		NewFs: NewFs,
 		Options: []fs.Option{{
 			Name: "remote",
@@ -30,7 +31,7 @@ type Options struct {
 	Remote string `config:"remote"`
 }
 
-// NewFs constructs an Fs from the path.
+// NewFs contstructs an Fs from the path.
 //
 // The returned Fs is the actual Fs, referenced by remote in the config
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -46,9 +47,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if strings.HasPrefix(opt.Remote, name+":") {
 		return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
 	}
-	fsInfo, configName, fsPath, config, err := fs.ConfigFs(opt.Remote)
+	_, configName, fsPath, err := fs.ParseRemote(opt.Remote)
 	if err != nil {
 		return nil, err
 	}
-	return fsInfo.NewFs(configName, fspath.JoinRootPath(fsPath, root), config)
+	root = path.Join(fsPath, filepath.ToSlash(root))
+	if configName == "local" {
+		return fs.NewFs(root)
+	}
+	return fs.NewFs(configName + ":" + root)
 }
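Aside: the older `NewFs` body above builds the wrapped remote's root with `path.Join(fsPath, filepath.ToSlash(root))`. A minimal standalone sketch of that joining step, using only the standard library and made-up example paths:

```go
package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	fsPath := "bucket/base" // path part parsed out of the wrapped remote (hypothetical)
	root := "data/photos"   // root the alias was opened with (hypothetical)
	// filepath.ToSlash normalises the OS path separator (relevant on
	// Windows), then path.Join cleans and concatenates the two parts.
	joined := path.Join(fsPath, filepath.ToSlash(root))
	fmt.Println(joined) // bucket/base/data/photos
}
```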
@@ -1,16 +1,15 @@
 package alias
 
 import (
-	"context"
 	"fmt"
 	"path"
 	"path/filepath"
 	"sort"
 	"testing"
 
-	_ "github.com/rclone/rclone/backend/local" // pull in test backend
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config"
+	_ "github.com/ncw/rclone/backend/local" // pull in test backend
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
 	"github.com/stretchr/testify/require"
 )
 
@@ -70,7 +69,7 @@ func TestNewFS(t *testing.T) {
 			prepare(t, remoteRoot)
 			f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
 			require.NoError(t, err, what)
-			gotEntries, err := f.List(context.Background(), test.fsList)
+			gotEntries, err := f.List(test.fsList)
 			require.NoError(t, err, what)
 
 			sort.Sort(gotEntries)
@@ -81,7 +80,7 @@ func TestNewFS(t *testing.T) {
 				wantEntry := test.entries[i]
 
 				require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
-				require.Equal(t, wantEntry.size, gotEntry.Size(), what)
+				require.Equal(t, wantEntry.size, int64(gotEntry.Size()), what)
 				_, isDir := gotEntry.(fs.Directory)
 				require.Equal(t, wantEntry.isDir, isDir, what)
 			}
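Aside: the `f.List(context.Background(), ...)` vs `f.List(...)` difference above is the pattern repeated across this whole compare - one side threads a `context.Context` as the first argument of every filesystem method. A cut-down sketch of that convention, with hypothetical types rather than rclone's real `fs.Fs` interface:

```go
package main

import (
	"context"
	"fmt"
)

// Lister is a hypothetical, cut-down stand-in for an fs.Fs-style
// interface with the context threaded through as the first argument.
type Lister interface {
	List(ctx context.Context, dir string) ([]string, error)
}

type memFs struct{ entries []string }

func (m memFs) List(ctx context.Context, dir string) ([]string, error) {
	// Honour cancellation before doing any work.
	if err := ctx.Err(); err != nil {
		return nil, err
	}
	return m.entries, nil
}

func main() {
	var f Lister = memFs{entries: []string{"a.txt", "b.txt"}}
	entries, err := f.List(context.Background(), "")
	fmt.Println(entries, err) // [a.txt b.txt] <nil>
}
```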
@@ -2,38 +2,29 @@ package all
 
 import (
 	// Active file systems
-	_ "github.com/rclone/rclone/backend/alias"
-	_ "github.com/rclone/rclone/backend/amazonclouddrive"
-	_ "github.com/rclone/rclone/backend/azureblob"
-	_ "github.com/rclone/rclone/backend/b2"
-	_ "github.com/rclone/rclone/backend/box"
-	_ "github.com/rclone/rclone/backend/cache"
-	_ "github.com/rclone/rclone/backend/chunker"
-	_ "github.com/rclone/rclone/backend/crypt"
-	_ "github.com/rclone/rclone/backend/drive"
-	_ "github.com/rclone/rclone/backend/dropbox"
-	_ "github.com/rclone/rclone/backend/fichier"
-	_ "github.com/rclone/rclone/backend/ftp"
-	_ "github.com/rclone/rclone/backend/googlecloudstorage"
-	_ "github.com/rclone/rclone/backend/googlephotos"
-	_ "github.com/rclone/rclone/backend/http"
-	_ "github.com/rclone/rclone/backend/hubic"
-	_ "github.com/rclone/rclone/backend/jottacloud"
-	_ "github.com/rclone/rclone/backend/koofr"
-	_ "github.com/rclone/rclone/backend/local"
-	_ "github.com/rclone/rclone/backend/mailru"
-	_ "github.com/rclone/rclone/backend/mega"
-	_ "github.com/rclone/rclone/backend/onedrive"
-	_ "github.com/rclone/rclone/backend/opendrive"
-	_ "github.com/rclone/rclone/backend/pcloud"
-	_ "github.com/rclone/rclone/backend/premiumizeme"
-	_ "github.com/rclone/rclone/backend/putio"
-	_ "github.com/rclone/rclone/backend/qingstor"
-	_ "github.com/rclone/rclone/backend/s3"
-	_ "github.com/rclone/rclone/backend/sftp"
-	_ "github.com/rclone/rclone/backend/sharefile"
-	_ "github.com/rclone/rclone/backend/swift"
-	_ "github.com/rclone/rclone/backend/union"
-	_ "github.com/rclone/rclone/backend/webdav"
-	_ "github.com/rclone/rclone/backend/yandex"
+	_ "github.com/ncw/rclone/backend/alias"
+	_ "github.com/ncw/rclone/backend/amazonclouddrive"
+	_ "github.com/ncw/rclone/backend/azureblob"
+	_ "github.com/ncw/rclone/backend/b2"
+	_ "github.com/ncw/rclone/backend/box"
+	_ "github.com/ncw/rclone/backend/cache"
+	_ "github.com/ncw/rclone/backend/crypt"
+	_ "github.com/ncw/rclone/backend/drive"
+	_ "github.com/ncw/rclone/backend/dropbox"
+	_ "github.com/ncw/rclone/backend/ftp"
+	_ "github.com/ncw/rclone/backend/googlecloudstorage"
+	_ "github.com/ncw/rclone/backend/http"
+	_ "github.com/ncw/rclone/backend/hubic"
+	_ "github.com/ncw/rclone/backend/jottacloud"
+	_ "github.com/ncw/rclone/backend/local"
+	_ "github.com/ncw/rclone/backend/mega"
+	_ "github.com/ncw/rclone/backend/onedrive"
+	_ "github.com/ncw/rclone/backend/opendrive"
+	_ "github.com/ncw/rclone/backend/pcloud"
+	_ "github.com/ncw/rclone/backend/qingstor"
+	_ "github.com/ncw/rclone/backend/s3"
+	_ "github.com/ncw/rclone/backend/sftp"
+	_ "github.com/ncw/rclone/backend/swift"
+	_ "github.com/ncw/rclone/backend/webdav"
+	_ "github.com/ncw/rclone/backend/yandex"
 )
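Aside: this file exists only for its blank-import side effects - each backend package registers itself from `init()` (see the `fs.RegInfo` literal in the alias backend above). A minimal sketch of the registration pattern, with a hypothetical `register` function standing in for `fs.Register`:

```go
package main

import "fmt"

// registry is a hypothetical stand-in for the table fs.Register fills.
var registry = map[string]string{}

// register is what each backend would call from its init().
func register(name, description string) { registry[name] = description }

// In rclone each backend lives in its own package and registers itself
// from init(); a blank import of the package is enough to make the
// backend available, which is all that backend/all does.
func init() { register("alias", "Alias for an existing remote") }

func main() {
	fmt.Println(registry) // map[alias:Alias for an existing remote]
}
```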
@@ -12,7 +12,6 @@ we ignore assets completely!
 */
 
 import (
-	"context"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -22,24 +21,23 @@ import (
 	"strings"
 	"time"
 
-	acd "github.com/ncw/go-acd"
+	"github.com/ncw/go-acd"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/fshttp"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/lib/dircache"
+	"github.com/ncw/rclone/lib/oauthutil"
+	"github.com/ncw/rclone/lib/pacer"
+	"github.com/ncw/rclone/lib/rest"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/encodings"
-	"github.com/rclone/rclone/fs/fserrors"
-	"github.com/rclone/rclone/fs/fshttp"
-	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/lib/dircache"
-	"github.com/rclone/rclone/lib/oauthutil"
-	"github.com/rclone/rclone/lib/pacer"
 	"golang.org/x/oauth2"
 )
 
 const (
-	enc = encodings.AmazonCloudDrive
 	folderKind = "FOLDER"
 	fileKind = "FILE"
 	statusAvailable = "AVAILABLE"
@@ -99,42 +97,13 @@ func init() {
 			Hide: fs.OptionHideBoth,
 			Advanced: true,
 		}, {
 			Name: "upload_wait_per_gb",
-			Help: `Additional time per GB to wait after a failed complete upload to see if it appears.
-
-Sometimes Amazon Drive gives an error when a file has been fully
-uploaded but the file appears anyway after a little while. This
-happens sometimes for files over 1GB in size and nearly every time for
-files bigger than 10GB. This parameter controls the time rclone waits
-for the file to appear.
-
-The default value for this parameter is 3 minutes per GB, so by
-default it will wait 3 minutes for every GB uploaded to see if the
-file appears.
-
-You can disable this feature by setting it to 0. This may cause
-conflict errors as rclone retries the failed upload but the file will
-most likely appear correctly eventually.
-
-These values were determined empirically by observing lots of uploads
-of big files for a range of file sizes.
-
-Upload with the "-v" flag to see more info about what rclone is doing
-in this situation.`,
+			Help: "Additional time per GB to wait after a failed complete upload to see if it appears.",
 			Default: fs.Duration(180 * time.Second),
 			Advanced: true,
 		}, {
 			Name: "templink_threshold",
-			Help: `Files >= this size will be downloaded via their tempLink.
-
-Files this size or more will be downloaded via their "tempLink". This
-is to work around a problem with Amazon Drive which blocks downloads
-of files bigger than about 10GB. The default for this is 9GB which
-shouldn't need to be changed.
-
-To download files above this threshold, rclone requests a "tempLink"
-which downloads the file through a temporary URL directly from the
-underlying S3 storage.`,
+			Help: "Files >= this size will be downloaded via their tempLink.",
 			Default: defaultTempLinkThreshold,
 			Advanced: true,
 		}},
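Aside: the deleted `upload_wait_per_gb` help text above specifies a wait proportional to the uploaded size, 3 minutes per GB by default. A sketch of that arithmetic, assuming nothing beyond what the help text states:

```go
package main

import (
	"fmt"
	"time"
)

// waitFor scales a per-GB wait by the uploaded size, as the
// upload_wait_per_gb option describes (default 3 minutes per GB).
func waitFor(sizeBytes int64, perGB time.Duration) time.Duration {
	const gb = 1 << 30
	return time.Duration(float64(perGB) * float64(sizeBytes) / gb)
}

func main() {
	perGB := 180 * time.Second // the default from the diff above
	fmt.Println(waitFor(10*(1<<30), perGB)) // 30m0s for a 10GB upload
}
```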
@@ -157,7 +126,7 @@ type Fs struct {
 	noAuthClient *http.Client // unauthenticated http client
 	root string // the path we are working on
 	dirCache *dircache.DirCache // Map of directory path to directory id
-	pacer *fs.Pacer // pacer for API calls
+	pacer *pacer.Pacer // pacer for API calls
 	trueRootID string // ID of true root directory
 	tokenRenewer *oauthutil.Renew // renew the token on expiry
 }
@@ -249,7 +218,6 @@ func filterRequest(req *http.Request) {
 
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-	ctx := context.Background()
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -267,7 +235,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	}
 	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to configure Amazon Drive")
+		log.Fatalf("Failed to configure Amazon Drive: %v", err)
 	}
 
 	c := acd.NewClient(oAuthClient)
@@ -276,7 +244,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		root: root,
 		opt: *opt,
 		c: c,
-		pacer: fs.NewPacer(pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
+		pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
 		noAuthClient: fshttp.NewClient(fs.Config),
 	}
 	f.features = (&fs.Features{
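Aside: API calls in this backend go through `f.pacer.Call(func() (bool, error) {...})`, where the callback's boolean result (from `shouldRetry`) asks for another attempt. A minimal sketch of that retry contract - not rclone's actual pacer, which also adapts sleep times per backend:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// call retries fn while it returns (true, err), sleeping between
// attempts - the same contract as pacer.Call in the diff above.
func call(fn func() (bool, error), maxTries int, sleep time.Duration) error {
	var err error
	for i := 0; i < maxTries; i++ {
		var again bool
		again, err = fn()
		if !again {
			return err
		}
		time.Sleep(sleep)
	}
	return err
}

func main() {
	tries := 0
	err := call(func() (bool, error) {
		tries++
		if tries < 3 {
			return true, errors.New("temporary error") // ask for a retry
		}
		return false, nil // success - stop retrying
	}, 5, 10*time.Millisecond)
	fmt.Println(tries, err) // 3 <nil>
}
```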
@@ -311,20 +279,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	f.dirCache = dircache.New(root, f.trueRootID, f)
 
 	// Find the current root
-	err = f.dirCache.FindRoot(ctx, false)
+	err = f.dirCache.FindRoot(false)
 	if err != nil {
 		// Assume it is a file
 		newRoot, remote := dircache.SplitPath(root)
-		tempF := *f
-		tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF)
-		tempF.root = newRoot
+		newF := *f
+		newF.dirCache = dircache.New(newRoot, f.trueRootID, &newF)
+		newF.root = newRoot
 		// Make new Fs which is the parent
-		err = tempF.dirCache.FindRoot(ctx, false)
+		err = newF.dirCache.FindRoot(false)
 		if err != nil {
 			// No root so return old f
 			return f, nil
 		}
-		_, err := tempF.newObjectWithInfo(ctx, remote, nil)
+		_, err := newF.newObjectWithInfo(remote, nil)
 		if err != nil {
 			if err == fs.ErrorObjectNotFound {
 				// File doesn't exist so return old f
@@ -332,13 +300,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 			}
 			return nil, err
 		}
-		// XXX: update the old f here instead of returning tempF, since
-		// `features` were already filled with functions having *f as a receiver.
-		// See https://github.com/rclone/rclone/issues/2182
-		f.dirCache = tempF.dirCache
-		f.root = tempF.root
 		// return an error with an fs which points to the parent
-		return f, fs.ErrorIsFile
+		return &newF, fs.ErrorIsFile
 	}
 	return f, nil
 }
@@ -356,7 +319,7 @@ func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) {
 // Return an Object from a path
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Node) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error) {
 	o := &Object{
 		fs: f,
 		remote: remote,
@@ -365,7 +328,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Node) (fs.Object, error) {
 		// Set info but not meta
 		o.info = info
 	} else {
-		err := o.readMetaData(ctx) // reads info and meta, returning an error
+		err := o.readMetaData() // reads info and meta, returning an error
 		if err != nil {
 			return nil, err
 		}
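Aside: the deleted XXX comment above (issue 2182) records a real Go pitfall: `f.features` was built from closures whose receiver is the original `*f`, so returning the copied `tempF`/`newF` leaves those closures reading the old value. A toy demonstration with hypothetical types:

```go
package main

import "fmt"

type thing struct {
	root string
	desc func() string // built once, captures a particular receiver
}

func main() {
	t := &thing{root: "old"}
	t.desc = func() string { return t.root } // closure captures t

	copyT := *t        // copy the struct...
	copyT.root = "new" // ...and change the copy
	// The closure still reads through the original pointer, so it
	// reports "old" - the aliasing the XXX comment works around by
	// mutating f in place instead of returning the copy.
	fmt.Println(copyT.desc()) // old
}
```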
@@ -375,18 +338,18 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Node) (fs.Object, error) {
 
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-	return f.newObjectWithInfo(ctx, remote, nil)
+func (f *Fs) NewObject(remote string) (fs.Object, error) {
+	return f.newObjectWithInfo(remote, nil)
 }
 
 // FindLeaf finds a directory of name leaf in the folder with ID pathID
-func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
+func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
 	//fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
 	folder := acd.FolderFromId(pathID, f.c.Nodes)
 	var resp *http.Response
 	var subFolder *acd.Folder
 	err = f.pacer.Call(func() (bool, error) {
-		subFolder, resp, err = folder.GetFolder(enc.FromStandardName(leaf))
+		subFolder, resp, err = folder.GetFolder(leaf)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -407,13 +370,13 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
 }
 
 // CreateDir makes a directory with pathID as parent and name leaf
-func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
+func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
 	//fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
 	folder := acd.FolderFromId(pathID, f.c.Nodes)
 	var resp *http.Response
 	var info *acd.Folder
 	err = f.pacer.Call(func() (bool, error) {
-		info, resp, err = folder.CreateFolder(enc.FromStandardName(leaf))
+		info, resp, err = folder.CreateFolder(leaf)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -481,7 +444,6 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
 			if !hasValidParent {
 				continue
 			}
-			*node.Name = enc.ToStandardName(*node.Name)
 			// Store the nodes up in case we have to retry the listing
 			out = append(out, node)
 		}
@@ -506,12 +468,12 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	err = f.dirCache.FindRoot(ctx, false)
+func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+	err = f.dirCache.FindRoot(false)
 	if err != nil {
 		return nil, err
 	}
-	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
+	directoryID, err := f.dirCache.FindDir(dir, false)
 	if err != nil {
 		return nil, err
 	}
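Aside: `List` above resolves directories in two steps - `FindRoot` pins down the ID of the configured root, then `FindDir` maps a relative path to a directory ID through `lib/dircache`. A toy path-to-ID cache in the same spirit (not rclone's actual dircache):

```go
package main

import (
	"fmt"
	"strings"
)

// dirCache is a toy stand-in for lib/dircache: it maps a relative
// directory path to a backend ID, filling the cache level by level.
type dirCache struct {
	rootID string
	ids    map[string]string
	lookup func(parentID, leaf string) (string, bool) // backend call
}

func (dc *dirCache) FindDir(dir string) (string, error) {
	if dir == "" {
		return dc.rootID, nil
	}
	if id, ok := dc.ids[dir]; ok {
		return id, nil // cache hit
	}
	parent, leaf := "", dir
	if i := strings.LastIndex(dir, "/"); i >= 0 {
		parent, leaf = dir[:i], dir[i+1:]
	}
	parentID, err := dc.FindDir(parent) // resolve the parent first
	if err != nil {
		return "", err
	}
	id, ok := dc.lookup(parentID, leaf)
	if !ok {
		return "", fmt.Errorf("directory %q not found", dir)
	}
	dc.ids[dir] = id
	return id, nil
}

func main() {
	dc := &dirCache{
		rootID: "root",
		ids:    map[string]string{},
		lookup: func(parentID, leaf string) (string, bool) {
			return parentID + "/" + leaf, true // pretend every dir exists
		},
	}
	id, err := dc.FindDir("a/b/c")
	fmt.Println(id, err) // root/a/b/c <nil>
}
```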
@@ -529,7 +491,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 			d := fs.NewDir(remote, when).SetID(*node.Id)
 			entries = append(entries, d)
 		case fileKind:
-			o, err := f.newObjectWithInfo(ctx, remote, node)
+			o, err := f.newObjectWithInfo(remote, node)
 			if err != nil {
 				iErr = err
 				return true
@@ -573,7 +535,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 // At the end of large uploads. The speculation is that the timeout
 // is waiting for the sha1 hashing to complete and the file may well
 // be properly uploaded.
-func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
+func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
 	// Return if no error - all is well
 	if inErr == nil {
 		return false, inInfo, inErr
@@ -613,7 +575,7 @@ func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
 	fs.Debugf(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
 	remote := src.Remote()
 	for i := 1; i <= retries; i++ {
-		o, err := f.NewObject(ctx, remote)
+		o, err := f.NewObject(remote)
 		if err == fs.ErrorObjectNotFound {
 			fs.Debugf(src, "Object not found - waiting (%d/%d)", i, retries)
 		} else if err != nil {
@@ -639,7 +601,7 @@ func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	remote := src.Remote()
 	size := src.Size()
 	// Temporary Object under construction
@@ -648,17 +610,17 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 		remote: remote,
 	}
 	// Check if object already exists
-	err := o.readMetaData(ctx)
+	err := o.readMetaData()
 	switch err {
 	case nil:
-		return o, o.Update(ctx, in, src, options...)
+		return o, o.Update(in, src, options...)
 	case fs.ErrorObjectNotFound:
 		// Not found so create it
 	default:
 		return nil, err
 	}
 	// If not create it
-	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
+	leaf, directoryID, err := f.dirCache.FindRootAndPath(remote, true)
 	if err != nil {
 		return nil, err
 	}
@@ -671,10 +633,10 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	err = f.pacer.CallNoRetry(func() (bool, error) {
 		start := time.Now()
 		f.tokenRenewer.Start()
-		info, resp, err = folder.Put(in, enc.FromStandardName(leaf))
+		info, resp, err = folder.Put(in, leaf)
 		f.tokenRenewer.Stop()
 		var ok bool
-		ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
+		ok, info, err = f.checkUpload(resp, in, src, info, err, time.Since(start))
 		if ok {
 			return false, nil
 		}
@@ -688,13 +650,13 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 }
 
 // Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-	err := f.dirCache.FindRoot(ctx, true)
+func (f *Fs) Mkdir(dir string) error {
+	err := f.dirCache.FindRoot(true)
 	if err != nil {
 		return err
 	}
 	if dir != "" {
-		_, err = f.dirCache.FindDir(ctx, dir, true)
+		_, err = f.dirCache.FindDir(dir, true)
 	}
 	return err
 }
@@ -708,7 +670,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 	// go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsMove)$'
 	srcObj, ok := src.(*Object)
 	if !ok {
@@ -717,15 +679,15 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	}
 
 	// create the destination directory if necessary
-	err := f.dirCache.FindRoot(ctx, true)
+	err := f.dirCache.FindRoot(true)
 	if err != nil {
 		return nil, err
 	}
-	srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
+	srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(srcObj.remote, false)
 	if err != nil {
 		return nil, err
 	}
-	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, remote, true)
+	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(remote, true)
 	if err != nil {
 		return nil, err
 	}
@@ -741,12 +703,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 		srcErr, dstErr error
 	)
 	for i := 1; i <= fs.Config.LowLevelRetries; i++ {
-		_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
+		_, srcErr = srcObj.fs.NewObject(srcObj.remote) // try reading the object
 		if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
 			// exit if error on source
 			return nil, srcErr
 		}
-		dstObj, dstErr = f.NewObject(ctx, remote)
+		dstObj, dstErr = f.NewObject(remote)
 		if dstErr != nil && dstErr != fs.ErrorObjectNotFound {
 			// exit if error on dst
 			return nil, dstErr
@@ -775,7 +737,7 @@ func (f *Fs) DirCacheFlush() {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
+func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
 	srcFs, ok := src.(*Fs)
 	if !ok {
 		fs.Debugf(src, "DirMove error: not same remote type")
@@ -791,14 +753,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
 	}
 
 	// find the root src directory
-	err = srcFs.dirCache.FindRoot(ctx, false)
+	err = srcFs.dirCache.FindRoot(false)
 	if err != nil {
 		return err
 	}
 
 	// find the root dst directory
 	if dstRemote != "" {
-		err = f.dirCache.FindRoot(ctx, true)
+		err = f.dirCache.FindRoot(true)
 		if err != nil {
 			return err
 		}
@@ -813,14 +775,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
 	if dstRemote == "" {
 		findPath = f.root
 	}
-	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, findPath, true)
+	dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(findPath, true)
 	if err != nil {
 		return err
 	}
 
 	// Check destination does not exist
 	if dstRemote != "" {
-		_, err = f.dirCache.FindDir(ctx, dstRemote, false)
+		_, err = f.dirCache.FindDir(dstRemote, false)
 		if err == fs.ErrorDirNotFound {
 			// OK
 		} else if err != nil {
@@ -836,7 +798,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
 	if srcRemote == "" {
 		srcDirectoryID, err = srcFs.dirCache.RootParentID()
 	} else {
-		_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false)
+		_, srcDirectoryID, err = srcFs.dirCache.FindPath(findPath, false)
 	}
 	if err != nil {
 		return err
@@ -844,7 +806,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
 	srcLeaf, _ := dircache.SplitPath(srcPath)
 
 	// Find ID of src
-	srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
+	srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
 	if err != nil {
 		return err
 	}
@@ -877,17 +839,17 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
 
 // purgeCheck remotes the root directory, if check is set then it
 // refuses to do so if it has anything in
-func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
+func (f *Fs) purgeCheck(dir string, check bool) error {
 	root := path.Join(f.root, dir)
 	if root == "" {
 		return errors.New("can't purge root directory")
 	}
 	dc := f.dirCache
-	err := dc.FindRoot(ctx, false)
+	err := dc.FindRoot(false)
 	if err != nil {
 		return err
 	}
-	rootID, err := dc.FindDir(ctx, dir, false)
+	rootID, err := dc.FindDir(dir, false)
 	if err != nil {
 		return err
 	}
@@ -936,8 +898,8 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 // Rmdir deletes the root folder
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-	return f.purgeCheck(ctx, dir, true)
+func (f *Fs) Rmdir(dir string) error {
+	return f.purgeCheck(dir, true)
 }
 
 // Precision return the precision of this Fs
@@ -959,7 +921,7 @@ func (f *Fs) Hashes() hash.Set {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-//func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+//func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 // srcObj, ok := src.(*Object)
 // if !ok {
 // 	fs.Debugf(src, "Can't copy - not same remote type")
@@ -970,7 +932,7 @@ func (f *Fs) Hashes() hash.Set {
 // if err != nil {
 // 	return nil, err
 // }
-// return f.NewObject(ctx, remote), nil
+// return f.NewObject(remote), nil
 //}
 
 // Purge deletes all the files and the container
@@ -978,8 +940,8 @@ func (f *Fs) Hashes() hash.Set {
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge(ctx context.Context) error {
-	return f.purgeCheck(ctx, "", false)
+func (f *Fs) Purge() error {
+	return f.purgeCheck("", false)
 }
 
 // ------------------------------------------------------------
@@ -1003,7 +965,7 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns the Md5sum of an object returning a lowercase hex string
-func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
+func (o *Object) Hash(t hash.Type) (string, error) {
 	if t != hash.MD5 {
 		return "", hash.ErrUnsupported
 	}
@@ -1026,11 +988,11 @@ func (o *Object) Size() int64 {
 // it also sets the info
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (o *Object) readMetaData(ctx context.Context) (err error) {
+func (o *Object) readMetaData() (err error) {
 	if o.info != nil {
 		return nil
 	}
-	leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
+	leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(o.remote, false)
 	if err != nil {
 		if err == fs.ErrorDirNotFound {
 			return fs.ErrorObjectNotFound
@@ -1041,7 +1003,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 	var resp *http.Response
 	var info *acd.File
 	err = o.fs.pacer.Call(func() (bool, error) {
-		info, resp, err = folder.GetFile(enc.FromStandardName(leaf))
+		info, resp, err = folder.GetFile(leaf)
 		return o.fs.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1059,8 +1021,8 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime(ctx context.Context) time.Time {
-	err := o.readMetaData(ctx)
+func (o *Object) ModTime() time.Time {
+	err := o.readMetaData()
 	if err != nil {
 		fs.Debugf(o, "Failed to read metadata: %v", err)
 		return time.Now()
@@ -1074,7 +1036,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
 }
 
 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
+func (o *Object) SetModTime(modTime time.Time) error {
 	// FIXME not implemented
 	return fs.ErrorCantSetModTime
 }
@@ -1085,7 +1047,7 @@ func (o *Object) Storable() bool {
 }
 
 // Open an object for read
-func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
 	if bigObject {
 		fs.Debugf(o, "Downloading large object via tempLink")
@@ -1097,7 +1059,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 		if !bigObject {
 			in, resp, err = file.OpenHeaders(headers)
 		} else {
-			in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers)
+			in, resp, err = file.OpenTempURLHeaders(rest.ClientWithHeaderReset(o.fs.noAuthClient, headers), headers)
 		}
 		return o.fs.shouldRetry(resp, err)
 	})
@@ -1107,7 +1069,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 // Update the object with the contents of the io.Reader, modTime and size
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 	file := acd.File{Node: o.info}
 	var info *acd.File
 	var resp *http.Response
@@ -1118,7 +1080,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 		info, resp, err = file.Overwrite(in)
 		o.fs.tokenRenewer.Stop()
 		var ok bool
-		ok, info, err = o.fs.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
+		ok, info, err = o.fs.checkUpload(resp, in, src, info, err, time.Since(start))
 		if ok {
 			return false, nil
 		}
@@ -1143,7 +1105,7 @@ func (f *Fs) removeNode(info *acd.Node) error {
 }
 
 // Remove an object
-func (o *Object) Remove(ctx context.Context) error {
+func (o *Object) Remove() error {
 	return o.fs.removeNode(o.info)
 }
 
@@ -1161,7 +1123,7 @@ func (f *Fs) restoreNode(info *acd.Node) (newInfo *acd.Node, err error) {
 func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) {
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		newInfo, resp, err = info.Rename(enc.FromStandardName(newName))
+		newInfo, resp, err = info.Rename(newName)
 		return f.shouldRetry(resp, err)
 	})
 	return newInfo, err
@@ -1265,7 +1227,7 @@ OnConflict:
 }
 
 // MimeType of an Object if known, "" otherwise
-func (o *Object) MimeType(ctx context.Context) string {
+func (o *Object) MimeType() string {
 	if o.info.ContentProperties != nil && o.info.ContentProperties.ContentType != nil {
 		return *o.info.ContentProperties.ContentType
 	}
@@ -1278,38 +1240,24 @@ func (o *Object) MimeType(ctx context.Context) string {
|
|||||||
// Automatically restarts itself in case of unexpected behaviour of the remote.
|
// Automatically restarts itself in case of unexpected behaviour of the remote.
|
||||||
//
|
//
|
||||||
// Close the returned channel to stop being notified.
|
// Close the returned channel to stop being notified.
|
||||||
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
|
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
|
||||||
checkpoint := f.opt.Checkpoint
|
checkpoint := f.opt.Checkpoint
|
||||||
|
|
||||||
|
quit := make(chan bool)
|
||||||
go func() {
|
go func() {
|
||||||
var ticker *time.Ticker
|
|
||||||
var tickerC <-chan time.Time
|
|
||||||
for {
|
for {
|
||||||
|
checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
|
||||||
|
if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
|
||||||
|
fs.Debugf(f, "Unable to save checkpoint: %v", err)
|
||||||
|
}
|
||||||
select {
|
select {
|
||||||
case pollInterval, ok := <-pollIntervalChan:
|
case <-quit:
|
||||||
if !ok {
|
return
|
||||||
if ticker != nil {
|
case <-time.After(pollInterval):
|
||||||
ticker.Stop()
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if pollInterval == 0 {
|
|
||||||
if ticker != nil {
|
|
||||||
ticker.Stop()
|
|
||||||
ticker, tickerC = nil, nil
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
ticker = time.NewTicker(pollInterval)
|
|
||||||
tickerC = ticker.C
|
|
||||||
}
|
|
||||||
case <-tickerC:
|
|
||||||
checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
|
|
||||||
if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
|
|
||||||
fs.Debugf(f, "Unable to save checkpoint: %v", err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
return quit
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoint string) string {
|
func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoint string) string {
|
||||||
@@ -1357,11 +1305,10 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoin
|
|||||||
if len(node.Parents) > 0 {
|
if len(node.Parents) > 0 {
|
||||||
if path, ok := f.dirCache.GetInv(node.Parents[0]); ok {
|
if path, ok := f.dirCache.GetInv(node.Parents[0]); ok {
|
||||||
// and append the drive file name to compute the full file name
|
// and append the drive file name to compute the full file name
|
||||||
name := enc.ToStandardName(*node.Name)
|
|
||||||
if len(path) > 0 {
|
if len(path) > 0 {
|
||||||
path = path + "/" + name
|
path = path + "/" + *node.Name
|
||||||
} else {
|
} else {
|
||||||
path = name
|
path = *node.Name
|
||||||
}
|
}
|
||||||
// this will now clear the actual file too
|
// this will now clear the actual file too
|
||||||
pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
|
pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
|
||||||
|
|||||||
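The ChangeNotify rewrite above is the most intricate hunk in this file: the newer signature takes a channel of poll intervals and re-arms a ticker, while the v1.43.1 signature takes a fixed interval and hands back a quit channel. A self-contained sketch of the newer pattern follows (standard library only; the fmt.Println stands in for changeNotifyRunner). The trick that makes "send 0 to pause" work is that a nil channel never fires in a select.

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        pollIntervalChan := make(chan time.Duration)
        done := make(chan struct{})

        go func() {
            defer close(done)
            var ticker *time.Ticker
            var tickerC <-chan time.Time // nil channel: its case never fires
            for {
                select {
                case pollInterval, ok := <-pollIntervalChan:
                    if !ok { // channel closed: caller wants us to stop
                        if ticker != nil {
                            ticker.Stop()
                        }
                        return
                    }
                    if pollInterval == 0 { // zero pauses polling
                        if ticker != nil {
                            ticker.Stop()
                            ticker, tickerC = nil, nil
                        }
                    } else { // any other value re-arms the ticker
                        ticker = time.NewTicker(pollInterval)
                        tickerC = ticker.C
                    }
                case <-tickerC:
                    fmt.Println("poll for changes") // changeNotifyRunner's slot
                }
            }
        }()

        pollIntervalChan <- 10 * time.Millisecond // start polling
        time.Sleep(35 * time.Millisecond)
        close(pollIntervalChan) // stop the notifier
        <-done
    }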
@@ -7,9 +7,9 @@ package amazonclouddrive_test
 import (
 	"testing"
 
-	"github.com/rclone/rclone/backend/amazonclouddrive"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/backend/amazonclouddrive"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fstest/fstests"
 )
 
 // TestIntegration runs integration tests against the remote
(file diff suppressed because it is too large)
@@ -1,18 +0,0 @@
-// +build !plan9,!solaris
-
-package azureblob
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-)
-
-func (f *Fs) InternalTest(t *testing.T) {
-	// Check first feature flags are set on this
-	// remote
-	enabled := f.Features().SetTier
-	assert.True(t, enabled)
-	enabled = f.Features().GetTier
-	assert.True(t, enabled)
-}
@@ -1,37 +1,20 @@
 // Test AzureBlob filesystem interface
 
-// +build !plan9,!solaris
+// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
 
-package azureblob
+package azureblob_test
 
 import (
 	"testing"
 
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/backend/azureblob"
+	"github.com/ncw/rclone/fstest/fstests"
 )
 
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestAzureBlob:",
-		NilObject:   (*Object)(nil),
-		TiersToTest: []string{"Hot", "Cool"},
-		ChunkedUpload: fstests.ChunkedUploadConfig{
-			MaxChunkSize: maxChunkSize,
-		},
+		NilObject:  (*azureblob.Object)(nil),
 	})
 }
-
-func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-	return f.setUploadChunkSize(cs)
-}
-
-func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-	return f.setUploadCutoff(cs)
-}
-
-var (
-	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
-	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
-)
@@ -1,6 +1,6 @@
 // Build for azureblob for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
 
-// +build plan9 solaris
+// +build freebsd netbsd openbsd plan9 solaris !go1.8
 
 package azureblob
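The two build-constraint hunks above narrow and widen the same platform set, so it is worth spelling out the tag grammar they rely on: within a +build line a space means OR, a comma means AND, and "!" negates. So the stub file builds on ANY of the listed platforms or when the compiler predates Go 1.8, while the test file requires ALL of the negated conditions plus go1.8. An illustrative file header (not from rclone):

    // +build linux darwin,!cgo

    // Package example exists only to carry the build line above, which reads:
    // build on linux, OR on darwin when cgo is disabled.
    package example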
@@ -7,7 +7,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/rclone/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/fserrors"
 )
 
 // Error describes a B2 error response
@@ -17,12 +17,12 @@ type Error struct {
 	Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
 }
 
-// Error satisfies the error interface
+// Error statisfies the error interface
 func (e *Error) Error() string {
 	return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
 }
 
-// Fatal satisfies the Fatal interface
+// Fatal statisfies the Fatal interface
 //
 // It indicates which errors should be treated as fatal
 func (e *Error) Fatal() bool {
@@ -50,7 +50,7 @@ type Timestamp time.Time
 // MarshalJSON turns a Timestamp into JSON (in UTC)
 func (t *Timestamp) MarshalJSON() (out []byte, err error) {
 	timestamp := (*time.Time)(t).UTC().UnixNano()
-	return []byte(strconv.FormatInt(timestamp/1e6, 10)), nil
+	return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
 }
 
 // UnmarshalJSON turns JSON into a Timestamp
@@ -59,7 +59,7 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
 	if err != nil {
 		return err
 	}
-	*t = Timestamp(time.Unix(timestamp/1e3, (timestamp%1e3)*1e6).UTC())
+	*t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
 	return nil
 }
 
@@ -100,7 +100,7 @@ func RemoveVersion(remote string) (t Timestamp, newRemote string) {
 	return Timestamp(newT), base[:versionStart] + ext
 }
 
-// IsZero returns true if the timestamp is uninitialized
+// IsZero returns true if the timestamp is unitialised
 func (t Timestamp) IsZero() bool {
 	return time.Time(t).IsZero()
 }
@@ -136,7 +136,6 @@ type AuthorizeAccountResponse struct {
 	AccountID string `json:"accountId"` // The identifier for the account.
 	Allowed   struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
 		BucketID     string      `json:"bucketId"`     // When present, access is restricted to one bucket.
-		BucketName   string      `json:"bucketName"`   // When present, name of bucket - may be empty
 		Capabilities []string    `json:"capabilities"` // A list of strings, each one naming a capability the key has.
 		NamePrefix   interface{} `json:"namePrefix"`   // When present, access is restricted to files whose names start with the prefix
 	} `json:"allowed"`
@@ -189,21 +188,6 @@ type GetUploadURLResponse struct {
 	AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
 }
 
-// GetDownloadAuthorizationRequest is passed to b2_get_download_authorization
-type GetDownloadAuthorizationRequest struct {
-	BucketID               string `json:"bucketId"`                       // The ID of the bucket that you want to upload to.
-	FileNamePrefix         string `json:"fileNamePrefix"`                 // The file name prefix of files the download authorization token will allow access to.
-	ValidDurationInSeconds int64  `json:"validDurationInSeconds"`         // The number of seconds before the authorization token will expire. The minimum value is 1 second. The maximum value is 604800 which is one week in seconds.
-	B2ContentDisposition   string `json:"b2ContentDisposition,omitempty"` // optional - If this is present, download requests using the returned authorization must include the same value for b2ContentDisposition.
-}
-
-// GetDownloadAuthorizationResponse is received from b2_get_download_authorization
-type GetDownloadAuthorizationResponse struct {
-	BucketID           string `json:"bucketId"`           // The unique ID of the bucket.
-	FileNamePrefix     string `json:"fileNamePrefix"`     // The file name prefix of files the download authorization token will allow access to.
-	AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when downloading files, see b2_download_file_by_name.
-}
-
 // FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
 type FileInfo struct {
 	ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
@@ -326,14 +310,3 @@ type CancelLargeFileResponse struct {
 	AccountID string `json:"accountId"` // The identifier for the account.
 	BucketID  string `json:"bucketId"`  // The unique ID of the bucket.
 }
-
-// CopyFileRequest is as passed to b2_copy_file
-type CopyFileRequest struct {
-	SourceID          string            `json:"sourceFileId"`                  // The ID of the source file being copied.
-	Name              string            `json:"fileName"`                      // The name of the new file being created.
-	Range             string            `json:"range,omitempty"`               // The range of bytes to copy. If not provided, the whole source file will be copied.
-	MetadataDirective string            `json:"metadataDirective,omitempty"`   // The strategy for how to populate metadata for the new file: COPY or REPLACE
-	ContentType       string            `json:"contentType,omitempty"`         // The MIME type of the content of the file (REPLACE only)
-	Info              map[string]string `json:"fileInfo,omitempty"`            // This field stores the metadata that will be stored with the file. (REPLACE only)
-	DestBucketID      string            `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
-}
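Both Timestamp hunks above only change the case of the exponent (1E6 vs 1e6 and so on); the encoding itself is B2's milliseconds-since-epoch convention. A standalone round trip, using only the standard library:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        t := time.Date(2018, 9, 1, 12, 0, 0, 500e6, time.UTC)

        // MarshalJSON direction: nanoseconds -> milliseconds
        ms := t.UTC().UnixNano() / 1e6
        fmt.Println(ms) // 1535803200500

        // UnmarshalJSON direction: milliseconds -> time.Time
        back := time.Unix(ms/1e3, (ms%1e3)*1e6).UTC()
        fmt.Println(back.Equal(t)) // true
    }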
@@ -4,8 +4,8 @@ import (
 	"testing"
 	"time"
 
-	"github.com/rclone/rclone/backend/b2/api"
-	"github.com/rclone/rclone/fstest"
+	"github.com/ncw/rclone/backend/b2/api"
+	"github.com/ncw/rclone/fstest"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
backend/b2/b2.go (1123 changes): file diff suppressed because it is too large
@@ -4,7 +4,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/rclone/rclone/fstest"
+	"github.com/ncw/rclone/fstest"
 )
 
 // Test b2 string encoding
@@ -1,34 +1,17 @@
 // Test B2 filesystem interface
-package b2
+package b2_test
 
 import (
 	"testing"
 
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/backend/b2"
+	"github.com/ncw/rclone/fstest/fstests"
 )
 
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestB2:",
-		NilObject:  (*Object)(nil),
-		ChunkedUpload: fstests.ChunkedUploadConfig{
-			MinChunkSize:       minChunkSize,
-			NeedMultipleChunks: true,
-		},
+		NilObject:  (*b2.Object)(nil),
 	})
 }
-
-func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-	return f.setUploadChunkSize(cs)
-}
-
-func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-	return f.setUploadCutoff(cs)
-}
-
-var (
-	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
-	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
-)
@@ -6,7 +6,6 @@ package b2
 
 import (
 	"bytes"
-	"context"
 	"crypto/sha1"
 	"encoding/hex"
 	"fmt"
@@ -15,12 +14,12 @@ import (
 	"strings"
 	"sync"
 
+	"github.com/ncw/rclone/backend/b2/api"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/accounting"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/lib/rest"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/backend/b2/api"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/accounting"
-	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/lib/rest"
 )
 
 type hashAppendingReader struct {
@@ -81,7 +80,7 @@ type largeUpload struct {
 }
 
 // newLargeUpload starts an upload of object o from in with metadata in src
-func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
+func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
 	remote := o.remote
 	size := src.Size()
 	parts := int64(0)
@@ -99,34 +98,31 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 		sha1SliceSize = parts
 	}
 
-	modTime := src.ModTime(ctx)
+	modTime := src.ModTime()
 	opts := rest.Opts{
 		Method: "POST",
 		Path:   "/b2_start_large_file",
 	}
-	bucket, bucketPath := o.split()
-	bucketID, err := f.getBucketID(ctx, bucket)
+	bucketID, err := f.getBucketID()
 	if err != nil {
 		return nil, err
 	}
 	var request = api.StartLargeFileRequest{
 		BucketID:    bucketID,
-		Name:        enc.FromStandardPath(bucketPath),
-		ContentType: fs.MimeType(ctx, src),
+		Name:        o.fs.root + remote,
+		ContentType: fs.MimeType(src),
 		Info: map[string]string{
 			timeKey: timeString(modTime),
 		},
 	}
 	// Set the SHA1 if known
-	if !o.fs.opt.DisableCheckSum {
-		if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
-			request.Info[sha1Key] = calculatedSha1
-		}
+	if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
+		request.Info[sha1Key] = calculatedSha1
 	}
 	var response api.StartLargeFileResponse
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
-		return f.shouldRetry(ctx, resp, err)
+		resp, err := f.srv.CallJSON(&opts, &request, &response)
+		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
 		return nil, err
@@ -150,7 +146,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 // getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
 //
 // This should be returned with returnUploadURL when finished
-func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
+func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
 	up.uploadMu.Lock()
 	defer up.uploadMu.Unlock()
 	if len(up.uploads) == 0 {
@@ -162,8 +158,8 @@ func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadP
 			ID: up.id,
 		}
 		err := up.f.pacer.Call(func() (bool, error) {
-			resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
-			return up.f.shouldRetry(ctx, resp, err)
+			resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
+			return up.f.shouldRetry(resp, err)
 		})
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to get upload URL")
@@ -192,12 +188,12 @@ func (up *largeUpload) clearUploadURL() {
 }
 
 // Transfer a chunk
-func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
+func (up *largeUpload) transferChunk(part int64, body []byte) error {
 	err := up.f.pacer.Call(func() (bool, error) {
 		fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
 
 		// Get upload URL
-		upload, err := up.getUploadURL(ctx)
+		upload, err := up.getUploadURL()
 		if err != nil {
 			return false, err
 		}
@@ -241,8 +237,8 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
 
 		var response api.UploadPartResponse
 
-		resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
-		retry, err := up.f.shouldRetry(ctx, resp, err)
+		resp, err := up.f.srv.CallJSON(&opts, nil, &response)
+		retry, err := up.f.shouldRetry(resp, err)
 		if err != nil {
 			fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
 		}
@@ -264,7 +260,7 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
 }
 
 // finish closes off the large upload
-func (up *largeUpload) finish(ctx context.Context) error {
+func (up *largeUpload) finish() error {
 	fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
 	opts := rest.Opts{
 		Method: "POST",
@@ -276,8 +272,8 @@ func (up *largeUpload) finish(ctx context.Context) error {
 	}
 	var response api.FileInfo
 	err := up.f.pacer.Call(func() (bool, error) {
-		resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
-		return up.f.shouldRetry(ctx, resp, err)
+		resp, err := up.f.srv.CallJSON(&opts, &request, &response)
+		return up.f.shouldRetry(resp, err)
 	})
 	if err != nil {
 		return err
@@ -286,7 +282,7 @@ func (up *largeUpload) finish(ctx context.Context) error {
 }
 
 // cancel aborts the large upload
-func (up *largeUpload) cancel(ctx context.Context) error {
+func (up *largeUpload) cancel() error {
 	opts := rest.Opts{
 		Method: "POST",
 		Path:   "/b2_cancel_large_file",
@@ -296,18 +292,18 @@ func (up *largeUpload) cancel(ctx context.Context) error {
 	}
 	var response api.CancelLargeFileResponse
 	err := up.f.pacer.Call(func() (bool, error) {
-		resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
-		return up.f.shouldRetry(ctx, resp, err)
+		resp, err := up.f.srv.CallJSON(&opts, &request, &response)
+		return up.f.shouldRetry(resp, err)
 	})
 	return err
 }
 
-func (up *largeUpload) managedTransferChunk(ctx context.Context, wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
+func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
 	wg.Add(1)
 	go func(part int64, buf []byte) {
 		defer wg.Done()
 		defer up.f.putUploadBlock(buf)
-		err := up.transferChunk(ctx, part, buf)
+		err := up.transferChunk(part, buf)
 		if err != nil {
 			select {
 			case errs <- err:
@@ -317,7 +313,7 @@ func (up *largeUpload) managedTransferChunk(ctx context.Context, wg *sync.WaitGr
 	}(part, buf)
 }
 
-func (up *largeUpload) finishOrCancelOnError(ctx context.Context, err error, errs chan error) error {
+func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
 	if err == nil {
 		select {
 		case err = <-errs:
@@ -326,19 +322,19 @@ func (up *largeUpload) finishOrCancelOnError(ctx context.Context, err error, err
 	}
 	if err != nil {
 		fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
-		cancelErr := up.cancel(ctx)
+		cancelErr := up.cancel()
		if cancelErr != nil {
 			fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
 		}
 		return err
 	}
-	return up.finish(ctx)
+	return up.finish()
 }
 
 // Stream uploads the chunks from the input, starting with a required initial
 // chunk. Assumes the file size is unknown and will upload until the input
 // reaches EOF.
-func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
+func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
 	fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
 	errs := make(chan error, 1)
 	hasMoreParts := true
@@ -346,7 +342,7 @@ func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (e
 
 	// Transfer initial chunk
 	up.size = int64(len(initialUploadBlock))
-	up.managedTransferChunk(ctx, &wg, errs, 1, initialUploadBlock)
+	up.managedTransferChunk(&wg, errs, 1, initialUploadBlock)
 
 outer:
	for part := int64(2); hasMoreParts; part++ {
@@ -388,16 +384,16 @@ outer:
 		}
 
 		// Transfer the chunk
-		up.managedTransferChunk(ctx, &wg, errs, part, buf)
+		up.managedTransferChunk(&wg, errs, part, buf)
 	}
 	wg.Wait()
 	up.sha1s = up.sha1s[:up.parts]
 
-	return up.finishOrCancelOnError(ctx, err, errs)
+	return up.finishOrCancelOnError(err, errs)
 }
 
 // Upload uploads the chunks from the input
-func (up *largeUpload) Upload(ctx context.Context) error {
+func (up *largeUpload) Upload() error {
 	fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
 	remaining := up.size
 	errs := make(chan error, 1)
@@ -428,10 +424,10 @@ outer:
 		}
 
 		// Transfer the chunk
-		up.managedTransferChunk(ctx, &wg, errs, part, buf)
+		up.managedTransferChunk(&wg, errs, part, buf)
 		remaining -= reqSize
 	}
 	wg.Wait()
 
-	return up.finishOrCancelOnError(ctx, err, errs)
+	return up.finishOrCancelOnError(err, errs)
 }
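Every hunk in this file threads ctx through the same f.pacer.Call shape, so the control flow is easy to lose in the noise. Reduced to a dependency-free sketch: the closure returns (retry, err) and the loop re-invokes it with a delay while retry is true. The real pacer in github.com/ncw/rclone/lib/pacer also does rate limiting and exponential backoff; the maxTries parameter here is an assumption for the sketch.

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // call keeps invoking fn until it stops asking for a retry or the
    // attempt budget runs out, returning the last error seen.
    func call(fn func() (bool, error), maxTries int) error {
        var err error
        for try := 1; try <= maxTries; try++ {
            var retry bool
            retry, err = fn()
            if !retry {
                break
            }
            time.Sleep(10 * time.Millisecond) // stand-in for pacer backoff
        }
        return err
    }

    func main() {
        tries := 0
        err := call(func() (bool, error) {
            tries++
            if tries < 3 {
                return true, errors.New("transient") // ask for a retry
            }
            return false, nil // success: stop retrying
        }, 5)
        fmt.Println(tries, err) // 3 <nil>
    }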
@@ -45,7 +45,7 @@ type Error struct {
 	RequestID string `json:"request_id"`
 }
 
-// Error returns a string for the error and satisfies the error interface
+// Error returns a string for the error and statistifes the error interface
 func (e *Error) Error() string {
 	out := fmt.Sprintf("Error %q (%d)", e.Code, e.Status)
 	if e.Message != "" {
@@ -57,11 +57,11 @@ func (e *Error) Error() string {
 	return out
 }
 
-// Check Error satisfies the error interface
+// Check Error statisfies the error interface
 var _ error = (*Error)(nil)
 
 // ItemFields are the fields needed for FileInfo
-var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link"
+var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status"
 
 // Types of things in Item
 const (
@@ -86,10 +86,6 @@ type Item struct {
 	ContentCreatedAt  Time   `json:"content_created_at"`
 	ContentModifiedAt Time   `json:"content_modified_at"`
 	ItemStatus        string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
-	SharedLink        struct {
-		URL    string `json:"url,omitempty"`
-		Access string `json:"access,omitempty"`
-	} `json:"shared_link"`
 }
 
 // ModTime returns the modification time of the item
@@ -149,14 +145,6 @@ type CopyFile struct {
 	Parent Parent `json:"parent"`
 }
 
-// CreateSharedLink is the request for Public Link
-type CreateSharedLink struct {
-	SharedLink struct {
-		URL    string `json:"url,omitempty"`
-		Access string `json:"access,omitempty"`
-	} `json:"shared_link"`
-}
-
 // UploadSessionRequest is uses in Create Upload Session
 type UploadSessionRequest struct {
 	FolderID string `json:"folder_id,omitempty"` // don't pass for update
@@ -202,23 +190,3 @@ type CommitUpload struct {
 		ContentModifiedAt Time `json:"content_modified_at"`
 	} `json:"attributes"`
 }
-
-// ConfigJSON defines the shape of a box config.json
-type ConfigJSON struct {
-	BoxAppSettings AppSettings `json:"boxAppSettings"`
-	EnterpriseID   string      `json:"enterpriseID"`
-}
-
-// AppSettings defines the shape of the boxAppSettings within box config.json
-type AppSettings struct {
-	ClientID     string  `json:"clientID"`
-	ClientSecret string  `json:"clientSecret"`
-	AppAuth      AppAuth `json:"appAuth"`
-}
-
-// AppAuth defines the shape of the appAuth within boxAppSettings in config.json
-type AppAuth struct {
-	PublicKeyID string `json:"publicKeyID"`
-	PrivateKey  string `json:"privateKey"`
-	Passphrase  string `json:"passphrase"`
-}
@@ -10,13 +10,8 @@ package box
 // FIXME box can copy a directory
 
 import (
-	"context"
-	"crypto/rsa"
-	"encoding/json"
-	"encoding/pem"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"log"
 	"net/http"
 	"net/url"
@@ -25,31 +20,22 @@ import (
 	"strings"
 	"time"
 
-	"github.com/rclone/rclone/lib/jwtutil"
-	"github.com/youmark/pkcs8"
+	"github.com/ncw/rclone/backend/box/api"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
+	"github.com/ncw/rclone/fs/config/obscure"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/lib/dircache"
+	"github.com/ncw/rclone/lib/oauthutil"
+	"github.com/ncw/rclone/lib/pacer"
+	"github.com/ncw/rclone/lib/rest"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/backend/box/api"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/config/obscure"
-	"github.com/rclone/rclone/fs/encodings"
-	"github.com/rclone/rclone/fs/fserrors"
-	"github.com/rclone/rclone/fs/fshttp"
-	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/lib/dircache"
-	"github.com/rclone/rclone/lib/oauthutil"
-	"github.com/rclone/rclone/lib/pacer"
-	"github.com/rclone/rclone/lib/rest"
 	"golang.org/x/oauth2"
-	"golang.org/x/oauth2/jws"
 )
 
-const enc = encodings.Box
-
 const (
 	rcloneClientID              = "d0374ba6pgmaguie02ge15sv1mllndho"
 	rcloneEncryptedClientSecret = "sYbJYm99WB8jzeaLPU0OPDMJKIkZvD2qOn3SyEMfiJr03RdtDt3xcZEIudRhbIDL"
@@ -62,7 +48,6 @@ const (
 	listChunks          = 1000     // chunk size to read directory listings
 	minUploadCutoff     = 50000000 // upload cutoff can be no lower than this
 	defaultUploadCutoff = 50 * 1024 * 1024
-	tokenURL            = "https://api.box.com/oauth2/token"
 )
 
 // Globals
@@ -87,34 +72,9 @@ func init() {
 		Description: "Box",
 		NewFs:       NewFs,
 		Config: func(name string, m configmap.Mapper) {
-			jsonFile, ok := m.Get("box_config_file")
-			boxSubType, boxSubTypeOk := m.Get("box_sub_type")
-			var err error
-			if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
-				boxConfig, err := getBoxConfig(jsonFile)
-				if err != nil {
-					log.Fatalf("Failed to configure token: %v", err)
-				}
-				privateKey, err := getDecryptedPrivateKey(boxConfig)
-				if err != nil {
-					log.Fatalf("Failed to configure token: %v", err)
-				}
-				claims, err := getClaims(boxConfig, boxSubType)
-				if err != nil {
-					log.Fatalf("Failed to configure token: %v", err)
-				}
-				signingHeaders := getSigningHeaders(boxConfig)
-				queryParams := getQueryParams(boxConfig)
-				client := fshttp.NewClient(fs.Config)
-				err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
-				if err != nil {
-					log.Fatalf("Failed to configure token with jwt authentication: %v", err)
-				}
-			} else {
-				err = oauthutil.Config("box", name, m, oauthConfig)
-				if err != nil {
-					log.Fatalf("Failed to configure token with oauth authentication: %v", err)
-				}
+			err := oauthutil.Config("box", name, m, oauthConfig)
+			if err != nil {
+				log.Fatalf("Failed to configure token: %v", err)
 			}
 		},
 		Options: []fs.Option{{
@@ -123,22 +83,9 @@ func init() {
 		}, {
 			Name: config.ConfigClientSecret,
 			Help: "Box App Client Secret\nLeave blank normally.",
-		}, {
-			Name: "box_config_file",
-			Help: "Box App config.json location\nLeave blank normally.",
-		}, {
-			Name:    "box_sub_type",
-			Default: "user",
-			Examples: []fs.OptionExample{{
-				Value: "user",
-				Help:  "Rclone should act on behalf of a user",
-			}, {
-				Value: "enterprise",
-				Help:  "Rclone should act on behalf of a service account",
-			}},
 		}, {
 			Name:     "upload_cutoff",
-			Help:     "Cutoff for switching to multipart upload (>= 50MB).",
+			Help:     "Cutoff for switching to multipart upload.",
 			Default:  fs.SizeSuffix(defaultUploadCutoff),
 			Advanced: true,
 		}, {
@@ -150,74 +97,6 @@ func init() {
 	})
 }
 
-func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
-	file, err := ioutil.ReadFile(configFile)
-	if err != nil {
-		return nil, errors.Wrap(err, "box: failed to read Box config")
-	}
-	err = json.Unmarshal(file, &boxConfig)
-	if err != nil {
-		return nil, errors.Wrap(err, "box: failed to parse Box config")
-	}
-	return boxConfig, nil
-}
-
-func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
-	val, err := jwtutil.RandomHex(20)
-	if err != nil {
-		return nil, errors.Wrap(err, "box: failed to generate random string for jti")
-	}
-
-	claims = &jws.ClaimSet{
-		Iss: boxConfig.BoxAppSettings.ClientID,
-		Sub: boxConfig.EnterpriseID,
-		Aud: tokenURL,
-		Iat: time.Now().Unix(),
-		Exp: time.Now().Add(time.Second * 45).Unix(),
-		PrivateClaims: map[string]interface{}{
-			"box_sub_type": boxSubType,
-			"aud":          tokenURL,
-			"jti":          val,
-		},
-	}
-
-	return claims, nil
-}
-
-func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
-	signingHeaders := &jws.Header{
-		Algorithm: "RS256",
-		Typ:       "JWT",
-		KeyID:     boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
-	}
-
-	return signingHeaders
-}
-
-func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
-	queryParams := map[string]string{
-		"client_id":     boxConfig.BoxAppSettings.ClientID,
-		"client_secret": boxConfig.BoxAppSettings.ClientSecret,
-	}
-
-	return queryParams
-}
-
-func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
-
-	block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
-	if len(rest) > 0 {
-		return nil, errors.Wrap(err, "box: extra data included in private key")
-	}
-
-	rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase))
-	if err != nil {
-		return nil, errors.Wrap(err, "box: failed to decrypt private key")
-	}
-
-	return rsaKey.(*rsa.PrivateKey), nil
-}
-
 // Options defines the configuration for this backend
 type Options struct {
 	UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
@@ -232,7 +111,7 @@ type Fs struct {
 	features     *fs.Features          // optional features
 	srv          *rest.Client          // the connection to the one drive server
 	dirCache     *dircache.DirCache    // Map of directory path to directory id
-	pacer        *fs.Pacer             // pacer for API calls
+	pacer        *pacer.Pacer          // pacer for API calls
 	tokenRenewer *oauthutil.Renew      // renew the token on expiry
 	uploadToken  *pacer.TokenDispenser // control concurrency
 }
@@ -247,7 +126,6 @@ type Object struct {
 	size       int64     // size of the object
 	modTime    time.Time // modification time of the object
 	id         string    // ID of the object
-	publicLink string    // Public Link for the object
 	sha1       string    // SHA-1 of the object content
 }
 
@@ -292,19 +170,31 @@ var retryErrorCodes = []int{
 // shouldRetry returns a boolean as to whether this resp and err
 // deserve to be retried.  It returns the err as a convenience
 func shouldRetry(resp *http.Response, err error) (bool, error) {
-	authRetry := false
+	authRety := false
 
 	if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
-		authRetry = true
+		authRety = true
 		fs.Debugf(nil, "Should retry: %v", err)
 	}
-	return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
+	return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
+}
+
+// substitute reserved characters for box
+func replaceReservedChars(x string) string {
+	// Backslash for FULLWIDTH REVERSE SOLIDUS
+	return strings.Replace(x, "\\", "＼", -1)
+}
+
+// restore reserved characters for box
+func restoreReservedChars(x string) string {
+	// FULLWIDTH REVERSE SOLIDUS for Backslash
+	return strings.Replace(x, "＼", "\\", -1)
 }
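The replaceReservedChars/restoreReservedChars pair added on the v1.43.1 side swaps backslash for U+FF3C FULLWIDTH REVERSE SOLIDUS, the same substitution the newer code folds into the encodings package. The fullwidth character was mangled in this page's encoding and is restored above; here is the pair as a runnable round trip, written with an explicit escape so it survives any encoding:

    package main

    import (
        "fmt"
        "strings"
    )

    func replaceReservedChars(x string) string {
        // Box forbids "\" in names, so map it to U+FF3C on the way out
        return strings.Replace(x, "\\", "\uFF3C", -1)
    }

    func restoreReservedChars(x string) string {
        // ...and map U+FF3C back to "\" on the way in
        return strings.Replace(x, "\uFF3C", "\\", -1)
    }

    func main() {
        name := `back\slash`
        fmt.Println(replaceReservedChars(name)) // back＼slash
        fmt.Println(restoreReservedChars(replaceReservedChars(name)) == name) // true
    }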
 
 // readMetaDataForPath reads the metadata from the path
-func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
+func (f *Fs) readMetaDataForPath(path string) (info *api.Item, err error) {
 	// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
-	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false)
+	leaf, directoryID, err := f.dirCache.FindRootAndPath(path, false)
 	if err != nil {
 		if err == fs.ErrorDirNotFound {
 			return nil, fs.ErrorObjectNotFound
@@ -312,7 +202,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
 		return nil, err
 	}
 
-	found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
+	found, err := f.listAll(directoryID, false, true, func(item *api.Item) bool {
 		if item.Name == leaf {
 			info = item
 			return true
@@ -347,7 +237,6 @@ func errorHandler(resp *http.Response) error {
 
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-	ctx := context.Background()
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -362,7 +251,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	root = parsePath(root)
 	oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to configure Box")
+		log.Fatalf("Failed to configure Box: %v", err)
 	}
 
 	f := &Fs{
@@ -370,7 +259,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		root:        root,
 		opt:         *opt,
 		srv:         rest.NewClient(oAuthClient).SetRoot(rootURL),
-		pacer:       fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer:       pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
 		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
 	}
 	f.features = (&fs.Features{
@@ -381,7 +270,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 
 	// Renew the token in the background
 	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
-		_, err := f.readMetaDataForPath(ctx, "")
+		_, err := f.readMetaDataForPath("")
 		return err
 	})
 
@@ -389,20 +278,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	f.dirCache = dircache.New(root, rootID, f)
 
 	// Find the current root
-	err = f.dirCache.FindRoot(ctx, false)
+	err = f.dirCache.FindRoot(false)
 	if err != nil {
 		// Assume it is a file
 		newRoot, remote := dircache.SplitPath(root)
-		tempF := *f
-		tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
-		tempF.root = newRoot
+		newF := *f
+		newF.dirCache = dircache.New(newRoot, rootID, &newF)
+		newF.root = newRoot
 		// Make new Fs which is the parent
-		err = tempF.dirCache.FindRoot(ctx, false)
+		err = newF.dirCache.FindRoot(false)
 		if err != nil {
 			// No root so return old f
 			return f, nil
 		}
-		_, err := tempF.newObjectWithInfo(ctx, remote, nil)
+		_, err := newF.newObjectWithInfo(remote, nil)
 		if err != nil {
 			if err == fs.ErrorObjectNotFound {
 				// File doesn't exist so return old f
@@ -410,14 +299,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 			}
 			return nil, err
 		}
-		f.features.Fill(&tempF)
-		// XXX: update the old f here instead of returning tempF, since
-		// `features` were already filled with functions having *f as a receiver.
-		// See https://github.com/rclone/rclone/issues/2182
-		f.dirCache = tempF.dirCache
-		f.root = tempF.root
 		// return an error with an fs which points to the parent
-		return f, fs.ErrorIsFile
+		return &newF, fs.ErrorIsFile
 	}
 	return f, nil
 }
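The six lines deleted above (features.Fill and the XXX comment) are the fix for rclone/rclone#2182: the newer code patches f in place rather than returning the tempF copy, because the feature functions were captured with *f as receiver. A minimal sketch of the pitfall, with hypothetical types standing in for the rclone ones:

    package main

    import "fmt"

    type Fs struct {
        root string
        name func() string // stands in for fs.Features function pointers
    }

    func main() {
        f := &Fs{root: "dir/file.txt"}
        f.name = func() string { return f.root } // bound to the original *f

        tmp := *f        // shallow copy, like tempF := *f above
        tmp.root = "dir" // fix up the copy...

        // ...but the captured closure still reads f.root, so the copy
        // disagrees with its own feature function:
        fmt.Println(tmp.name()) // "dir/file.txt", not "dir"

        // The newer rclone approach: copy corrected fields back onto f
        f.root = tmp.root
        fmt.Println(f.name()) // "dir"
    }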
@@ -433,7 +316,7 @@ func (f *Fs) rootSlash() string {
|
|||||||
// Return an Object from a path
|
// Return an Object from a path
|
||||||
//
|
//
|
||||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||||
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) {
|
func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) {
|
||||||
o := &Object{
|
o := &Object{
|
||||||
fs: f,
|
fs: f,
|
||||||
remote: remote,
|
remote: remote,
|
||||||
@@ -443,7 +326,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Ite
|
|||||||
// Set info
|
// Set info
|
||||||
err = o.setMetaData(info)
|
err = o.setMetaData(info)
|
||||||
} else {
|
} else {
|
||||||
err = o.readMetaData(ctx) // reads info and meta, returning an error
|
err = o.readMetaData() // reads info and meta, returning an error
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -453,14 +336,14 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Ite
|
|||||||
|
|
||||||
// NewObject finds the Object at remote. If it can't be found
|
// NewObject finds the Object at remote. If it can't be found
|
||||||
// it returns the error fs.ErrorObjectNotFound.
|
// it returns the error fs.ErrorObjectNotFound.
|
||||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||||
return f.newObjectWithInfo(ctx, remote, nil)
|
return f.newObjectWithInfo(remote, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
// FindLeaf finds a directory of name leaf in the folder with ID pathID
|
// FindLeaf finds a directory of name leaf in the folder with ID pathID
|
||||||
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
|
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
|
||||||
// Find the leaf in pathID
|
// Find the leaf in pathID
|
||||||
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
|
found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
|
||||||
if item.Name == leaf {
|
if item.Name == leaf {
|
||||||
pathIDOut = item.ID
|
pathIDOut = item.ID
|
||||||
return true
|
return true
|
||||||
@@ -478,7 +361,7 @@ func fieldsValue() url.Values {
 }

 // CreateDir makes a directory with pathID as parent and name leaf
-func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
+func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
 	// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
 	var resp *http.Response
 	var info *api.Item
@@ -488,13 +371,13 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 		Parameters: fieldsValue(),
 	}
 	mkdir := api.CreateFolder{
-		Name: enc.FromStandardName(leaf),
+		Name: replaceReservedChars(leaf),
 		Parent: api.Parent{
 			ID: pathID,
 		},
 	}
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
+		resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
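Every request in this backend funnels through `f.pacer.Call` with `shouldRetry` deciding whether to try again. A self-contained sketch of that contract, using a hypothetical stand-in rather than rclone's real pacer:

```go
package sketch

import "net/http"

// callWithRetry mirrors the pacer contract used above: the callback
// returns (retry, err) and the loop stops once retry is false.
func callWithRetry(attempts int, fn func() (bool, error)) (err error) {
	for i := 0; i < attempts; i++ {
		var retry bool
		retry, err = fn()
		if !retry {
			return err
		}
	}
	return err
}

// shouldRetrySketch approximates the shape of shouldRetry in the diff:
// retry on rate limiting and server errors, otherwise give up.
func shouldRetrySketch(resp *http.Response, err error) (bool, error) {
	if resp != nil && (resp.StatusCode == 429 || resp.StatusCode >= 500) {
		return true, err
	}
	return false, err
}
```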
@@ -516,7 +399,7 @@ type listAllFn func(*api.Item) bool
 // Lists the directory required calling the user function on each item found
 //
 // If the user fn ever returns true then it early exits with found = true
-func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
+func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
 	opts := rest.Opts{
 		Method: "GET",
 		Path:   "/folders/" + dirID + "/items",
@@ -531,7 +414,7 @@ OUTER:
 		var result api.FolderItems
 		var resp *http.Response
 		err = f.pacer.Call(func() (bool, error) {
-			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
+			resp, err = f.srv.CallJSON(&opts, nil, &result)
 			return shouldRetry(resp, err)
 		})
 		if err != nil {
@@ -554,7 +437,7 @@ OUTER:
 			if item.ItemStatus != api.ItemStatusActive {
 				continue
 			}
-			item.Name = enc.ToStandardName(item.Name)
+			item.Name = restoreReservedChars(item.Name)
 			if fn(item) {
 				found = true
 				break OUTER
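The callback contract documented above (return true to stop the walk) makes listAll double as a search primitive, which is exactly how FindLeaf uses it. A sketch of another such helper inside the box package, assuming the v1.43.1 signature:

```go
// containsFile is hypothetical; it reuses listAll's early-exit contract.
func (f *Fs) containsFile(dirID, name string) (bool, error) {
	// filesOnly=true so only file items reach the callback; returning
	// true stops the listing and surfaces found=true.
	return f.listAll(dirID, false, true, func(item *api.Item) bool {
		return item.Name == name
	})
}
```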
@@ -577,17 +460,17 @@ OUTER:
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
-	err = f.dirCache.FindRoot(ctx, false)
+	err = f.dirCache.FindRoot(false)
 	if err != nil {
 		return nil, err
 	}
-	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
+	directoryID, err := f.dirCache.FindDir(dir, false)
 	if err != nil {
 		return nil, err
 	}
 	var iErr error
-	_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
+	_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
 		remote := path.Join(dir, info.Name)
 		if info.Type == api.ItemTypeFolder {
 			// cache the directory ID for later lookups
@@ -596,7 +479,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			// FIXME more info from dir?
 			entries = append(entries, d)
 		} else if info.Type == api.ItemTypeFile {
-			o, err := f.newObjectWithInfo(ctx, remote, info)
+			o, err := f.newObjectWithInfo(remote, info)
 			if err != nil {
 				iErr = err
 				return true
@@ -620,9 +503,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // Returns the object, leaf, directoryID and error
 //
 // Used to create new objects
-func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
+func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
 	// Create the directory for the object if it doesn't exist
-	leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
+	leaf, directoryID, err = f.dirCache.FindRootAndPath(remote, true)
 	if err != nil {
 		return
 	}
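List returns a mixed fs.DirEntries slice, so callers type-switch on each entry. A hedged sketch of a consumer, written against the v1.43.1 interfaces:

```go
package sketch

import (
	"fmt"

	"github.com/ncw/rclone/fs"
)

// printListing is illustrative only; it walks one directory level.
func printListing(f fs.Fs) error {
	entries, err := f.List("") // v1.43.1 signature, no ctx
	if err != nil {
		return err // includes fs.ErrorDirNotFound per the doc comment
	}
	for _, entry := range entries {
		switch x := entry.(type) {
		case fs.Directory:
			fmt.Println("dir: ", x.Remote())
		case fs.Object:
			fmt.Println("file:", x.Remote(), x.Size())
		}
	}
	return nil
}
```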
@@ -639,22 +522,22 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
+	exisitingObj, err := f.newObjectWithInfo(src.Remote(), nil)
 	switch err {
 	case nil:
-		return existingObj, existingObj.Update(ctx, in, src, options...)
+		return exisitingObj, exisitingObj.Update(in, src, options...)
 	case fs.ErrorObjectNotFound:
 		// Not found so create it
-		return f.PutUnchecked(ctx, in, src)
+		return f.PutUnchecked(in, src)
 	default:
 		return nil, err
 	}
 }

 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.Put(ctx, in, src, options...)
+	return f.Put(in, src, options...)
 }

 // PutUnchecked the object into the container
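Put's switch encodes an update-or-create decision: a nil probe error means overwrite, the not-found sentinel means create, anything else aborts. The same pattern restated from the caller's side, as a sketch:

```go
package sketch

import (
	"io"

	"github.com/ncw/rclone/fs"
)

// putSketch restates Put's dispatch from outside the backend; it is not
// part of the diff.
func putSketch(f fs.Fs, in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
	obj, err := f.NewObject(src.Remote())
	switch err {
	case nil: // already there: overwrite in place
		return obj, obj.Update(in, src)
	case fs.ErrorObjectNotFound: // missing: create it
		return f.Put(in, src)
	default: // any other probe failure is fatal
		return nil, err
	}
}
```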
@@ -664,56 +547,56 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	remote := src.Remote()
 	size := src.Size()
-	modTime := src.ModTime(ctx)
+	modTime := src.ModTime()

-	o, _, _, err := f.createObject(ctx, remote, modTime, size)
+	o, _, _, err := f.createObject(remote, modTime, size)
 	if err != nil {
 		return nil, err
 	}
-	return o, o.Update(ctx, in, src, options...)
+	return o, o.Update(in, src, options...)
 }

 // Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+func (f *Fs) Mkdir(dir string) error {
-	err := f.dirCache.FindRoot(ctx, true)
+	err := f.dirCache.FindRoot(true)
 	if err != nil {
 		return err
 	}
 	if dir != "" {
-		_, err = f.dirCache.FindDir(ctx, dir, true)
+		_, err = f.dirCache.FindDir(dir, true)
 	}
 	return err
 }

 // deleteObject removes an object by ID
-func (f *Fs) deleteObject(ctx context.Context, id string) error {
+func (f *Fs) deleteObject(id string) error {
 	opts := rest.Opts{
 		Method:     "DELETE",
 		Path:       "/files/" + id,
 		NoResponse: true,
 	}
 	return f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.Call(ctx, &opts)
+		resp, err := f.srv.Call(&opts)
 		return shouldRetry(resp, err)
 	})
 }

 // purgeCheck removes the root directory, if check is set then it
 // refuses to do so if it has anything in
-func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
+func (f *Fs) purgeCheck(dir string, check bool) error {
 	root := path.Join(f.root, dir)
 	if root == "" {
 		return errors.New("can't purge root directory")
 	}
 	dc := f.dirCache
-	err := dc.FindRoot(ctx, false)
+	err := dc.FindRoot(false)
 	if err != nil {
 		return err
 	}
-	rootID, err := dc.FindDir(ctx, dir, false)
+	rootID, err := dc.FindDir(dir, false)
 	if err != nil {
 		return err
 	}
@@ -727,7 +610,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 	opts.Parameters.Set("recursive", strconv.FormatBool(!check))
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.Call(ctx, &opts)
+		resp, err = f.srv.Call(&opts)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
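purgeCheck folds its check flag into Box's `recursive` query parameter, so Rmdir (check=true) sends recursive=false and lets the server reject a non-empty folder, while Purge sends recursive=true. A small sketch of that inversion on its own:

```go
package sketch

import (
	"net/url"
	"strconv"
)

// purgeParams is hypothetical; it isolates the parameter construction
// used by purgeCheck above.
func purgeParams(check bool) url.Values {
	params := url.Values{}
	params.Set("recursive", strconv.FormatBool(!check))
	return params
}
```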
@@ -743,8 +626,8 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 // Rmdir deletes the root folder
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+func (f *Fs) Rmdir(dir string) error {
-	return f.purgeCheck(ctx, dir, true)
+	return f.purgeCheck(dir, true)
 }

 // Precision return the precision of this Fs
@@ -761,13 +644,13 @@ func (f *Fs) Precision() time.Duration {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
-	err := srcObj.readMetaData(ctx)
+	err := srcObj.readMetaData()
 	if err != nil {
 		return nil, err
 	}
@@ -779,7 +662,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}

 	// Create temporary object
-	dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
+	dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
 	if err != nil {
 		return nil, err
 	}
@@ -790,8 +673,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		Path:       "/files/" + srcObj.id + "/copy",
 		Parameters: fieldsValue(),
 	}
+	replacedLeaf := replaceReservedChars(leaf)
 	copyFile := api.CopyFile{
-		Name: enc.FromStandardName(leaf),
+		Name: replacedLeaf,
 		Parent: api.Parent{
 			ID: directoryID,
 		},
@@ -799,7 +683,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	var resp *http.Response
 	var info *api.Item
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(ctx, &opts, &copyFile, &info)
+		resp, err = f.srv.CallJSON(&opts, &copyFile, &info)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
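Server-side copy is a single POST naming the destination: the body carries the reserved-character-mapped leaf name and the parent folder ID. A sketch of the request construction inside the box package; the literal values are hypothetical:

```go
// Hypothetical values; mirrors the copy body built above.
copyFile := api.CopyFile{
	Name:   replaceReservedChars("new name.txt"),
	Parent: api.Parent{ID: "123456"},
}
// POSTed to /files/{source-id}/copy; the response item becomes the
// destination object's metadata.
_ = copyFile
```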
@@ -817,12 +701,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge(ctx context.Context) error {
+func (f *Fs) Purge() error {
-	return f.purgeCheck(ctx, "", false)
+	return f.purgeCheck("", false)
 }

 // move a file or folder
-func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
+func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
 	// Move the object
 	opts := rest.Opts{
 		Method: "PUT",
@@ -830,14 +714,14 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
 		Parameters: fieldsValue(),
 	}
 	move := api.UpdateFileMove{
-		Name: enc.FromStandardName(leaf),
+		Name: replaceReservedChars(leaf),
 		Parent: api.Parent{
 			ID: directoryID,
 		},
 	}
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
+		resp, err = f.srv.CallJSON(&opts, &move, &info)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -855,7 +739,7 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't move - not same remote type")
@@ -863,13 +747,13 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}

 	// Create temporary object
-	dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
+	dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
 	if err != nil {
 		return nil, err
 	}

 	// Do the move
-	info, err := f.move(ctx, "/files/", srcObj.id, leaf, directoryID)
+	info, err := f.move("/files/", srcObj.id, leaf, directoryID)
 	if err != nil {
 		return nil, err
 	}
@@ -889,7 +773,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 	srcFs, ok := src.(*Fs)
 	if !ok {
 		fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -905,14 +789,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	}

 	// find the root src directory
-	err := srcFs.dirCache.FindRoot(ctx, false)
+	err := srcFs.dirCache.FindRoot(false)
 	if err != nil {
 		return err
 	}

 	// find the root dst directory
 	if dstRemote != "" {
-		err = f.dirCache.FindRoot(ctx, true)
+		err = f.dirCache.FindRoot(true)
 		if err != nil {
 			return err
 		}
@@ -928,14 +812,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	if dstRemote == "" {
 		findPath = f.root
 	}
-	leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
+	leaf, directoryID, err = f.dirCache.FindPath(findPath, true)
 	if err != nil {
 		return err
 	}

 	// Check destination does not exist
 	if dstRemote != "" {
-		_, err = f.dirCache.FindDir(ctx, dstRemote, false)
+		_, err = f.dirCache.FindDir(dstRemote, false)
 		if err == fs.ErrorDirNotFound {
 			// OK
 		} else if err != nil {
@@ -946,13 +830,13 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	}

 	// Find ID of src
-	srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
+	srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
 	if err != nil {
 		return err
 	}

 	// Do the move
-	_, err = f.move(ctx, "/folders/", srcID, leaf, directoryID)
+	_, err = f.move("/folders/", srcID, leaf, directoryID)
 	if err != nil {
 		return err
 	}
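Note the inverted existence check in DirMove: fs.ErrorDirNotFound from the destination probe is the success path. Restated as a sketch inside the box package:

```go
// Sketch of DirMove's destination probe (dstRemote is the target name):
_, err = f.dirCache.FindDir(dstRemote, false)
switch {
case err == fs.ErrorDirNotFound:
	// destination is free: the move can proceed
case err != nil:
	return err // probe failed for an unrelated reason
default:
	return fs.ErrorDirExists // destination already exists
}
```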
@@ -960,46 +844,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	return nil
 }

-// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
-func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
-	id, err := f.dirCache.FindDir(ctx, remote, false)
-	var opts rest.Opts
-	if err == nil {
-		fs.Debugf(f, "attempting to share directory '%s'", remote)
-
-		opts = rest.Opts{
-			Method:     "PUT",
-			Path:       "/folders/" + id,
-			Parameters: fieldsValue(),
-		}
-	} else {
-		fs.Debugf(f, "attempting to share single file '%s'", remote)
-		o, err := f.NewObject(ctx, remote)
-		if err != nil {
-			return "", err
-		}
-
-		if o.(*Object).publicLink != "" {
-			return o.(*Object).publicLink, nil
-		}
-
-		opts = rest.Opts{
-			Method:     "PUT",
-			Path:       "/files/" + o.(*Object).id,
-			Parameters: fieldsValue(),
-		}
-	}
-
-	shareLink := api.CreateSharedLink{}
-	var info api.Item
-	var resp *http.Response
-	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(ctx, &opts, &shareLink, &info)
-		return shouldRetry(resp, err)
-	})
-	return info.SharedLink.URL, err
-}
-
 // DirCacheFlush resets the directory cache - used in testing as an
 // optional interface
 func (f *Fs) DirCacheFlush() {
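Comparing back to v1.43.1 removes the whole PublicLink implementation (and the fs.PublicLinker assertion further down), since shared-link support postdates that release. A sketch of how a caller on the newer side reaches it through the optional interface; the signature is the one shown in the removed lines:

```go
package sketch

import (
	"context"
	"errors"

	"github.com/rclone/rclone/fs"
)

// shareSketch is illustrative; on v1.43.1 the type assertion would fail.
func shareSketch(ctx context.Context, f fs.Fs, remote string) (string, error) {
	linker, ok := f.(fs.PublicLinker)
	if !ok {
		return "", errors.New("remote does not support public links")
	}
	return linker.PublicLink(ctx, remote)
}
```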
@@ -1031,8 +875,13 @@ func (o *Object) Remote() string {
 	return o.remote
 }

+// srvPath returns a path for use in server
+func (o *Object) srvPath() string {
+	return replaceReservedChars(o.fs.rootSlash() + o.remote)
+}
+
 // Hash returns the SHA-1 of an object returning a lowercase hex string
-func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
+func (o *Object) Hash(t hash.Type) (string, error) {
 	if t != hash.SHA1 {
 		return "", hash.ErrUnsupported
 	}
@@ -1041,7 +890,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {

 // Size returns the size of an object in bytes
 func (o *Object) Size() int64 {
-	err := o.readMetaData(context.TODO())
+	err := o.readMetaData()
 	if err != nil {
 		fs.Logf(o, "Failed to read metadata: %v", err)
 		return 0
@@ -1059,18 +908,17 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
 	o.sha1 = info.SHA1
 	o.modTime = info.ModTime()
 	o.id = info.ID
-	o.publicLink = info.SharedLink.URL
 	return nil
 }

 // readMetaData gets the metadata if it hasn't already been fetched
 //
 // it also sets the info
-func (o *Object) readMetaData(ctx context.Context) (err error) {
+func (o *Object) readMetaData() (err error) {
 	if o.hasMetaData {
 		return nil
 	}
-	info, err := o.fs.readMetaDataForPath(ctx, o.remote)
+	info, err := o.fs.readMetaDataForPath(o.remote)
 	if err != nil {
 		if apiErr, ok := err.(*api.Error); ok {
 			if apiErr.Code == "not_found" || apiErr.Code == "trashed" {
@@ -1087,8 +935,8 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime(ctx context.Context) time.Time {
+func (o *Object) ModTime() time.Time {
-	err := o.readMetaData(ctx)
+	err := o.readMetaData()
 	if err != nil {
 		fs.Logf(o, "Failed to read metadata: %v", err)
 		return time.Now()
@@ -1097,7 +945,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
 }

 // setModTime sets the modification time of the local fs object
-func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) {
+func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
 	opts := rest.Opts{
 		Method: "PUT",
 		Path:   "/files/" + o.id,
@@ -1108,15 +956,15 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
 	}
 	var info *api.Item
 	err := o.fs.pacer.Call(func() (bool, error) {
-		resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
+		resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
 		return shouldRetry(resp, err)
 	})
 	return info, err
 }

 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
+func (o *Object) SetModTime(modTime time.Time) error {
-	info, err := o.setModTime(ctx, modTime)
+	info, err := o.setModTime(modTime)
 	if err != nil {
 		return err
 	}
@@ -1129,7 +977,7 @@ func (o *Object) Storable() bool {
 }

 // Open an object for read
-func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	if o.id == "" {
 		return nil, errors.New("can't download - no id")
 	}
@@ -1141,7 +989,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		Options: options,
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.Call(ctx, &opts)
+		resp, err = o.fs.srv.Call(&opts)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1153,9 +1001,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 // upload does a single non-multipart upload
 //
 // This is recommended for less than 50 MB of content
-func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
+func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
 	upload := api.UploadFile{
-		Name:              enc.FromStandardName(leaf),
+		Name:              replaceReservedChars(leaf),
 		ContentModifiedAt: api.Time(modTime),
 		ContentCreatedAt:  api.Time(modTime),
 		Parent: api.Parent{
@@ -1180,7 +1028,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
 		opts.Path = "/files/content"
 	}
 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-		resp, err = o.fs.srv.CallJSON(ctx, &opts, &upload, &result)
+		resp, err = o.fs.srv.CallJSON(&opts, &upload, &result)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1197,32 +1045,32 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
 // If existing is set then it updates the object rather than creating a new one
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 	o.fs.tokenRenewer.Start()
 	defer o.fs.tokenRenewer.Stop()

 	size := src.Size()
-	modTime := src.ModTime(ctx)
+	modTime := src.ModTime()
 	remote := o.Remote()

 	// Create the directory for the object if it doesn't exist
-	leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true)
+	leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(remote, true)
 	if err != nil {
 		return err
 	}

 	// Upload with simple or multipart
 	if size <= int64(o.fs.opt.UploadCutoff) {
-		err = o.upload(ctx, in, leaf, directoryID, modTime)
+		err = o.upload(in, leaf, directoryID, modTime)
 	} else {
-		err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime)
+		err = o.uploadMultipart(in, leaf, directoryID, size, modTime)
 	}
 	return err
 }

 // Remove an object
-func (o *Object) Remove(ctx context.Context) error {
+func (o *Object) Remove() error {
-	return o.fs.deleteObject(ctx, o.id)
+	return o.fs.deleteObject(o.id)
 }

 // ID returns the ID of the Object if known, or "" if not
@@ -1239,7 +1087,6 @@ var (
 	_ fs.Mover           = (*Fs)(nil)
 	_ fs.DirMover        = (*Fs)(nil)
 	_ fs.DirCacheFlusher = (*Fs)(nil)
-	_ fs.PublicLinker    = (*Fs)(nil)
 	_ fs.Object          = (*Object)(nil)
 	_ fs.IDer            = (*Object)(nil)
 )
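Update picks between the two upload paths purely on size against the configured upload_cutoff (the simple path is the one recommended for under 50 MB above). The dispatch in isolation, as a sketch with an illustrative constant:

```go
package sketch

import "io"

const uploadCutoff = 50 * 1024 * 1024 // illustrative; really opt.UploadCutoff

// dispatchUpload restates Update's size check; both callbacks are
// placeholders for upload and uploadMultipart.
func dispatchUpload(in io.Reader, size int64, simple, multipart func(io.Reader) error) error {
	if size <= uploadCutoff {
		return simple(in) // one-shot upload of the whole body
	}
	return multipart(in) // session + parts + commit, as in the multipart helpers below
}
```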

@@ -4,8 +4,8 @@ package box_test
 import (
 	"testing"

-	"github.com/rclone/rclone/backend/box"
+	"github.com/ncw/rclone/backend/box"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote
@@ -4,7 +4,6 @@ package box
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
|
||||||
"crypto/sha1"
|
"crypto/sha1"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
@@ -15,15 +14,15 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/ncw/rclone/backend/box/api"
|
||||||
|
"github.com/ncw/rclone/fs"
|
||||||
|
"github.com/ncw/rclone/fs/accounting"
|
||||||
|
"github.com/ncw/rclone/lib/rest"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/rclone/rclone/backend/box/api"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/accounting"
|
|
||||||
"github.com/rclone/rclone/lib/rest"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// createUploadSession creates an upload session for the object
|
// createUploadSession creates an upload session for the object
|
||||||
func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
|
func (o *Object) createUploadSession(leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "POST",
|
Method: "POST",
|
||||||
Path: "/files/upload_sessions",
|
Path: "/files/upload_sessions",
|
||||||
@@ -38,11 +37,11 @@ func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID stri
|
|||||||
} else {
|
} else {
|
||||||
opts.Path = "/files/upload_sessions"
|
opts.Path = "/files/upload_sessions"
|
||||||
request.FolderID = directoryID
|
request.FolderID = directoryID
|
||||||
request.FileName = enc.FromStandardName(leaf)
|
request.FileName = replaceReservedChars(leaf)
|
||||||
}
|
}
|
||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
err = o.fs.pacer.Call(func() (bool, error) {
|
err = o.fs.pacer.Call(func() (bool, error) {
|
||||||
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, &response)
|
resp, err = o.fs.srv.CallJSON(&opts, &request, &response)
|
||||||
return shouldRetry(resp, err)
|
return shouldRetry(resp, err)
|
||||||
})
|
})
|
||||||
return
|
return
|
||||||
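The multipart flow in this file is a three-step protocol: open a session sized for the whole file, upload each part with its own SHA-1, then commit with the part list and the whole-file digest. An end-to-end sketch inside the box package; single-part, with error handling trimmed, and the response field `part.Part` assumed from the api package rather than shown in the diff:

```go
// multipartOutline is hypothetical glue over the helpers in this file.
func multipartOutline(o *Object, leaf, dirID string, data []byte, modTime time.Time) error {
	session, err := o.createUploadSession(leaf, dirID, int64(len(data))) // step 1
	if err != nil {
		return err
	}
	wrap := func(r io.Reader) io.Reader { return r } // no-op accounting wrapper
	part, err := o.uploadPart(session.ID, 0, int64(len(data)), data, wrap) // step 2
	if err != nil {
		_ = o.abortUpload(session.ID) // roll the session back on failure
		return err
	}
	sum := sha1.Sum(data)
	_, err = o.commitUpload(session.ID, []api.Part{part.Part}, modTime, sum[:]) // step 3
	return err
}
```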
@@ -54,7 +53,7 @@ func sha1Digest(digest []byte) string {
 }

 // uploadPart uploads a part in an upload session
-func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
+func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
 	chunkSize := int64(len(chunk))
 	sha1sum := sha1.Sum(chunk)
 	opts := rest.Opts{
@@ -71,7 +70,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
 		opts.Body = wrap(bytes.NewReader(chunk))
-		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response)
+		resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
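Worth noting in the hunk above: opts.Body is rebuilt inside the retry callback because a bytes.Reader is single-use. A tiny demonstration of the pitfall that guards against:

```go
package sketch

import (
	"bytes"
	"io/ioutil"
)

// drainTwice shows why a fresh reader is needed per attempt: the second
// read returns nothing once the first has consumed the bytes.
func drainTwice(chunk []byte) (first, second int) {
	r := bytes.NewReader(chunk)
	b1, _ := ioutil.ReadAll(r)
	b2, _ := ioutil.ReadAll(r)
	return len(b1), len(b2) // len(chunk), then 0
}
```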
@@ -81,7 +80,7 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
 }

 // commitUpload finishes an upload session
-func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
+func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
 	opts := rest.Opts{
 		Method: "POST",
 		Path:   "/files/upload_sessions/" + SessionID + "/commit",
@@ -98,14 +97,14 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
 	var body []byte
 	var resp *http.Response
 	// For discussion of this value see:
-	// https://github.com/rclone/rclone/issues/2054
+	// https://github.com/ncw/rclone/issues/2054
 	maxTries := o.fs.opt.CommitRetries
 	const defaultDelay = 10
 	var tries int
outer:
 	for tries = 0; tries < maxTries; tries++ {
 		err = o.fs.pacer.Call(func() (bool, error) {
-			resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
+			resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
 			if err != nil {
 				return shouldRetry(resp, err)
 			}
@@ -113,7 +112,7 @@ outer:
 			return shouldRetry(resp, err)
 		})
 		delay := defaultDelay
-		var why string
+		why := "unknown"
 		if err != nil {
 			// Sometimes we get 400 Error with
 			// parts_mismatch immediately after uploading
@@ -155,7 +154,7 @@ outer:
 }

 // abortUpload cancels an upload session
-func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error) {
+func (o *Object) abortUpload(SessionID string) (err error) {
 	opts := rest.Opts{
 		Method: "DELETE",
 		Path:   "/files/upload_sessions/" + SessionID,
@@ -164,16 +163,16 @@ func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error)
 	}
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.Call(ctx, &opts)
+		resp, err = o.fs.srv.Call(&opts)
 		return shouldRetry(resp, err)
 	})
 	return err
 }

 // uploadMultipart uploads a file using multipart upload
-func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
+func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
 	// Create upload session
-	session, err := o.createUploadSession(ctx, leaf, directoryID, size)
+	session, err := o.createUploadSession(leaf, directoryID, size)
 	if err != nil {
 		return errors.Wrap(err, "multipart upload create session failed")
 	}
@@ -184,7 +183,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
 	defer func() {
 		if err != nil {
 			fs.Debugf(o, "Cancelling multipart upload: %v", err)
-			cancelErr := o.abortUpload(ctx, session.ID)
+			cancelErr := o.abortUpload(session.ID)
 			if cancelErr != nil {
 				fs.Logf(o, "Failed to cancel multipart upload: %v", err)
 			}
@@ -212,8 +211,8 @@ outer:
 		}

 		reqSize := remaining
-		if reqSize >= chunkSize {
+		if reqSize >= int64(chunkSize) {
-			reqSize = chunkSize
+			reqSize = int64(chunkSize)
 		}

 		// Make a block of memory
@@ -236,7 +235,7 @@ outer:
 			defer wg.Done()
 			defer o.fs.uploadToken.Put()
 			fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
-			partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap)
+			partResponse, err := o.uploadPart(session.ID, position, size, buf, wrap)
 			if err != nil {
 				err = errors.Wrap(err, "multipart upload failed to upload part")
 				select {
@@ -264,7 +263,7 @@ outer:
 	}

 	// Finalise the upload session
-	result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
+	result, err := o.commitUpload(session.ID, parts, modTime, hash.Sum(nil))
 	if err != nil {
 		return errors.Wrap(err, "multipart upload failed to finalize")
 	}
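The reqSize clamp above means every part is exactly chunkSize except possibly the last. The arithmetic on its own, as a runnable sketch:

```go
package sketch

// partSizes reproduces the clamping loop: full chunks, then a short tail.
func partSizes(total, chunkSize int64) (sizes []int64) {
	for remaining := total; remaining > 0; {
		reqSize := remaining
		if reqSize >= chunkSize {
			reqSize = chunkSize
		}
		sizes = append(sizes, reqSize)
		remaining -= reqSize
	}
	return sizes
}
```

For example, partSizes(10<<20, 4<<20) yields parts of 4 MiB, 4 MiB and 2 MiB.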
backend/cache/cache.go (vendored, 620 lines changed): file diff suppressed because it is too large.

backend/cache/cache_internal_test.go (vendored, 206 lines changed):
@@ -4,10 +4,6 @@ package cache_test

 import (
 	"bytes"
-	"context"
-	"encoding/base64"
-	goflag "flag"
-	"fmt"
 	"io"
 	"io/ioutil"
 	"log"
@@ -16,27 +12,34 @@ import (
 	"path"
 	"path/filepath"
 	"runtime"
-	"runtime/debug"
 	"strconv"
 	"strings"
 	"testing"
 	"time"

 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/backend/cache"
-	"github.com/rclone/rclone/backend/crypt"
-	_ "github.com/rclone/rclone/backend/drive"
-	"github.com/rclone/rclone/backend/local"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/object"
-	"github.com/rclone/rclone/fs/rc"
-	"github.com/rclone/rclone/fstest"
-	"github.com/rclone/rclone/lib/random"
-	"github.com/rclone/rclone/vfs"
-	"github.com/rclone/rclone/vfs/vfsflags"
-	"github.com/stretchr/testify/assert"
+	"encoding/base64"
+	goflag "flag"
+	"fmt"
+	"runtime/debug"
+
+	"encoding/json"
+	"net/http"
+
+	"github.com/ncw/rclone/backend/cache"
+	"github.com/ncw/rclone/backend/crypt"
+	_ "github.com/ncw/rclone/backend/drive"
+	"github.com/ncw/rclone/backend/local"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/object"
+	"github.com/ncw/rclone/fs/rc"
+	"github.com/ncw/rclone/fs/rc/rcflags"
+	"github.com/ncw/rclone/fstest"
+	"github.com/ncw/rclone/vfs"
+	"github.com/ncw/rclone/vfs/vfsflags"
 	"github.com/stretchr/testify/require"
 )
@@ -122,7 +125,7 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {
 	require.NoError(t, err)
 	listRootInner, err := runInstance.list(t, rootFs, innerFolder)
 	require.NoError(t, err)
-	listInner, err := rootFs2.List(context.Background(), "")
+	listInner, err := rootFs2.List("")
 	require.NoError(t, err)

 	require.Len(t, listRoot, 1)
@@ -140,10 +143,10 @@ func TestInternalVfsCache(t *testing.T) {
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)

-	err := rootFs.Mkdir(context.Background(), "test")
+	err := rootFs.Mkdir("test")
 	require.NoError(t, err)
 	runInstance.writeObjectString(t, rootFs, "test/second", "content")
-	_, err = rootFs.List(context.Background(), "test")
+	_, err = rootFs.List("test")
 	require.NoError(t, err)

 	testReader := runInstance.randomReader(t, testSize)
@@ -268,7 +271,7 @@ func TestInternalObjNotFound(t *testing.T) {
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
 	defer runInstance.cleanupFs(t, rootFs, boltDb)

-	obj, err := rootFs.NewObject(context.Background(), "404")
+	obj, err := rootFs.NewObject("404")
 	require.Error(t, err)
 	require.Nil(t, obj)
 }
@@ -356,8 +359,8 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
 		testData2, err = base64.StdEncoding.DecodeString(cryptedText2Base64)
 		require.NoError(t, err)
 	} else {
-		testData1 = []byte(random.String(100))
+		testData1 = []byte(fstest.RandomString(100))
-		testData2 = []byte(random.String(200))
+		testData2 = []byte(fstest.RandomString(200))
 	}

 	// write the object
@@ -389,10 +392,10 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {

 	// write the object
 	o := runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
-	require.Equal(t, o.Size(), testSize)
+	require.Equal(t, o.Size(), int64(testSize))
 	time.Sleep(time.Second * 3)

-	checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
+	checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, int64(testSize), false)
 	require.NoError(t, err)
 	require.Equal(t, int64(len(checkSample)), o.Size())

@@ -447,7 +450,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 	require.NoError(t, err)
 	log.Printf("original size: %v", originalSize)

-	o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+	o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
 	require.NoError(t, err)
 	expectedSize := int64(len([]byte("test content")))
 	var data2 []byte
@@ -459,7 +462,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 		data2 = []byte("test content")
 	}
 	objInfo := object.NewStaticObjectInfo(runInstance.encryptRemoteIfNeeded(t, "data.bin"), time.Now(), int64(len(data2)), true, nil, cfs.UnWrap())
-	err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
+	err = o.Update(bytes.NewReader(data2), objInfo)
 	require.NoError(t, err)
 	require.Equal(t, int64(len(data2)), o.Size())
 	log.Printf("updated size: %v", len(data2))
@@ -505,9 +508,9 @@ func TestInternalMoveWithNotify(t *testing.T) {
 	} else {
 		testData = []byte("test content")
 	}
-	_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test"))
+	_ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test"))
-	_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/one"))
+	_ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/one"))
-	_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/second"))
+	_ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/second"))
 	srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)

 	// list in mount
@@ -517,7 +520,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
 	require.NoError(t, err)

 	// move file
-	_, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName)
+	_, err = cfs.UnWrap().Features().Move(srcObj, dstName)
 	require.NoError(t, err)

 	err = runInstance.retryBlock(func() error {
@@ -591,9 +594,9 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 	} else {
 		testData = []byte("test content")
 	}
-	err = rootFs.Mkdir(context.Background(), "test")
+	err = rootFs.Mkdir("test")
 	require.NoError(t, err)
-	err = rootFs.Mkdir(context.Background(), "test/one")
+	err = rootFs.Mkdir("test/one")
 	require.NoError(t, err)
 	srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)

@@ -610,7 +613,7 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 	require.False(t, found)

 	// move file
-	_, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName)
+	_, err = cfs.UnWrap().Features().Move(srcObj, dstName)
 	require.NoError(t, err)

 	err = runInstance.retryBlock(func() error {
@@ -672,28 +675,28 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
 	runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)

 	// update in the wrapped fs
-	o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+	o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
 	require.NoError(t, err)
 	wrappedTime := time.Now().Add(-1 * time.Hour)
-	err = o.SetModTime(context.Background(), wrappedTime)
+	err = o.SetModTime(wrappedTime)
 	require.NoError(t, err)

 	// get a new instance from the cache
-	co, err := rootFs.NewObject(context.Background(), "data.bin")
+	co, err := rootFs.NewObject("data.bin")
 	require.NoError(t, err)
-	require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
+	require.NotEqual(t, o.ModTime().String(), co.ModTime().String())

 	cfs.DirCacheFlush() // flush the cache

 	// get a new instance from the cache
-	co, err = rootFs.NewObject(context.Background(), "data.bin")
+	co, err = rootFs.NewObject("data.bin")
 	require.NoError(t, err)
-	require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
+	require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
 }

 func TestInternalChangeSeenAfterRc(t *testing.T) {
-	cacheExpire := rc.Calls.Get("cache/expire")
+	rcflags.Opt.Enabled = true
-	assert.NotNil(t, cacheExpire)
+	rc.Start(&rcflags.Opt)

 	id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
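The hunk above swaps how the test drives the remote-control API: the context-aware branch invokes the registered call in-process via rc.Calls.Get, while v1.43.1 starts the rc HTTP server and POSTs to it (localhost:5572 is the rc default). A sketch of the v1.43.1-style call as the test below performs it, assuming rc.Start has already bound the server:

```go
res, err := http.Post(
	"http://localhost:5572/cache/expire?remote=data.bin",
	"application/json; charset=utf-8",
	strings.NewReader(""),
)
if err == nil {
	defer func() { _ = res.Body.Close() }()
	m := make(map[string]string)
	_ = json.NewDecoder(res.Body).Decode(&m) // e.g. {"status":"ok", ...}
}
```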
@@ -715,44 +718,50 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
|
|||||||
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
|
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
|
||||||
|
|
||||||
// update in the wrapped fs
|
// update in the wrapped fs
|
||||||
o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
|
o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
wrappedTime := time.Now().Add(-1 * time.Hour)
|
wrappedTime := time.Now().Add(-1 * time.Hour)
|
||||||
err = o.SetModTime(context.Background(), wrappedTime)
|
err = o.SetModTime(wrappedTime)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// get a new instance from the cache
|
// get a new instance from the cache
|
||||||
co, err := rootFs.NewObject(context.Background(), "data.bin")
|
co, err := rootFs.NewObject("data.bin")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())
|
require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
|
||||||
|
|
||||||
// Call the rc function
|
m := make(map[string]string)
|
||||||
m, err := cacheExpire.Fn(context.Background(), rc.Params{"remote": "data.bin"})
|
res, err := http.Post(fmt.Sprintf("http://localhost:5572/cache/expire?remote=%s", "data.bin"), "application/json; charset=utf-8", strings.NewReader(""))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer func() {
|
||||||
|
_ = res.Body.Close()
|
||||||
|
}()
|
||||||
|
_ = json.NewDecoder(res.Body).Decode(&m)
|
||||||
require.Contains(t, m, "status")
|
require.Contains(t, m, "status")
|
||||||
require.Contains(t, m, "message")
|
require.Contains(t, m, "message")
|
||||||
require.Equal(t, "ok", m["status"])
|
require.Equal(t, "ok", m["status"])
|
||||||
require.Contains(t, m["message"], "cached file cleared")
|
require.Contains(t, m["message"], "cached file cleared")
|
||||||
|
|
||||||
// get a new instance from the cache
|
// get a new instance from the cache
|
||||||
co, err = rootFs.NewObject(context.Background(), "data.bin")
|
co, err = rootFs.NewObject("data.bin")
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
|
|
||||||
_, err = runInstance.list(t, rootFs, "")
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
|
||||||
|
li1, err := runInstance.list(t, rootFs, "")
|
||||||
|
|
||||||
// create some rand test data
|
// create some rand test data
|
||||||
testData2 := randStringBytes(int(chunkSize))
|
testData2 := randStringBytes(int(chunkSize))
|
||||||
runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test2"), testData2)
|
runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test2"), testData2)
|
||||||
|
|
||||||
// list should have 1 item only
|
// list should have 1 item only
|
||||||
li1, err := runInstance.list(t, rootFs, "")
|
li1, err = runInstance.list(t, rootFs, "")
|
||||||
require.NoError(t, err)
|
|
||||||
require.Len(t, li1, 1)
|
require.Len(t, li1, 1)
|
||||||
|
|
||||||
// Call the rc function
|
m = make(map[string]string)
|
||||||
m, err = cacheExpire.Fn(context.Background(), rc.Params{"remote": "/"})
|
res2, err := http.Post("http://localhost:5572/cache/expire?remote=/", "application/json; charset=utf-8", strings.NewReader(""))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
defer func() {
|
||||||
|
_ = res2.Body.Close()
|
||||||
|
}()
|
||||||
|
_ = json.NewDecoder(res2.Body).Decode(&m)
|
||||||
require.Contains(t, m, "status")
|
require.Contains(t, m, "status")
|
||||||
require.Contains(t, m, "message")
|
require.Contains(t, m, "message")
|
||||||
require.Equal(t, "ok", m["status"])
|
require.Equal(t, "ok", m["status"])
|
||||||
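One side of this hunk invokes the registered rc function in-process via rc.Calls; the other drives the same cache/expire call over HTTP against a running rc server. A minimal standalone client in the same style, assuming an rc server is listening on the default localhost:5572 as in the test:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

// expire asks a running rclone rc server to drop a path from the cache
// backend, mirroring the HTTP call made in the test above.
func expire(remote string) (map[string]string, error) {
	endpoint := "http://localhost:5572/cache/expire?remote=" + url.QueryEscape(remote)
	res, err := http.Post(endpoint, "application/json; charset=utf-8", strings.NewReader(""))
	if err != nil {
		return nil, err
	}
	defer func() { _ = res.Body.Close() }()
	m := make(map[string]string)
	if err := json.NewDecoder(res.Body).Decode(&m); err != nil {
		return nil, err
	}
	return m, nil // per the test, expect keys "status" and "message"
}

func main() {
	m, err := expire("data.bin")
	fmt.Println(m, err)
}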
@@ -760,7 +769,6 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {

 	// list should have 2 items now
 	li2, err := runInstance.list(t, rootFs, "")
-	require.NoError(t, err)
 	require.Len(t, li2, 2)
 }

@@ -796,7 +804,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
 	// create some rand test data
 	testData := randStringBytes(int(int64(totalChunks-1)*chunkSize + chunkSize/2))
 	runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
-	o, err := cfs.NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+	o, err := cfs.NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
 	require.NoError(t, err)
 	co, ok := o.(*cache.Object)
 	require.True(t, ok)

@@ -835,7 +843,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
 	require.NoError(t, err)
 	require.Len(t, l, 1)

-	err = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/third"))
+	err = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/third"))
 	require.NoError(t, err)

 	l, err = runInstance.list(t, rootFs, "test")

@@ -870,14 +878,14 @@ func TestInternalBug2117(t *testing.T) {
 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)

-	err = cfs.UnWrap().Mkdir(context.Background(), "test")
+	err = cfs.UnWrap().Mkdir("test")
 	require.NoError(t, err)
 	for i := 1; i <= 4; i++ {
-		err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d", i))
+		err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d", i))
 		require.NoError(t, err)

 		for j := 1; j <= 4; j++ {
-			err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d/dir%d", i, j))
+			err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d/dir%d", i, j))
 			require.NoError(t, err)

 			runInstance.writeObjectString(t, cfs.UnWrap(), fmt.Sprintf("test/dir%d/dir%d/test.txt", i, j), "test")

@@ -1082,10 +1090,10 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 	}

 	if purge {
-		_ = f.Features().Purge(context.Background())
+		_ = f.Features().Purge()
 		require.NoError(t, err)
 	}
-	err = f.Mkdir(context.Background(), "")
+	err = f.Mkdir("")
 	require.NoError(t, err)
 	if r.useMount && !r.isMounted {
 		r.mountFs(t, f)

@@ -1099,7 +1107,7 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
 		r.unmountFs(t, f)
 	}

-	err := f.Features().Purge(context.Background())
+	err := f.Features().Purge()
 	require.NoError(t, err)
 	cfs, err := r.getCacheFs(f)
 	require.NoError(t, err)

@@ -1201,7 +1209,7 @@ func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.Read
 func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
 	in := bytes.NewReader(data)
 	_ = r.writeObjectReader(t, f, remote, in)
-	o, err := f.NewObject(context.Background(), remote)
+	o, err := f.NewObject(remote)
 	require.NoError(t, err)
 	require.Equal(t, int64(len(data)), o.Size())
 	return o

@@ -1210,7 +1218,7 @@ func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte
 func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Reader) fs.Object {
 	modTime := time.Now()
 	objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
-	obj, err := f.Put(context.Background(), in, objInfo)
+	obj, err := f.Put(in, objInfo)
 	require.NoError(t, err)
 	if r.useMount {
 		r.vfs.WaitForWriters(10 * time.Second)

@@ -1230,18 +1238,18 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []b
 		err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600)
 		require.NoError(t, err)
 		r.vfs.WaitForWriters(10 * time.Second)
-		obj, err = f.NewObject(context.Background(), remote)
+		obj, err = f.NewObject(remote)
 	} else {
 		in1 := bytes.NewReader(data1)
 		in2 := bytes.NewReader(data2)
 		objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
 		objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)

-		obj, err = f.Put(context.Background(), in1, objInfo1)
+		obj, err = f.Put(in1, objInfo1)
 		require.NoError(t, err)
-		obj, err = f.NewObject(context.Background(), remote)
+		obj, err = f.NewObject(remote)
 		require.NoError(t, err)
-		err = obj.Update(context.Background(), in2, objInfo2)
+		err = obj.Update(in2, objInfo2)
 	}
 	require.NoError(t, err)

@@ -1270,7 +1278,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
 			return checkSample, err
 		}
 	} else {
-		co, err := f.NewObject(context.Background(), remote)
+		co, err := f.NewObject(remote)
 		if err != nil {
 			return checkSample, err
 		}

@@ -1285,7 +1293,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
 func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLengthCheck bool) []byte {
 	size := end - offset
 	checkSample := make([]byte, size)
-	reader, err := o.Open(context.Background(), &fs.SeekOption{Offset: offset})
+	reader, err := o.Open(&fs.SeekOption{Offset: offset})
 	require.NoError(t, err)
 	totalRead, err := io.ReadFull(reader, checkSample)
 	if (err == io.EOF || err == io.ErrUnexpectedEOF) && noLengthCheck {

@@ -1302,7 +1310,7 @@ func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) {
 	if r.useMount {
 		err = os.Mkdir(path.Join(r.mntDir, remote), 0700)
 	} else {
-		err = f.Mkdir(context.Background(), remote)
+		err = f.Mkdir(remote)
 	}
 	require.NoError(t, err)
 }

@@ -1314,11 +1322,11 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
 		err = os.Remove(path.Join(r.mntDir, remote))
 	} else {
 		var obj fs.Object
-		obj, err = f.NewObject(context.Background(), remote)
+		obj, err = f.NewObject(remote)
 		if err != nil {
-			err = f.Rmdir(context.Background(), remote)
+			err = f.Rmdir(remote)
 		} else {
-			err = obj.Remove(context.Background())
+			err = obj.Remove()
 		}
 	}

@@ -1336,7 +1344,7 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
 		}
 	} else {
 		var list fs.DirEntries
-		list, err = f.List(context.Background(), remote)
+		list, err = f.List(remote)
 		for _, ll := range list {
 			l = append(l, ll)
 		}

@@ -1355,7 +1363,7 @@ func (r *run) listPath(t *testing.T, f fs.Fs, remote string) []string {
 		}
 	} else {
 		var list fs.DirEntries
-		list, err = f.List(context.Background(), remote)
+		list, err = f.List(remote)
 		for _, ll := range list {
 			l = append(l, ll.Remote())
 		}

@@ -1395,7 +1403,7 @@ func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
 		}
 		r.vfs.WaitForWriters(10 * time.Second)
 	} else if rootFs.Features().DirMove != nil {
-		err = rootFs.Features().DirMove(context.Background(), rootFs, src, dst)
+		err = rootFs.Features().DirMove(rootFs, src, dst)
 		if err != nil {
 			return err
 		}

@@ -1417,11 +1425,11 @@ func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
 		}
 		r.vfs.WaitForWriters(10 * time.Second)
 	} else if rootFs.Features().Move != nil {
-		obj1, err := rootFs.NewObject(context.Background(), src)
+		obj1, err := rootFs.NewObject(src)
 		if err != nil {
 			return err
 		}
-		_, err = rootFs.Features().Move(context.Background(), obj1, dst)
+		_, err = rootFs.Features().Move(obj1, dst)
 		if err != nil {
 			return err
 		}

@@ -1443,11 +1451,11 @@ func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
 		}
 		r.vfs.WaitForWriters(10 * time.Second)
 	} else if rootFs.Features().Copy != nil {
-		obj, err := rootFs.NewObject(context.Background(), src)
+		obj, err := rootFs.NewObject(src)
 		if err != nil {
 			return err
 		}
-		_, err = rootFs.Features().Copy(context.Background(), obj, dst)
+		_, err = rootFs.Features().Copy(obj, dst)
 		if err != nil {
 			return err
 		}

@@ -1469,11 +1477,11 @@ func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error)
 		}
 		return fi.ModTime(), nil
 	}
-	obj1, err := rootFs.NewObject(context.Background(), src)
+	obj1, err := rootFs.NewObject(src)
 	if err != nil {
 		return time.Time{}, err
 	}
-	return obj1.ModTime(context.Background()), nil
+	return obj1.ModTime(), nil
 }

 func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {

@@ -1486,7 +1494,7 @@ func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
 		}
 		return fi.Size(), nil
 	}
-	obj1, err := rootFs.NewObject(context.Background(), src)
+	obj1, err := rootFs.NewObject(src)
 	if err != nil {
 		return int64(0), err
 	}

@@ -1497,8 +1505,7 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
 	var err error

 	if r.useMount {
-		var f *os.File
-		f, err = os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
+		f, err := os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
 		if err != nil {
 			return err
 		}

@@ -1508,15 +1515,14 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
 		}()
 		_, err = f.WriteString(data + append)
 	} else {
-		var obj1 fs.Object
-		obj1, err = rootFs.NewObject(context.Background(), src)
+		obj1, err := rootFs.NewObject(src)
 		if err != nil {
 			return err
 		}
 		data1 := []byte(data + append)
 		r := bytes.NewReader(data1)
 		objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
-		err = obj1.Update(context.Background(), r, objInfo1)
+		err = obj1.Update(r, objInfo1)
 	}

 	return err
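Note the pattern in the two updateData hunks: one side declares the variable up front (var f *os.File) and assigns with plain "=", the other uses ":=" inside the branch. My reading of the diff (not a claim from the commits themselves) is that the up-front declaration avoids shadowing the function-level err, so failures assigned inside the branch survive to the final return. A self-contained illustration of the pitfall:

package main

import (
	"errors"
	"fmt"
)

func open() (string, error) { return "", errors.New("boom") }

func shadowed() error {
	var err error
	if true {
		f, err := open() // ":=" declares a NEW err, scoped to this block
		_ = f
		_ = err // the failure is trapped in the inner variable
	}
	return err // always nil: the outer err was never assigned
}

func fixed() error {
	var err error
	var f string
	if true {
		f, err = open() // plain "=" assigns the outer err
		_ = f
	}
	return err // reports the failure
}

func main() {
	fmt.Println(shadowed(), fixed()) // prints: <nil> boom
}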
@@ -1641,13 +1647,15 @@ func (r *run) getCacheFs(f fs.Fs) (*cache.Fs, error) {
 	cfs, ok := f.(*cache.Fs)
 	if ok {
 		return cfs, nil
-	}
+	} else {
 		if f.Features().UnWrap != nil {
 			cfs, ok := f.Features().UnWrap().(*cache.Fs)
 			if ok {
 				return cfs, nil
+			}
 		}
 	}

 	return nil, errors.New("didn't found a cache fs")
 }
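The helper above shows the unwrap idiom used to reach the cache layer when it is wrapped by another remote (for example crypt over cache). A fragment distilling it, written against this tree's fs and cache packages; illustrative only, not the repository's exact code:

import (
	"github.com/ncw/rclone/backend/cache"
	"github.com/ncw/rclone/fs"
)

// findCacheFs walks at most one wrapping layer to reach the underlying
// *cache.Fs: first a direct type assertion, then via Features().UnWrap.
func findCacheFs(f fs.Fs) (*cache.Fs, bool) {
	if cfs, ok := f.(*cache.Fs); ok {
		return cfs, true
	}
	if unwrap := f.Features().UnWrap; unwrap != nil {
		cfs, ok := unwrap().(*cache.Fs)
		return cfs, ok
	}
	return nil, false
}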
backend/cache/cache_mount_unix_test.go (vendored, 6 lines changed)
@@ -9,9 +9,9 @@ import (

 	"bazil.org/fuse"
 	fusefs "bazil.org/fuse/fs"
-	"github.com/rclone/rclone/cmd/mount"
-	"github.com/rclone/rclone/cmd/mountlib"
-	"github.com/rclone/rclone/fs"
+	"github.com/ncw/rclone/cmd/mount"
+	"github.com/ncw/rclone/cmd/mountlib"
+	"github.com/ncw/rclone/fs"
 	"github.com/stretchr/testify/require"
 )

backend/cache/cache_mount_windows_test.go (vendored, 6 lines changed)
@@ -9,10 +9,10 @@ import (
 	"time"

 	"github.com/billziss-gh/cgofuse/fuse"
+	"github.com/ncw/rclone/cmd/cmount"
+	"github.com/ncw/rclone/cmd/mountlib"
+	"github.com/ncw/rclone/fs"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/cmd/cmount"
-	"github.com/rclone/rclone/cmd/mountlib"
-	"github.com/rclone/rclone/fs"
 	"github.com/stretchr/testify/require"
 )

backend/cache/cache_test.go (vendored, 13 lines changed)
@@ -7,18 +7,15 @@ package cache_test
 import (
 	"testing"

-	"github.com/rclone/rclone/backend/cache"
-	_ "github.com/rclone/rclone/backend/local"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/backend/cache"
+	_ "github.com/ncw/rclone/backend/local"
+	"github.com/ncw/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestCache:",
 		NilObject:  (*cache.Object)(nil),
-		UnimplementableFsMethods:     []string{"PublicLink", "MergeDirs", "OpenWriterAt"},
-		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
-		SkipInvalidUTF8:              true, // invalid UTF-8 confuses the cache
 	})
 }
backend/cache/cache_upload_test.go (vendored, 56 lines changed)
@@ -3,8 +3,6 @@
 package cache_test

 import (
-	"context"
-	"fmt"
 	"math/rand"
 	"os"
 	"path"

@@ -12,9 +10,11 @@ import (
 	"testing"
 	"time"

-	"github.com/rclone/rclone/backend/cache"
-	_ "github.com/rclone/rclone/backend/drive"
-	"github.com/rclone/rclone/fs"
+	"fmt"
+
+	"github.com/ncw/rclone/backend/cache"
+	_ "github.com/ncw/rclone/backend/drive"
+	"github.com/ncw/rclone/fs"
 	"github.com/stretchr/testify/require"
 )

@@ -86,11 +86,11 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)

-	err := rootFs.Mkdir(context.Background(), "one")
+	err := rootFs.Mkdir("one")
 	require.NoError(t, err)
-	err = rootFs.Mkdir(context.Background(), "one/test")
+	err = rootFs.Mkdir("one/test")
 	require.NoError(t, err)
-	err = rootFs.Mkdir(context.Background(), "second")
+	err = rootFs.Mkdir("second")
 	require.NoError(t, err)

 	// create some rand test data

@@ -123,11 +123,11 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
 		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)

-	err := rootFs.Mkdir(context.Background(), "one")
+	err := rootFs.Mkdir("one")
 	require.NoError(t, err)
-	err = rootFs.Mkdir(context.Background(), "one/test")
+	err = rootFs.Mkdir("one/test")
 	require.NoError(t, err)
-	err = rootFs.Mkdir(context.Background(), "second")
+	err = rootFs.Mkdir("second")
 	require.NoError(t, err)

 	// create some rand test data

@@ -166,7 +166,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)

-	err := rootFs.Mkdir(context.Background(), "test")
+	err := rootFs.Mkdir("test")
 	require.NoError(t, err)
 	minSize := 5242880
 	maxSize := 10485760

@@ -234,9 +234,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	err = runInstance.dirMove(t, rootFs, "test", "second")
 	if err != errNotSupported {
 		require.NoError(t, err)
-		_, err = rootFs.NewObject(context.Background(), "test/one")
+		_, err = rootFs.NewObject("test/one")
 		require.Error(t, err)
-		_, err = rootFs.NewObject(context.Background(), "second/one")
+		_, err = rootFs.NewObject("second/one")
 		require.NoError(t, err)
 		// validate that it exists in temp fs
 		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))

@@ -257,7 +257,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	err = runInstance.rm(t, rootFs, "test")
 	require.Error(t, err)
 	require.Contains(t, err.Error(), "directory not empty")
-	_, err = rootFs.NewObject(context.Background(), "test/one")
+	_, err = rootFs.NewObject("test/one")
 	require.NoError(t, err)
 	// validate that it exists in temp fs
 	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))

@@ -271,9 +271,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	if err != errNotSupported {
 		require.NoError(t, err)
 		// try to read from it
-		_, err = rootFs.NewObject(context.Background(), "test/one")
+		_, err = rootFs.NewObject("test/one")
 		require.Error(t, err)
-		_, err = rootFs.NewObject(context.Background(), "test/second")
+		_, err = rootFs.NewObject("test/second")
 		require.NoError(t, err)
 		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
 		require.NoError(t, err)

@@ -290,9 +290,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
 	if err != errNotSupported {
 		require.NoError(t, err)
-		_, err = rootFs.NewObject(context.Background(), "test/one")
+		_, err = rootFs.NewObject("test/one")
 		require.NoError(t, err)
-		_, err = rootFs.NewObject(context.Background(), "test/third")
+		_, err = rootFs.NewObject("test/third")
 		require.NoError(t, err)
 		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
 		require.NoError(t, err)

@@ -307,7 +307,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	// test Remove -- allowed
 	err = runInstance.rm(t, rootFs, "test/one")
 	require.NoError(t, err)
-	_, err = rootFs.NewObject(context.Background(), "test/one")
+	_, err = rootFs.NewObject("test/one")
 	require.Error(t, err)
 	// validate that it doesn't exist in temp fs
 	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))

@@ -319,7 +319,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	require.NoError(t, err)
 	err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
 	require.NoError(t, err)
-	obj2, err := rootFs.NewObject(context.Background(), "test/one")
+	obj2, err := rootFs.NewObject("test/one")
 	require.NoError(t, err)
 	data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
 	require.Equal(t, "one content updated", string(data2))

@@ -367,7 +367,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	err = runInstance.dirMove(t, rootFs, "test", "second")
 	if err != errNotSupported {
 		require.Error(t, err)
-		_, err = rootFs.NewObject(context.Background(), "test/one")
+		_, err = rootFs.NewObject("test/one")
 		require.NoError(t, err)
 		// validate that it exists in temp fs
 		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))

@@ -379,7 +379,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	// test Rmdir
 	err = runInstance.rm(t, rootFs, "test")
 	require.Error(t, err)
-	_, err = rootFs.NewObject(context.Background(), "test/one")
+	_, err = rootFs.NewObject("test/one")
 	require.NoError(t, err)
 	// validate that it doesn't exist in temp fs
 	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))

@@ -390,9 +390,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	if err != errNotSupported {
 		require.Error(t, err)
 		// try to read from it
-		_, err = rootFs.NewObject(context.Background(), "test/one")
+		_, err = rootFs.NewObject("test/one")
 		require.NoError(t, err)
-		_, err = rootFs.NewObject(context.Background(), "test/second")
+		_, err = rootFs.NewObject("test/second")
 		require.Error(t, err)
 		// validate that it exists in temp fs
 		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))

@@ -405,9 +405,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
 	if err != errNotSupported {
 		require.NoError(t, err)
-		_, err = rootFs.NewObject(context.Background(), "test/one")
+		_, err = rootFs.NewObject("test/one")
 		require.NoError(t, err)
-		_, err = rootFs.NewObject(context.Background(), "test/third")
+		_, err = rootFs.NewObject("test/third")
 		require.NoError(t, err)
 		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
 		require.NoError(t, err)

@@ -422,7 +422,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	// test Remove
 	err = runInstance.rm(t, rootFs, "test/one")
 	require.Error(t, err)
-	_, err = rootFs.NewObject(context.Background(), "test/one")
+	_, err = rootFs.NewObject("test/one")
 	require.NoError(t, err)
 	// validate that it doesn't exist in temp fs
 	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
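Across these hunks the tests configure the cache backend's offline upload mode through an option map passed to the test constructor. A minimal illustrative fragment in the same style, with the key names taken straight from the hunks and a placeholder local path:

// Fragment, not a complete test: wires offline-upload options into a cache
// remote the way the tests above do.
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
	nil,
	map[string]string{
		"cache-tmp-upload-path": "/tmp/cache-upload", // placeholder: where writes are parked locally
		"cache-tmp-wait-time":   "1h",                // how long a file sits before background upload
	})
defer runInstance.cleanupFs(t, rootFs, boltDb)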
backend/cache/cache_upload_test.go.orig (vendored, new file, 455 lines)
@@ -0,0 +1,455 @@
// +build !plan9

package cache_test

import (
	"math/rand"
	"os"
	"path"
	"strconv"
	"testing"
	"time"

	"fmt"

	"github.com/ncw/rclone/backend/cache"
	_ "github.com/ncw/rclone/backend/drive"
	"github.com/ncw/rclone/fs"
	"github.com/stretchr/testify/require"
)

func TestInternalUploadTempDirCreated(t *testing.T) {
	id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
		nil,
		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id)})
	defer runInstance.cleanupFs(t, rootFs, boltDb)

	_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
	require.NoError(t, err)
}

func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltDb *cache.Persistent) {
	// create some rand test data
	testSize := int64(524288000)
	testReader := runInstance.randomReader(t, testSize)
	bu := runInstance.listenForBackgroundUpload(t, rootFs, "one")
	runInstance.writeRemoteReader(t, rootFs, "one", testReader)
	// validate that it exists in temp fs
	ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
	require.NoError(t, err)

	if runInstance.rootIsCrypt {
		require.Equal(t, int64(524416032), ti.Size())
	} else {
		require.Equal(t, testSize, ti.Size())
	}
	de1, err := runInstance.list(t, rootFs, "")
	require.NoError(t, err)
	require.Len(t, de1, 1)

	runInstance.completeBackgroundUpload(t, "one", bu)
	// check if it was removed from temp fs
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
	require.True(t, os.IsNotExist(err))

	// check if it can be read
	data2, err := runInstance.readDataFromRemote(t, rootFs, "one", 0, int64(1024), false)
	require.NoError(t, err)
	require.Len(t, data2, 1024)
}

func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
	id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
		nil,
		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "0s"})
	defer runInstance.cleanupFs(t, rootFs, boltDb)

	testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}

func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
	id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
		nil,
		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1m"})
	defer runInstance.cleanupFs(t, rootFs, boltDb)

	testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}

func TestInternalUploadMoveExistingFile(t *testing.T) {
	id := fmt.Sprintf("tiumef%v", time.Now().Unix())
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
		nil,
		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "3s"})
	defer runInstance.cleanupFs(t, rootFs, boltDb)

	err := rootFs.Mkdir("one")
	require.NoError(t, err)
	err = rootFs.Mkdir("one/test")
	require.NoError(t, err)
	err = rootFs.Mkdir("second")
	require.NoError(t, err)

	// create some rand test data
	testSize := int64(10485760)
	testReader := runInstance.randomReader(t, testSize)
	runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
	runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")

	de1, err := runInstance.list(t, rootFs, "one/test")
	require.NoError(t, err)
	require.Len(t, de1, 1)

	time.Sleep(time.Second * 5)
	//_ = os.Remove(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
	//require.NoError(t, err)

	err = runInstance.dirMove(t, rootFs, "one/test", "second/test")
	require.NoError(t, err)

	// check if it can be read
	de1, err = runInstance.list(t, rootFs, "second/test")
	require.NoError(t, err)
	require.Len(t, de1, 1)
}

func TestInternalUploadTempPathCleaned(t *testing.T) {
	id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
		nil,
		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
	defer runInstance.cleanupFs(t, rootFs, boltDb)

	err := rootFs.Mkdir("one")
	require.NoError(t, err)
	err = rootFs.Mkdir("one/test")
	require.NoError(t, err)
	err = rootFs.Mkdir("second")
	require.NoError(t, err)

	// create some rand test data
	testSize := int64(1048576)
	testReader := runInstance.randomReader(t, testSize)
	testReader2 := runInstance.randomReader(t, testSize)
	runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
	runInstance.writeObjectReader(t, rootFs, "second/data.bin", testReader2)

	runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
	require.True(t, os.IsNotExist(err))
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
	require.True(t, os.IsNotExist(err))
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second")))
	require.False(t, os.IsNotExist(err))

	runInstance.completeAllBackgroundUploads(t, rootFs, "second/data.bin")
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/data.bin")))
	require.True(t, os.IsNotExist(err))

	de1, err := runInstance.list(t, rootFs, "one/test")
	require.NoError(t, err)
	require.Len(t, de1, 1)

	// check if it can be read
	de1, err = runInstance.list(t, rootFs, "second")
	require.NoError(t, err)
	require.Len(t, de1, 1)
}

func TestInternalUploadQueueMoreFiles(t *testing.T) {
	id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
		nil,
		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1s"})
	defer runInstance.cleanupFs(t, rootFs, boltDb)

	err := rootFs.Mkdir("test")
	require.NoError(t, err)
	minSize := 5242880
	maxSize := 10485760
	totalFiles := 10
	rand.Seed(time.Now().Unix())

	lastFile := ""
	for i := 0; i < totalFiles; i++ {
		size := int64(rand.Intn(maxSize-minSize) + minSize)
		testReader := runInstance.randomReader(t, size)
		remote := "test/" + strconv.Itoa(i) + ".bin"
		runInstance.writeRemoteReader(t, rootFs, remote, testReader)

		// validate that it exists in temp fs
		ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, remote)))
		require.NoError(t, err)
		require.Equal(t, size, runInstance.cleanSize(t, ti.Size()))

		if runInstance.wrappedIsExternal && i < totalFiles-1 {
			time.Sleep(time.Second * 3)
		}
		lastFile = remote
	}

	// check if cache lists all files, likely temp upload didn't finish yet
	de1, err := runInstance.list(t, rootFs, "test")
	require.NoError(t, err)
	require.Len(t, de1, totalFiles)

	// wait for background uploader to do its thing
	runInstance.completeAllBackgroundUploads(t, rootFs, lastFile)

	// retry until we have no more temp files and fail if they don't go down to 0
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test")))
	require.True(t, os.IsNotExist(err))

	// check if cache lists all files
	de1, err = runInstance.list(t, rootFs, "test")
	require.NoError(t, err)
	require.Len(t, de1, totalFiles)
}

func TestInternalUploadTempFileOperations(t *testing.T) {
	id := "tiutfo"
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
		nil,
		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
	defer runInstance.cleanupFs(t, rootFs, boltDb)

	boltDb.PurgeTempUploads()

	// create some rand test data
	runInstance.mkdir(t, rootFs, "test")
	runInstance.writeRemoteString(t, rootFs, "test/one", "one content")

	// check if it can be read
	data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
	require.NoError(t, err)
	require.Equal(t, []byte("one content"), data1)
	// validate that it exists in temp fs
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	require.NoError(t, err)

	// test DirMove - allowed
	err = runInstance.dirMove(t, rootFs, "test", "second")
	if err != errNotSupported {
		require.NoError(t, err)
		_, err = rootFs.NewObject("test/one")
		require.Error(t, err)
		_, err = rootFs.NewObject("second/one")
		require.NoError(t, err)
		// validate that it exists in temp fs
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
		require.Error(t, err)
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
		require.NoError(t, err)
		_, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
		require.Error(t, err)
		var started bool
		started, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "second/one")))
		require.NoError(t, err)
		require.False(t, started)
		runInstance.mkdir(t, rootFs, "test")
		runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
	}

	// test Rmdir - allowed
	err = runInstance.rm(t, rootFs, "test")
	require.Error(t, err)
	require.Contains(t, err.Error(), "directory not empty")
	_, err = rootFs.NewObject("test/one")
	require.NoError(t, err)
	// validate that it exists in temp fs
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	require.NoError(t, err)
	started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
	require.False(t, started)
	require.NoError(t, err)

	// test Move/Rename -- allowed
	err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
	if err != errNotSupported {
		require.NoError(t, err)
		// try to read from it
		_, err = rootFs.NewObject("test/one")
		require.Error(t, err)
		_, err = rootFs.NewObject("test/second")
		require.NoError(t, err)
		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
		require.NoError(t, err)
		require.Equal(t, []byte("one content"), data2)
		// validate that it exists in temp fs
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
		require.Error(t, err)
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
		require.NoError(t, err)
		runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
	}

	// test Copy -- allowed
	err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
	if err != errNotSupported {
		require.NoError(t, err)
		_, err = rootFs.NewObject("test/one")
		require.NoError(t, err)
		_, err = rootFs.NewObject("test/third")
		require.NoError(t, err)
		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
		require.NoError(t, err)
		require.Equal(t, []byte("one content"), data2)
		// validate that it exists in temp fs
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
		require.NoError(t, err)
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
		require.NoError(t, err)
	}

	// test Remove -- allowed
	err = runInstance.rm(t, rootFs, "test/one")
	require.NoError(t, err)
	_, err = rootFs.NewObject("test/one")
	require.Error(t, err)
	// validate that it doesn't exist in temp fs
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	require.Error(t, err)
	runInstance.writeRemoteString(t, rootFs, "test/one", "one content")

	// test Update -- allowed
	firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
	require.NoError(t, err)
	err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
	require.NoError(t, err)
	obj2, err := rootFs.NewObject("test/one")
	require.NoError(t, err)
	data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
	require.Equal(t, "one content updated", string(data2))
	tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	require.NoError(t, err)
	if runInstance.rootIsCrypt {
		require.Equal(t, int64(67), tmpInfo.Size())
	} else {
		require.Equal(t, int64(len(data2)), tmpInfo.Size())
	}

	// test SetModTime -- allowed
	secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
	require.NoError(t, err)
	require.NotEqual(t, secondModTime, firstModTime)
	require.NotEqual(t, time.Time{}, firstModTime)
	require.NotEqual(t, time.Time{}, secondModTime)
}

func TestInternalUploadUploadingFileOperations(t *testing.T) {
	id := "tiuufo"
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
		nil,
		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
	defer runInstance.cleanupFs(t, rootFs, boltDb)

	boltDb.PurgeTempUploads()

	// create some rand test data
	runInstance.mkdir(t, rootFs, "test")
	runInstance.writeRemoteString(t, rootFs, "test/one", "one content")

	// check if it can be read
	data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
	require.NoError(t, err)
	require.Equal(t, []byte("one content"), data1)
	// validate that it exists in temp fs
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	require.NoError(t, err)

	err = boltDb.SetPendingUploadToStarted(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "test/one")))
	require.NoError(t, err)

	// test DirMove
	err = runInstance.dirMove(t, rootFs, "test", "second")
	if err != errNotSupported {
		require.Error(t, err)
		_, err = rootFs.NewObject("test/one")
		require.NoError(t, err)
		// validate that it exists in temp fs
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
		require.NoError(t, err)
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
		require.Error(t, err)
	}

	// test Rmdir
	err = runInstance.rm(t, rootFs, "test")
	require.Error(t, err)
	_, err = rootFs.NewObject("test/one")
	require.NoError(t, err)
	// validate that it doesn't exist in temp fs
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	require.NoError(t, err)

	// test Move/Rename
	err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
	if err != errNotSupported {
		require.Error(t, err)
		// try to read from it
		_, err = rootFs.NewObject("test/one")
		require.NoError(t, err)
		_, err = rootFs.NewObject("test/second")
		require.Error(t, err)
		// validate that it exists in temp fs
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
		require.NoError(t, err)
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
		require.Error(t, err)
	}

	// test Copy -- allowed
	err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
	if err != errNotSupported {
		require.NoError(t, err)
		_, err = rootFs.NewObject("test/one")
		require.NoError(t, err)
		_, err = rootFs.NewObject("test/third")
		require.NoError(t, err)
		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
		require.NoError(t, err)
		require.Equal(t, []byte("one content"), data2)
		// validate that it exists in temp fs
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
		require.NoError(t, err)
		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
		require.NoError(t, err)
	}

	// test Remove
	err = runInstance.rm(t, rootFs, "test/one")
	require.Error(t, err)
	_, err = rootFs.NewObject("test/one")
	require.NoError(t, err)
	// validate that it doesn't exist in temp fs
	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	require.NoError(t, err)
	runInstance.writeRemoteString(t, rootFs, "test/one", "one content")

	// test Update - this seems to work. Why? FIXME
	//firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
	//require.NoError(t, err)
	//err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated", func() {
	//	data2 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len("one content updated")), true)
	//	require.Equal(t, "one content", string(data2))
	//
	//	tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
	//	require.NoError(t, err)
	//	if runInstance.rootIsCrypt {
	//		require.Equal(t, int64(67), tmpInfo.Size())
	//	} else {
	//		require.Equal(t, int64(len(data2)), tmpInfo.Size())
	//	}
	//})
	//require.Error(t, err)

	// test SetModTime -- seems to work cause of previous
	//secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
	//require.NoError(t, err)
	//require.Equal(t, secondModTime, firstModTime)
	//require.NotEqual(t, time.Time{}, firstModTime)
	//require.NotEqual(t, time.Time{}, secondModTime)
}
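The .orig file above preserves the whole pre-patch upload test suite. Its background-upload helpers follow one recurring pattern; the summary below is my reading of the test flow, using the suite's own helper names rather than code from the repository:

// Sketch of the offline-upload lifecycle these tests exercise (the
// three-step flow is an inference from the assertions, not documented API):
//
//	bu := runInstance.listenForBackgroundUpload(t, rootFs, "one") // subscribe before writing
//	runInstance.writeRemoteReader(t, rootFs, "one", reader)       // write parks data in cache-tmp-upload-path
//	runInstance.completeBackgroundUpload(t, "one", bu)            // block until the uploader moves it upstream
//
// Afterwards the temp copy must be gone (os.IsNotExist) and the data
// readable from the wrapped remote, which is exactly what the tests verify.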
backend/cache/cache_upload_test.go.rej (vendored, new file, 12 lines)
@@ -0,0 +1,12 @@
--- cache_upload_test.go
+++ cache_upload_test.go
@@ -1500,9 +1469,6 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
 	}
 	r.tempFiles = nil
 	debug.FreeOSMemory()
-	for k, v := range r.runDefaultFlagMap {
-		_ = flag.Set(k, v)
-	}
 }

 func (r *run) randomBytes(t *testing.T, size int64) []byte {
backend/cache/directory.go (vendored, 12 lines changed)
@@ -3,11 +3,11 @@
 package cache

 import (
-	"context"
-	"path"
 	"time"

-	"github.com/rclone/rclone/fs"
+	"path"
+
+	"github.com/ncw/rclone/fs"
 )

 // Directory is a generic dir that stores basic information about it

@@ -56,7 +56,7 @@ func ShallowDirectory(f *Fs, remote string) *Directory {
 }

 // DirectoryFromOriginal builds one from a generic fs.Directory
-func DirectoryFromOriginal(ctx context.Context, f *Fs, d fs.Directory) *Directory {
+func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory {
 	var cd *Directory
 	fullRemote := path.Join(f.Root(), d.Remote())

@@ -68,7 +68,7 @@ func DirectoryFromOriginal(ctx context.Context, f *Fs, d fs.Directory) *Director
 		CacheFs:      f,
 		Name:         name,
 		Dir:          dir,
-		CacheModTime: d.ModTime(ctx).UnixNano(),
+		CacheModTime: d.ModTime().UnixNano(),
 		CacheSize:    d.Size(),
 		CacheItems:   d.Items(),
 		CacheType:    "Directory",

@@ -111,7 +111,7 @@ func (d *Directory) parentRemote() string {
 }

 // ModTime returns the cached ModTime
-func (d *Directory) ModTime(ctx context.Context) time.Time {
+func (d *Directory) ModTime() time.Time {
 	return time.Unix(0, d.CacheModTime)
 }
|
|||||||
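Note: this hunk (and every cache file below) consistently drops the context.Context parameter and swaps the github.com/rclone/rclone imports for github.com/ncw/rclone, stepping the backend back to the pre-context fs interfaces of the v1.43 line. A minimal standalone sketch, with illustrative names that are not rclone's, of how the two ModTime shapes relate:

package main

import (
	"context"
	"fmt"
	"time"
)

// oldDirectory models the context-free API on the v1.43.1 side of the hunks.
type oldDirectory interface {
	ModTime() time.Time
}

// newDirectory models the context-aware API on the other side.
type newDirectory interface {
	ModTime(ctx context.Context) time.Time
}

// adapter lets a context-free implementation satisfy the newer
// interface by simply ignoring the context argument.
type adapter struct{ old oldDirectory }

func (a adapter) ModTime(_ context.Context) time.Time { return a.old.ModTime() }

type dir struct{ ts time.Time }

func (d dir) ModTime() time.Time { return d.ts }

func main() {
	var nd newDirectory = adapter{old: dir{ts: time.Unix(0, 0)}}
	fmt.Println(nd.ModTime(context.Background()).UTC())
}

Going the other way (as this diff does) is the harder direction: every caller that threaded a context through has to be rewritten, which is why the change touches so many call sites below.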
112
backend/cache/handle.go
vendored
@@ -3,18 +3,18 @@
 package cache
 
 import (
-	"context"
 	"fmt"
 	"io"
-	"path"
-	"runtime"
-	"strings"
 	"sync"
 	"time"
 
+	"path"
+	"runtime"
+	"strings"
+
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/operations"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/operations"
 )
 
 var uploaderMap = make(map[string]*backgroundWriter)
@@ -41,7 +41,6 @@ func initBackgroundUploader(fs *Fs) (*backgroundWriter, error) {
 
 // Handle is managing the read/write/seek operations on an open handle
 type Handle struct {
-	ctx          context.Context
 	cachedObject *Object
 	cfs          *Fs
 	memory       *Memory
@@ -50,19 +49,17 @@ type Handle struct {
 	offset         int64
 	seenOffsets    map[int64]bool
 	mu             sync.Mutex
-	workersWg      sync.WaitGroup
 	confirmReading chan bool
-	workers        int
-	maxWorkerID    int
-	UseMemory      bool
+	UseMemory      bool
+	workers        []*worker
 	closed         bool
 	reading        bool
 }
 
 // NewObjectHandle returns a new Handle for an existing Object
-func NewObjectHandle(ctx context.Context, o *Object, cfs *Fs) *Handle {
+func NewObjectHandle(o *Object, cfs *Fs) *Handle {
 	r := &Handle{
-		ctx:          ctx,
 		cachedObject: o,
 		cfs:          cfs,
 		offset:       0,
@@ -98,7 +95,7 @@ func (r *Handle) String() string {
 
 // startReadWorkers will start the worker pool
 func (r *Handle) startReadWorkers() {
-	if r.workers > 0 {
+	if r.hasAtLeastOneWorker() {
 		return
 	}
 	totalWorkers := r.cacheFs().opt.TotalWorkers
@@ -120,27 +117,26 @@ func (r *Handle) startReadWorkers() {
 
 // scaleOutWorkers will increase the worker pool count by the provided amount
 func (r *Handle) scaleWorkers(desired int) {
-	current := r.workers
+	current := len(r.workers)
 	if current == desired {
 		return
 	}
 	if current > desired {
 		// scale in gracefully
-		for r.workers > desired {
+		for i := 0; i < current-desired; i++ {
 			r.preloadQueue <- -1
-			r.workers--
 		}
 	} else {
 		// scale out
-		for r.workers < desired {
+		for i := 0; i < desired-current; i++ {
 			w := &worker{
 				r:  r,
-				id: r.maxWorkerID,
+				ch: r.preloadQueue,
+				id: current + i,
 			}
-			r.workersWg.Add(1)
-			r.workers++
-			r.maxWorkerID++
 			go w.run()
+
+			r.workers = append(r.workers, w)
 		}
 	}
 	// ignore first scale out from 0
@@ -152,7 +148,7 @@ func (r *Handle) scaleWorkers(desired int) {
 func (r *Handle) confirmExternalReading() {
 	// if we have a max value of workers
 	// then we skip this step
-	if r.workers > 1 ||
+	if len(r.workers) > 1 ||
 		!r.cacheFs().plexConnector.isConfigured() {
 		return
 	}
@@ -182,7 +178,7 @@ func (r *Handle) queueOffset(offset int64) {
 		}
 	}
 
-	for i := 0; i < r.workers; i++ {
+	for i := 0; i < len(r.workers); i++ {
 		o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
 		if o < 0 || o >= r.cachedObject.Size() {
 			continue
@@ -197,6 +193,16 @@ func (r *Handle) queueOffset(offset int64) {
 	}
 }
 
+func (r *Handle) hasAtLeastOneWorker() bool {
+	oneWorker := false
+	for i := 0; i < len(r.workers); i++ {
+		if r.workers[i].isRunning() {
+			oneWorker = true
+		}
+	}
+	return oneWorker
+}
+
 // getChunk is called by the FS to retrieve a specific chunk of known start and size from where it can find it
 // it can be from transient or persistent cache
 // it will also build the chunk from the cache's specific chunk boundaries and build the final desired chunk in a buffer
@@ -237,7 +243,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
 	// not found in ram or
 	// the worker didn't managed to download the chunk in time so we abort and close the stream
 	if err != nil || len(data) == 0 || !found {
-		if r.workers == 0 {
+		if !r.hasAtLeastOneWorker() {
 			fs.Errorf(r, "out of workers")
 			return nil, io.ErrUnexpectedEOF
 		}
@@ -298,7 +304,14 @@ func (r *Handle) Close() error {
 	close(r.preloadQueue)
 	r.closed = true
 	// wait for workers to complete their jobs before returning
-	r.workersWg.Wait()
+	waitCount := 3
+	for i := 0; i < len(r.workers); i++ {
+		waitIdx := 0
+		for r.workers[i].isRunning() && waitIdx < waitCount {
+			time.Sleep(time.Second)
+			waitIdx++
+		}
+	}
 	r.memory.db.Flush()
 
 	fs.Debugf(r, "cache reader closed %v", r.offset)
@@ -335,9 +348,12 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
 }
 
 type worker struct {
 	r       *Handle
-	rc      io.ReadCloser
-	id      int
+	ch      <-chan int64
+	rc      io.ReadCloser
+	id      int
+	running bool
+	mu      sync.Mutex
 }
 
 // String is a representation of this worker
@@ -354,7 +370,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
 	r := w.rc
 	if w.rc == nil {
 		r, err = w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
-			return w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1})
+			return w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
 		})
 		if err != nil {
 			return nil, err
@@ -364,7 +380,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
 
 	if !closeOpen {
 		if do, ok := r.(fs.RangeSeeker); ok {
-			_, err = do.RangeSeek(w.r.ctx, offset, io.SeekStart, end-offset)
+			_, err = do.RangeSeek(offset, io.SeekStart, end-offset)
 			return r, err
 		} else if do, ok := r.(io.Seeker); ok {
 			_, err = do.Seek(offset, io.SeekStart)
@@ -374,7 +390,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
 
 	_ = w.rc.Close()
 	return w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
-		r, err = w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1})
+		r, err = w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
 		if err != nil {
 			return nil, err
 		}
@@ -382,19 +398,33 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
 	})
 }
 
+func (w *worker) isRunning() bool {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	return w.running
+}
+
+func (w *worker) setRunning(f bool) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	w.running = f
+}
+
 // run is the main loop for the worker which receives offsets to preload
 func (w *worker) run() {
 	var err error
 	var data []byte
+	defer w.setRunning(false)
 	defer func() {
 		if w.rc != nil {
 			_ = w.rc.Close()
+			w.setRunning(false)
 		}
-		w.r.workersWg.Done()
 	}()
 
 	for {
-		chunkStart, open := <-w.r.preloadQueue
+		chunkStart, open := <-w.ch
+		w.setRunning(true)
 		if chunkStart < 0 || !open {
 			break
 		}
@@ -452,7 +482,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
 	// we seem to be getting only errors so we abort
 	if err != nil {
 		fs.Errorf(w, "object open failed %v: %v", chunkStart, err)
-		err = w.r.cachedObject.refreshFromSource(w.r.ctx, true)
+		err = w.r.cachedObject.refreshFromSource(true)
 		if err != nil {
 			fs.Errorf(w, "%v", err)
 		}
@@ -465,7 +495,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
 	sourceRead, err = io.ReadFull(w.rc, data)
 	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
 		fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
-		err = w.r.cachedObject.refreshFromSource(w.r.ctx, true)
+		err = w.r.cachedObject.refreshFromSource(true)
 		if err != nil {
 			fs.Errorf(w, "%v", err)
 		}
@@ -591,7 +621,7 @@ func (b *backgroundWriter) run() {
 		remote := b.fs.cleanRootFromPath(absPath)
 		b.notify(remote, BackgroundUploadStarted, nil)
 		fs.Infof(remote, "background upload: started upload")
-		err = operations.MoveFile(context.TODO(), b.fs.UnWrap(), b.fs.tempFs, remote, remote)
+		err = operations.MoveFile(b.fs.UnWrap(), b.fs.tempFs, remote, remote)
 		if err != nil {
 			b.notify(remote, BackgroundUploadError, err)
 			_ = b.fs.cache.rollbackPendingUpload(absPath)
@@ -601,14 +631,14 @@ func (b *backgroundWriter) run() {
 		// clean empty dirs up to root
 		thisDir := cleanPath(path.Dir(remote))
 		for thisDir != "" {
-			thisList, err := b.fs.tempFs.List(context.TODO(), thisDir)
+			thisList, err := b.fs.tempFs.List(thisDir)
 			if err != nil {
 				break
 			}
 			if len(thisList) > 0 {
 				break
 			}
-			err = b.fs.tempFs.Rmdir(context.TODO(), thisDir)
+			err = b.fs.tempFs.Rmdir(thisDir)
 			fs.Debugf(thisDir, "cleaned from temp path")
 			if err != nil {
 				break
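Note: handle.go replaces a WaitGroup-counted worker pool with a slice of workers that each own a receive-only channel and a mutex-guarded running flag; scale-in is done by pushing a -1 sentinel down the shared queue. A standalone sketch of that scheme under illustrative names (this is not rclone code, just the pattern the right-hand side of the diff uses):

package main

import (
	"fmt"
	"sync"
	"time"
)

// worker mirrors the v1.43.1-side struct: a receive-only offset channel
// plus a mutex-guarded running flag instead of a shared sync.WaitGroup.
type worker struct {
	ch      <-chan int64
	id      int
	running bool
	mu      sync.Mutex
}

func (w *worker) isRunning() bool {
	w.mu.Lock()
	defer w.mu.Unlock()
	return w.running
}

func (w *worker) setRunning(f bool) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.running = f
}

func (w *worker) run() {
	defer w.setRunning(false)
	for {
		offset, open := <-w.ch
		w.setRunning(true)
		if offset < 0 || !open {
			break // -1 sentinel or closed channel retires this worker
		}
		fmt.Printf("worker %d preloads offset %d\n", w.id, offset)
	}
}

func main() {
	queue := make(chan int64)
	var workers []*worker
	for i := 0; i < 2; i++ { // scale out to two workers
		w := &worker{ch: queue, id: i}
		go w.run()
		workers = append(workers, w)
	}
	queue <- 0
	queue <- 4096
	queue <- -1 // scale in: exactly one worker stops
	time.Sleep(100 * time.Millisecond)
	for _, w := range workers {
		fmt.Printf("worker %d running: %v\n", w.id, w.isRunning())
	}
}

One caveat visible in the Close() hunk above: a worker blocked on the channel still reports the last state it set, so the diff's Close replaces the deterministic WaitGroup.Wait with a bounded isRunning poll (up to three one-second sleeps per worker).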
77
backend/cache/object.go
vendored
@@ -3,16 +3,15 @@
 package cache
 
 import (
-	"context"
 	"io"
 	"path"
 	"sync"
 	"time"
 
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/lib/readers"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/lib/readers"
 )
 
 const (
@@ -69,7 +68,7 @@ func NewObject(f *Fs, remote string) *Object {
 }
 
 // ObjectFromOriginal builds one from a generic fs.Object
-func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object {
+func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
 	var co *Object
 	fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
 	dir, name := path.Split(fullRemote)
@@ -93,13 +92,13 @@ func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object {
 		CacheType: cacheType,
 		CacheTs:   time.Now(),
 	}
-	co.updateData(ctx, o)
+	co.updateData(o)
 	return co
 }
 
-func (o *Object) updateData(ctx context.Context, source fs.Object) {
+func (o *Object) updateData(source fs.Object) {
 	o.Object = source
-	o.CacheModTime = source.ModTime(ctx).UnixNano()
+	o.CacheModTime = source.ModTime().UnixNano()
 	o.CacheSize = source.Size()
 	o.CacheStorable = source.Storable()
 	o.CacheTs = time.Now()
@@ -131,20 +130,20 @@ func (o *Object) abs() string {
 }
 
 // ModTime returns the cached ModTime
-func (o *Object) ModTime(ctx context.Context) time.Time {
-	_ = o.refresh(ctx)
+func (o *Object) ModTime() time.Time {
+	_ = o.refresh()
 	return time.Unix(0, o.CacheModTime)
 }
 
 // Size returns the cached Size
 func (o *Object) Size() int64 {
-	_ = o.refresh(context.TODO())
+	_ = o.refresh()
 	return o.CacheSize
 }
 
 // Storable returns the cached Storable
 func (o *Object) Storable() bool {
-	_ = o.refresh(context.TODO())
+	_ = o.refresh()
 	return o.CacheStorable
 }
 
@@ -152,18 +151,18 @@ func (o *Object) Storable() bool {
 // all these conditions must be true to ignore a refresh
 // 1. cache ts didn't expire yet
 // 2. is not pending a notification from the wrapped fs
-func (o *Object) refresh(ctx context.Context) error {
+func (o *Object) refresh() error {
 	isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
 	isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
 	if !isExpired && !isNotified {
 		return nil
 	}
 
-	return o.refreshFromSource(ctx, true)
+	return o.refreshFromSource(true)
 }
 
 // refreshFromSource requests the original FS for the object in case it comes from a cached entry
-func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
+func (o *Object) refreshFromSource(force bool) error {
 	o.refreshMutex.Lock()
 	defer o.refreshMutex.Unlock()
 	var err error
@@ -173,29 +172,29 @@ func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
 		return nil
 	}
 	if o.isTempFile() {
-		liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
+		liveObject, err = o.ParentFs.NewObject(o.Remote())
 		err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
 	} else {
-		liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
+		liveObject, err = o.CacheFs.Fs.NewObject(o.Remote())
 		err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
 	}
 	if err != nil {
 		fs.Errorf(o, "error refreshing object in : %v", err)
 		return err
 	}
-	o.updateData(ctx, liveObject)
+	o.updateData(liveObject)
 	o.persist()
 
 	return nil
 }
 
 // SetModTime sets the ModTime of this object
-func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
-	if err := o.refreshFromSource(ctx, false); err != nil {
+func (o *Object) SetModTime(t time.Time) error {
+	if err := o.refreshFromSource(false); err != nil {
 		return err
 	}
 
-	err := o.Object.SetModTime(ctx, t)
+	err := o.Object.SetModTime(t)
 	if err != nil {
 		return err
 	}
@@ -208,19 +207,13 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
 }
 
 // Open is used to request a specific part of the file using fs.RangeOption
-func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
-	var err error
-
-	if o.Object == nil {
-		err = o.refreshFromSource(ctx, true)
-	} else {
-		err = o.refresh(ctx)
-	}
-	if err != nil {
+func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
+	if err := o.refreshFromSource(true); err != nil {
 		return nil, err
 	}
 
-	cacheReader := NewObjectHandle(ctx, o, o.CacheFs)
+	var err error
+	cacheReader := NewObjectHandle(o, o.CacheFs)
 	var offset, limit int64 = 0, -1
 	for _, option := range options {
 		switch x := option.(type) {
@@ -239,8 +232,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
 }
 
 // Update will change the object data
-func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	if err := o.refreshFromSource(ctx, false); err != nil {
+func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+	if err := o.refreshFromSource(false); err != nil {
 		return err
 	}
 	// pause background uploads if active
@@ -255,7 +248,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	fs.Debugf(o, "updating object contents with size %v", src.Size())
 
 	// FIXME use reliable upload
-	err := o.Object.Update(ctx, in, src, options...)
+	err := o.Object.Update(in, src, options...)
 	if err != nil {
 		fs.Errorf(o, "error updating source: %v", err)
 		return err
@@ -266,7 +259,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	// advertise to ChangeNotify if wrapped doesn't do that
 	o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)
 
-	o.CacheModTime = src.ModTime(ctx).UnixNano()
+	o.CacheModTime = src.ModTime().UnixNano()
 	o.CacheSize = src.Size()
 	o.CacheHashes = make(map[hash.Type]string)
 	o.CacheTs = time.Now()
@@ -276,8 +269,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 }
 
 // Remove deletes the object from both the cache and the source
-func (o *Object) Remove(ctx context.Context) error {
-	if err := o.refreshFromSource(ctx, false); err != nil {
+func (o *Object) Remove() error {
+	if err := o.refreshFromSource(false); err != nil {
 		return err
 	}
 	// pause background uploads if active
@@ -289,7 +282,7 @@ func (o *Object) Remove(ctx context.Context) error {
 			return errors.Errorf("%v is currently uploading, can't delete", o)
 		}
 	}
-	err := o.Object.Remove(ctx)
+	err := o.Object.Remove()
 	if err != nil {
 		return err
 	}
@@ -307,8 +300,8 @@ func (o *Object) Remove(ctx context.Context) error {
 
 // Hash requests a hash of the object and stores in the cache
 // since it might or might not be called, this is lazy loaded
-func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
-	_ = o.refresh(ctx)
+func (o *Object) Hash(ht hash.Type) (string, error) {
+	_ = o.refresh()
 	if o.CacheHashes == nil {
 		o.CacheHashes = make(map[hash.Type]string)
 	}
@@ -317,10 +310,10 @@ func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
 	if found {
 		return cachedHash, nil
 	}
-	if err := o.refreshFromSource(ctx, false); err != nil {
+	if err := o.refreshFromSource(false); err != nil {
 		return "", err
 	}
-	liveHash, err := o.Object.Hash(ctx, ht)
+	liveHash, err := o.Object.Hash(ht)
 	if err != nil {
 		return "", err
 	}
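Note: beyond the mechanical context removal, object.go's refresh() encodes a small but important rule: the expensive round trip to the wrapped remote is skipped only while the cache entry is younger than InfoAge and no change notification is pending. A minimal standalone sketch of that rule (illustrative names, not rclone's API):

package main

import (
	"fmt"
	"time"
)

type cachedObject struct {
	cacheTs  time.Time     // when the cached metadata was last written
	infoAge  time.Duration // how long cached metadata stays trustworthy
	notified bool          // a ChangeNotify event is pending for this object
}

// needsRefresh mirrors refresh(): both conditions must hold to skip a refresh.
func (o *cachedObject) needsRefresh() bool {
	isExpired := time.Now().After(o.cacheTs.Add(o.infoAge))
	return isExpired || o.notified
}

func main() {
	o := &cachedObject{cacheTs: time.Now(), infoAge: 6 * time.Hour}
	fmt.Println(o.needsRefresh()) // false: entry is fresh and not notified
	o.notified = true
	fmt.Println(o.needsRefresh()) // true: a pending notification forces a refresh
}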
40
backend/cache/plex.go
vendored
@@ -3,19 +3,20 @@
 package cache
 
 import (
-	"bytes"
-	"crypto/tls"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"strings"
-	"sync"
 	"time"
 
-	cache "github.com/patrickmn/go-cache"
-	"github.com/rclone/rclone/fs"
+	"sync"
+
+	"bytes"
+	"io/ioutil"
+
+	"github.com/ncw/rclone/fs"
+	"github.com/patrickmn/go-cache"
 	"golang.org/x/net/websocket"
 )
 
@@ -53,7 +54,6 @@ type plexConnector struct {
 	username   string
 	password   string
 	token      string
-	insecure   bool
 	f          *Fs
 	mu         sync.Mutex
 	running    bool
@@ -63,7 +63,7 @@ type plexConnector struct {
 }
 
 // newPlexConnector connects to a Plex server and generates a token
-func newPlexConnector(f *Fs, plexURL, username, password string, insecure bool, saveToken func(string)) (*plexConnector, error) {
+func newPlexConnector(f *Fs, plexURL, username, password string, saveToken func(string)) (*plexConnector, error) {
 	u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
 	if err != nil {
 		return nil, err
@@ -75,7 +75,6 @@ func newPlexConnector(f *Fs, plexURL, username, password string, insecure bool,
 		username:   username,
 		password:   password,
 		token:      "",
-		insecure:   insecure,
 		stateCache: cache.New(time.Hour, time.Minute),
 		saveToken:  saveToken,
 	}
@@ -84,7 +83,7 @@ func newPlexConnector(f *Fs, plexURL, username, password string, insecure bool,
 }
 
 // newPlexConnector connects to a Plex server and generates a token
-func newPlexConnectorWithToken(f *Fs, plexURL, token string, insecure bool) (*plexConnector, error) {
+func newPlexConnectorWithToken(f *Fs, plexURL, token string) (*plexConnector, error) {
 	u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
 	if err != nil {
 		return nil, err
@@ -94,7 +93,6 @@ func newPlexConnectorWithToken(f *Fs, plexURL, token string, insecure bool) (*pl
 		f:          f,
 		url:        u,
 		token:      token,
-		insecure:   insecure,
 		stateCache: cache.New(time.Hour, time.Minute),
 	}
 	pc.listenWebsocket()
@@ -109,26 +107,14 @@ func (p *plexConnector) closeWebsocket() {
 	p.running = false
 }
 
-func (p *plexConnector) websocketDial() (*websocket.Conn, error) {
-	u := strings.TrimRight(strings.Replace(strings.Replace(
-		p.url.String(), "http://", "ws://", 1), "https://", "wss://", 1), "/")
-	url := fmt.Sprintf(defPlexNotificationURL, u, p.token)
-
-	config, err := websocket.NewConfig(url, "http://localhost")
-	if err != nil {
-		return nil, err
-	}
-	if p.insecure {
-		config.TlsConfig = &tls.Config{InsecureSkipVerify: true}
-	}
-	return websocket.DialConfig(config)
-}
-
 func (p *plexConnector) listenWebsocket() {
 	p.runningMu.Lock()
 	defer p.runningMu.Unlock()
 
-	conn, err := p.websocketDial()
+	u := strings.Replace(p.url.String(), "http://", "ws://", 1)
+	u = strings.Replace(u, "https://", "wss://", 1)
+	conn, err := websocket.Dial(fmt.Sprintf(defPlexNotificationURL, strings.TrimRight(u, "/"), p.token),
+		"", "http://localhost")
	if err != nil {
 		fs.Errorf("plex", "%v", err)
 		return
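Note: the deleted websocketDial helper carried the insecure TLS branch (config.TlsConfig with InsecureSkipVerify), so the plain websocket.Dial on the v1.43.1 side has no way to skip certificate verification; that matches the removal of the insecure field and parameters above. The surviving logic is just a scheme rewrite before dialing, sketched standalone below (the notification path and token are placeholders standing in for the defPlexNotificationURL constant, whose exact format string is not shown in this diff):

package main

import (
	"fmt"
	"strings"
)

// notificationURL rewrites a Plex HTTP(S) endpoint to its websocket
// equivalent, as the inlined dial above does. The path and query here
// are assumed placeholders, not the real defPlexNotificationURL.
func notificationURL(plexURL, token string) string {
	u := strings.Replace(plexURL, "http://", "ws://", 1)
	u = strings.Replace(u, "https://", "wss://", 1)
	return fmt.Sprintf("%s/:/websockets/notifications?X-Plex-Token=%s",
		strings.TrimRight(u, "/"), token)
}

func main() {
	fmt.Println(notificationURL("https://plex.example.com/", "PLACEHOLDER-TOKEN"))
}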
4
backend/cache/storage_memory.go
vendored
@@ -7,9 +7,9 @@ import (
 	"strings"
 	"time"
 
-	cache "github.com/patrickmn/go-cache"
+	"github.com/ncw/rclone/fs"
+	"github.com/patrickmn/go-cache"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/fs"
 )
 
 // Memory is a wrapper of transient storage for a go-cache store
24
backend/cache/storage_persistent.go
vendored
@@ -3,23 +3,25 @@
 package cache
 
 import (
+	"time"
+
 	"bytes"
-	"context"
 	"encoding/binary"
 	"encoding/json"
-	"fmt"
-	"io/ioutil"
 	"os"
 	"path"
 	"strconv"
 	"strings"
 	"sync"
-	"time"
+
+	"io/ioutil"
+
+	"fmt"
 
 	bolt "github.com/coreos/bbolt"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/walk"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/walk"
 )
 
 // Constants
@@ -399,7 +401,7 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
 	if err != nil {
 		return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
 	}
-	err = bucket.Put([]byte(cachedObject.Name), encoded)
+	err = bucket.Put([]byte(cachedObject.Name), []byte(encoded))
 	if err != nil {
 		return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
 	}
@@ -810,7 +812,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
 		if err != nil {
 			return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
 		}
-		err = bucket.Put([]byte(destPath), encoded)
+		err = bucket.Put([]byte(destPath), []byte(encoded))
 		if err != nil {
 			return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
 		}
@@ -1015,7 +1017,7 @@ func (b *Persistent) SetPendingUploadToStarted(remote string) error {
 }
 
 // ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue
-func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error {
+func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
 	return b.db.Update(func(tx *bolt.Tx) error {
 		_ = tx.DeleteBucket([]byte(tempBucket))
 		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
@@ -1024,7 +1026,7 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
 		}
 
 		var queuedEntries []fs.Object
-		err = walk.ListR(ctx, cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
+		err = walk.Walk(cacheFs.tempFs, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
 			for _, o := range entries {
 				if oo, ok := o.(fs.Object); ok {
 					queuedEntries = append(queuedEntries, oo)
@@ -1050,7 +1052,7 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
 		if err != nil {
 			return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
 		}
-		err = bucket.Put([]byte(destPath), encoded)
+		err = bucket.Put([]byte(destPath), []byte(encoded))
 		if err != nil {
 			return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
 		}
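Note on the bucket.Put hunks above: json.Marshal already yields []byte, so the []byte(encoded) conversion on the v1.43.1 side is a redundant (but harmless) no-op. A tiny standalone demonstration:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	encoded, err := json.Marshal(map[string]int{"size": 42})
	if err != nil {
		panic(err)
	}
	same := []byte(encoded) // no-op conversion: encoded is already []byte
	fmt.Println(string(encoded) == string(same)) // true
}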
File diff suppressed because it is too large
@@ -1,605 +0,0 @@
|
|||||||
package chunker
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"path"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
"github.com/rclone/rclone/fs/operations"
|
|
||||||
"github.com/rclone/rclone/fstest"
|
|
||||||
"github.com/rclone/rclone/fstest/fstests"
|
|
||||||
"github.com/rclone/rclone/lib/random"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Command line flags
|
|
||||||
var (
|
|
||||||
UploadKilobytes = flag.Int("upload-kilobytes", 0, "Upload size in Kilobytes, set this to test large uploads")
|
|
||||||
)
|
|
||||||
|
|
||||||
// test that chunking does not break large uploads
|
|
||||||
func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
|
|
||||||
t.Run(fmt.Sprintf("PutLarge%dk", kilobytes), func(t *testing.T) {
|
|
||||||
fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
|
|
||||||
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
|
|
||||||
Path: fmt.Sprintf("chunker-upload-%dk", kilobytes),
|
|
||||||
Size: int64(kilobytes) * int64(fs.KibiByte),
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// test chunk name parser
|
|
||||||
func testChunkNameFormat(t *testing.T, f *Fs) {
|
|
||||||
saveOpt := f.opt
|
|
||||||
defer func() {
|
|
||||||
// restore original settings (f is pointer, f.opt is struct)
|
|
||||||
f.opt = saveOpt
|
|
||||||
_ = f.setChunkNameFormat(f.opt.NameFormat)
|
|
||||||
}()
|
|
||||||
|
|
||||||
assertFormat := func(pattern, wantDataFormat, wantCtrlFormat, wantNameRegexp string) {
|
|
||||||
err := f.setChunkNameFormat(pattern)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, wantDataFormat, f.dataNameFmt)
|
|
||||||
assert.Equal(t, wantCtrlFormat, f.ctrlNameFmt)
|
|
||||||
assert.Equal(t, wantNameRegexp, f.nameRegexp.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
assertFormatValid := func(pattern string) {
|
|
||||||
err := f.setChunkNameFormat(pattern)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
assertFormatInvalid := func(pattern string) {
|
|
||||||
err := f.setChunkNameFormat(pattern)
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
assertMakeName := func(wantChunkName, mainName string, chunkNo int, ctrlType string, xactNo int64) {
|
|
||||||
gotChunkName := f.makeChunkName(mainName, chunkNo, ctrlType, xactNo)
|
|
||||||
assert.Equal(t, wantChunkName, gotChunkName)
|
|
||||||
}
|
|
||||||
|
|
||||||
assertMakeNamePanics := func(mainName string, chunkNo int, ctrlType string, xactNo int64) {
|
|
||||||
assert.Panics(t, func() {
|
|
||||||
_ = f.makeChunkName(mainName, chunkNo, ctrlType, xactNo)
|
|
||||||
}, "makeChunkName(%q,%d,%q,%d) should panic", mainName, chunkNo, ctrlType, xactNo)
|
|
||||||
}
|
|
||||||
|
|
||||||
assertParseName := func(fileName, wantMainName string, wantChunkNo int, wantCtrlType string, wantXactNo int64) {
|
|
||||||
gotMainName, gotChunkNo, gotCtrlType, gotXactNo := f.parseChunkName(fileName)
|
|
||||||
assert.Equal(t, wantMainName, gotMainName)
|
|
||||||
assert.Equal(t, wantChunkNo, gotChunkNo)
|
|
||||||
assert.Equal(t, wantCtrlType, gotCtrlType)
|
|
||||||
assert.Equal(t, wantXactNo, gotXactNo)
|
|
||||||
}
|
|
||||||
|
|
||||||
const newFormatSupported = false // support for patterns not starting with base name (*)
|
|
||||||
|
|
||||||
// valid formats
|
|
||||||
assertFormat(`*.rclone_chunk.###`, `%s.rclone_chunk.%03d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
|
|
||||||
assertFormat(`*.rclone_chunk.#`, `%s.rclone_chunk.%d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
|
|
||||||
assertFormat(`*_chunk_#####`, `%s_chunk_%05d`, `%s_chunk__%s`, `^(.+?)_chunk_(?:([0-9]{5,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
|
|
||||||
assertFormat(`*-chunk-#`, `%s-chunk-%d`, `%s-chunk-_%s`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
|
|
||||||
assertFormat(`*-chunk-#-%^$()[]{}.+-!?:\`, `%s-chunk-%d-%%^$()[]{}.+-!?:\`, `%s-chunk-_%s-%%^$()[]{}.+-!?:\`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z]{3,9}))-%\^\$\(\)\[\]\{\}\.\+-!\?:\\(?:\.\.tmp_([0-9]{10,19}))?$`)
|
|
||||||
if newFormatSupported {
|
|
||||||
assertFormat(`_*-chunk-##,`, `_%s-chunk-%02d,`, `_%s-chunk-_%s,`, `^_(.+?)-chunk-(?:([0-9]{2,})|_([a-z]{3,9})),(?:\.\.tmp_([0-9]{10,19}))?$`)
|
|
||||||
}
|
|
||||||
|
|
||||||
// invalid formats
|
|
||||||
assertFormatInvalid(`chunk-#`)
|
|
||||||
assertFormatInvalid(`*-chunk`)
|
|
||||||
assertFormatInvalid(`*-*-chunk-#`)
|
|
||||||
assertFormatInvalid(`*-chunk-#-#`)
|
|
||||||
assertFormatInvalid(`#-chunk-*`)
|
|
||||||
assertFormatInvalid(`*/#`)
|
|
||||||
|
|
||||||
assertFormatValid(`*#`)
|
|
||||||
assertFormatInvalid(`**#`)
|
|
||||||
assertFormatInvalid(`#*`)
|
|
||||||
assertFormatInvalid(``)
|
|
||||||
assertFormatInvalid(`-`)
|
|
||||||
|
|
||||||
// quick tests
|
|
||||||
if newFormatSupported {
|
|
||||||
assertFormat(`part_*_#`, `part_%s_%d`, `part_%s__%s`, `^part_(.+?)_(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
|
|
||||||
f.opt.StartFrom = 1
|
|
||||||
|
|
||||||
assertMakeName(`part_fish_1`, "fish", 0, "", -1)
|
|
||||||
assertParseName(`part_fish_43`, "fish", 42, "", -1)
|
|
||||||
assertMakeName(`part_fish_3..tmp_0000000004`, "fish", 2, "", 4)
|
|
||||||
assertParseName(`part_fish_4..tmp_0000000005`, "fish", 3, "", 5)
|
|
||||||
assertMakeName(`part_fish__locks`, "fish", -2, "locks", -3)
|
|
||||||
assertParseName(`part_fish__locks`, "fish", -1, "locks", -1)
|
|
||||||
assertMakeName(`part_fish__blockinfo..tmp_1234567890123456789`, "fish", -3, "blockinfo", 1234567890123456789)
|
|
||||||
assertParseName(`part_fish__blockinfo..tmp_1234567890123456789`, "fish", -1, "blockinfo", 1234567890123456789)
|
|
||||||
}
|
|
||||||
|
|
||||||
// prepare format for long tests
|
|
||||||
assertFormat(`*.chunk.###`, `%s.chunk.%03d`, `%s.chunk._%s`, `^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
|
|
||||||
f.opt.StartFrom = 2
|
|
||||||
|
|
||||||
// valid data chunks
|
|
||||||
assertMakeName(`fish.chunk.003`, "fish", 1, "", -1)
|
|
||||||
assertMakeName(`fish.chunk.011..tmp_0000054321`, "fish", 9, "", 54321)
|
|
||||||
assertMakeName(`fish.chunk.011..tmp_1234567890`, "fish", 9, "", 1234567890)
|
|
||||||
assertMakeName(`fish.chunk.1916..tmp_123456789012345`, "fish", 1914, "", 123456789012345)
|
|
||||||
|
|
||||||
assertParseName(`fish.chunk.003`, "fish", 1, "", -1)
|
|
||||||
assertParseName(`fish.chunk.004..tmp_0000000021`, "fish", 2, "", 21)
|
|
||||||
assertParseName(`fish.chunk.021`, "fish", 19, "", -1)
|
|
||||||
assertParseName(`fish.chunk.323..tmp_1234567890123456789`, "fish", 321, "", 1234567890123456789)
|
|
||||||
|
|
||||||
// parsing invalid data chunk names
|
|
||||||
assertParseName(`fish.chunk.3`, "", -1, "", -1)
|
|
||||||
assertParseName(`fish.chunk.001`, "", -1, "", -1)
|
|
||||||
assertParseName(`fish.chunk.21`, "", -1, "", -1)
|
|
||||||
assertParseName(`fish.chunk.-21`, "", -1, "", -1)
|
|
||||||
|
|
||||||
assertParseName(`fish.chunk.004.tmp_0000000021`, "", -1, "", -1)
|
|
||||||
assertParseName(`fish.chunk.003..tmp_123456789`, "", -1, "", -1)
|
|
||||||
assertParseName(`fish.chunk.003..tmp_012345678901234567890123456789`, "", -1, "", -1)
|
|
||||||
assertParseName(`fish.chunk.003..tmp_-1`, "", -1, "", -1)
|
|
||||||
|
|
||||||
// valid control chunks
|
|
||||||
assertMakeName(`fish.chunk._info`, "fish", -1, "info", -1)
|
|
||||||
assertMakeName(`fish.chunk._locks`, "fish", -2, "locks", -1)
|
|
||||||
assertMakeName(`fish.chunk._blockinfo`, "fish", -3, "blockinfo", -1)
|
|
||||||
|
|
||||||
assertParseName(`fish.chunk._info`, "fish", -1, "info", -1)
|
|
||||||
assertParseName(`fish.chunk._locks`, "fish", -1, "locks", -1)
|
|
||||||
assertParseName(`fish.chunk._blockinfo`, "fish", -1, "blockinfo", -1)
|
|
||||||
|
|
||||||
// valid temporary control chunks
|
|
||||||
assertMakeName(`fish.chunk._info..tmp_0000000021`, "fish", -1, "info", 21)
|
|
||||||
assertMakeName(`fish.chunk._locks..tmp_0000054321`, "fish", -2, "locks", 54321)
|
|
||||||
assertMakeName(`fish.chunk._uploads..tmp_0000000000`, "fish", -3, "uploads", 0)
|
|
||||||
assertMakeName(`fish.chunk._blockinfo..tmp_1234567890123456789`, "fish", -4, "blockinfo", 1234567890123456789)
|
|
||||||
|
|
||||||
assertParseName(`fish.chunk._info..tmp_0000000021`, "fish", -1, "info", 21)
|
|
||||||
assertParseName(`fish.chunk._locks..tmp_0000054321`, "fish", -1, "locks", 54321)
|
|
||||||
assertParseName(`fish.chunk._uploads..tmp_0000000000`, "fish", -1, "uploads", 0)
|
|
||||||
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789`, "fish", -1, "blockinfo", 1234567890123456789)
|
|
||||||
|
|
||||||
// parsing invalid control chunk names
|
|
||||||
assertParseName(`fish.chunk.info`, "", -1, "", -1)
|
|
||||||
assertParseName(`fish.chunk.locks`, "", -1, "", -1)
|
|
||||||
assertParseName(`fish.chunk.uploads`, "", -1, "", -1)
|
|
||||||
assertParseName(`fish.chunk.blockinfo`, "", -1, "", -1)
|
|
||||||
|
|
||||||
assertParseName(`fish.chunk._os`, "", -1, "", -1)
|
|
||||||
assertParseName(`fish.chunk._futuredata`, "", -1, "", -1)
|
|
||||||
assertParseName(`fish.chunk._me_ta`, "", -1, "", -1)
|
|
||||||
assertParseName(`fish.chunk._in-fo`, "", -1, "", -1)
|
|
||||||
assertParseName(`fish.chunk._.bin`, "", -1, "", -1)
|
|
||||||
|
|
||||||
assertParseName(`fish.chunk._locks..tmp_123456789`, "", -1, "", -1)
|
|
||||||
assertParseName(`fish.chunk._meta..tmp_-1`, "", -1, "", -1)
|
|
||||||
assertParseName(`fish.chunk._blockinfo..tmp_012345678901234567890123456789`, "", -1, "", -1)
|
|
||||||
|
|
||||||
// short control chunk names: 3 letters ok, 1-2 letters not allowed
|
|
||||||
assertMakeName(`fish.chunk._ext`, "fish", -1, "ext", -1)
|
|
||||||
assertMakeName(`fish.chunk._ext..tmp_0000000021`, "fish", -1, "ext", 21)
|
|
||||||
assertParseName(`fish.chunk._int`, "fish", -1, "int", -1)
|
|
||||||
assertParseName(`fish.chunk._int..tmp_0000000021`, "fish", -1, "int", 21)
|
|
||||||
assertMakeNamePanics("fish", -1, "in", -1)
|
|
||||||
assertMakeNamePanics("fish", -1, "up", 4)
|
|
||||||
assertMakeNamePanics("fish", -1, "x", -1)
|
|
||||||
assertMakeNamePanics("fish", -1, "c", 4)
|
|
||||||
|
|
||||||
// base file name can sometimes look like a valid chunk name
|
|
||||||
assertParseName(`fish.chunk.003.chunk.004`, "fish.chunk.003", 2, "", -1)
|
|
||||||
assertParseName(`fish.chunk.003.chunk.005..tmp_0000000021`, "fish.chunk.003", 3, "", 21)
|
|
||||||
assertParseName(`fish.chunk.003.chunk._info`, "fish.chunk.003", -1, "info", -1)
|
|
||||||
assertParseName(`fish.chunk.003.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk.003", -1, "blockinfo", 1234567890123456789)
|
|
||||||
assertParseName(`fish.chunk.003.chunk._Meta`, "", -1, "", -1)
|
|
||||||
assertParseName(`fish.chunk.003.chunk._x..tmp_0000054321`, "", -1, "", -1)
|
|
||||||
|
|
||||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.004`, "fish.chunk.004..tmp_0000000021", 2, "", -1)
|
|
||||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.005..tmp_0000000021`, "fish.chunk.004..tmp_0000000021", 3, "", 21)
|
|
||||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._info`, "fish.chunk.004..tmp_0000000021", -1, "info", -1)
|
|
||||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk.004..tmp_0000000021", -1, "blockinfo", 1234567890123456789)
|
|
||||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._Meta`, "", -1, "", -1)
|
|
||||||
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._x..tmp_0000054321`, "", -1, "", -1)
|
|
||||||
|
|
||||||
assertParseName(`fish.chunk._info.chunk.004`, "fish.chunk._info", 2, "", -1)
|
|
||||||
assertParseName(`fish.chunk._info.chunk.005..tmp_0000000021`, "fish.chunk._info", 3, "", 21)
|
|
||||||
assertParseName(`fish.chunk._info.chunk._info`, "fish.chunk._info", -1, "info", -1)
|
|
||||||
assertParseName(`fish.chunk._info.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk._info", -1, "blockinfo", 1234567890123456789)
|
|
||||||
assertParseName(`fish.chunk._info.chunk._info.chunk._Meta`, "", -1, "", -1)
|
|
||||||
assertParseName(`fish.chunk._info.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", -1)
|
|
||||||
|
|
||||||
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk.004`, "fish.chunk._blockinfo..tmp_1234567890123456789", 2, "", -1)
|
|
||||||
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk.005..tmp_0000000021`, "fish.chunk._blockinfo..tmp_1234567890123456789", 3, "", 21)
|
|
||||||
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info`, "fish.chunk._blockinfo..tmp_1234567890123456789", -1, "info", -1)
|
|
||||||
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk._blockinfo..tmp_1234567890123456789", -1, "blockinfo", 1234567890123456789)
|
|
||||||
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info.chunk._Meta`, "", -1, "", -1)
|
|
||||||
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", -1)
|
|
||||||
|
|
||||||
// attempts to make invalid chunk names
|
|
||||||
assertMakeNamePanics("fish", -1, "", -1) // neither data nor control
|
|
||||||
assertMakeNamePanics("fish", 0, "info", -1) // both data and control
|
|
||||||
assertMakeNamePanics("fish", -1, "futuredata", -1) // control type too long
|
|
||||||
assertMakeNamePanics("fish", -1, "123", -1) // digits not allowed
|
|
||||||
assertMakeNamePanics("fish", -1, "Meta", -1) // only lower case letters allowed
|
|
||||||
assertMakeNamePanics("fish", -1, "in-fo", -1) // punctuation not allowed
|
|
||||||
assertMakeNamePanics("fish", -1, "_info", -1)
|
|
||||||
assertMakeNamePanics("fish", -1, "info_", -1)
|
|
||||||
assertMakeNamePanics("fish", -2, ".bind", -3)
|
|
||||||
assertMakeNamePanics("fish", -2, "bind.", -3)
|
|
||||||
|
|
||||||
assertMakeNamePanics("fish", -1, "", 1) // neither data nor control
|
|
||||||
assertMakeNamePanics("fish", 0, "info", 12) // both data and control
|
|
||||||
assertMakeNamePanics("fish", -1, "futuredata", 45) // control type too long
|
|
||||||
assertMakeNamePanics("fish", -1, "123", 123) // digits not allowed
|
|
||||||
assertMakeNamePanics("fish", -1, "Meta", 456) // only lower case letters allowed
|
|
||||||
assertMakeNamePanics("fish", -1, "in-fo", 321) // punctuation not allowed
|
|
||||||
assertMakeNamePanics("fish", -1, "_info", 15678)
|
|
||||||
assertMakeNamePanics("fish", -1, "info_", 999)
|
|
||||||
assertMakeNamePanics("fish", -2, ".bind", 0)
|
|
||||||
assertMakeNamePanics("fish", -2, "bind.", 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testSmallFileInternals(t *testing.T, f *Fs) {
|
|
||||||
const dir = "small"
|
|
||||||
ctx := context.Background()
|
|
||||||
saveOpt := f.opt
|
|
||||||
defer func() {
|
|
||||||
f.opt.FailHard = false
|
|
||||||
_ = operations.Purge(ctx, f.base, dir)
|
|
||||||
f.opt = saveOpt
|
|
||||||
}()
|
|
||||||
f.opt.FailHard = false
|
|
||||||
|
|
||||||
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
|
||||||
|
|
||||||
checkSmallFileInternals := func(obj fs.Object) {
|
|
||||||
assert.NotNil(t, obj)
|
|
||||||
o, ok := obj.(*Object)
|
|
||||||
assert.True(t, ok)
|
|
||||||
assert.NotNil(t, o)
|
|
||||||
if o == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch {
|
|
||||||
case !f.useMeta:
|
|
||||||
// If meta format is "none", non-chunked file (even empty)
|
|
||||||
// internally is a single chunk without meta object.
|
|
||||||
assert.Nil(t, o.main)
|
|
||||||
assert.True(t, o.isComposite()) // sorry, sometimes a name is misleading
|
|
||||||
assert.Equal(t, 1, len(o.chunks))
|
|
||||||
case f.hashAll:
|
|
||||||
// Consistent hashing forces meta object on small files too
|
|
||||||
assert.NotNil(t, o.main)
|
|
||||||
assert.True(t, o.isComposite())
|
|
||||||
assert.Equal(t, 1, len(o.chunks))
|
|
||||||
default:
|
|
||||||
// normally non-chunked file is kept in the Object's main field
|
|
||||||
assert.NotNil(t, o.main)
|
|
||||||
assert.False(t, o.isComposite())
|
|
||||||
assert.Equal(t, 0, len(o.chunks))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
checkContents := func(obj fs.Object, contents string) {
|
|
||||||
assert.NotNil(t, obj)
|
|
||||||
assert.Equal(t, int64(len(contents)), obj.Size())
|
|
||||||
|
|
||||||
r, err := obj.Open(ctx)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.NotNil(t, r)
|
|
||||||
if r == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
data, err := ioutil.ReadAll(r)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, contents, string(data))
|
|
||||||
_ = r.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
checkHashsum := func(obj fs.Object) {
|
|
||||||
var ht hash.Type
|
|
||||||
switch {
|
|
||||||
case !f.hashAll:
|
|
||||||
return
|
|
||||||
case f.useMD5:
|
|
||||||
ht = hash.MD5
|
|
||||||
case f.useSHA1:
|
|
||||||
ht = hash.SHA1
|
|
||||||
default:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// even empty files must have hashsum in consistent mode
|
|
||||||
sum, err := obj.Hash(ctx, ht)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.NotEqual(t, sum, "")
|
|
||||||
}
|
|
||||||
|
|
||||||
checkSmallFile := func(name, contents string) {
|
|
||||||
filename := path.Join(dir, name)
|
|
||||||
item := fstest.Item{Path: filename, ModTime: modTime}
|
|
||||||
_, put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
|
|
||||||
assert.NotNil(t, put)
|
|
||||||
checkSmallFileInternals(put)
|
|
||||||
checkContents(put, contents)
|
|
||||||
checkHashsum(put)
|
|
||||||
|
|
||||||
// objects returned by Put and NewObject must have similar structure
|
|
||||||
obj, err := f.NewObject(ctx, filename)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.NotNil(t, obj)
|
|
||||||
checkSmallFileInternals(obj)
|
|
||||||
checkContents(obj, contents)
|
|
||||||
checkHashsum(obj)
|
|
||||||
|
|
||||||
_ = obj.Remove(ctx)
|
|
||||||
_ = put.Remove(ctx) // for good
|
|
||||||
}
|
|
||||||
|
|
||||||
checkSmallFile("emptyfile", "")
|
|
||||||
checkSmallFile("smallfile", "Ok")
|
|
||||||
}
|
|
||||||
|
|
||||||
func testPreventCorruption(t *testing.T, f *Fs) {
	if f.opt.ChunkSize > 50 {
		t.Skip("this test requires small chunks")
	}
	const dir = "corrupted"
	ctx := context.Background()
	saveOpt := f.opt
	defer func() {
		f.opt.FailHard = false
		_ = operations.Purge(ctx, f.base, dir)
		f.opt = saveOpt
	}()
	f.opt.FailHard = true

	contents := random.String(250)
	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
	const overlapMessage = "chunk overlap"

	assertOverlapError := func(err error) {
		assert.Error(t, err)
		if err != nil {
			assert.Contains(t, err.Error(), overlapMessage)
		}
	}

	newFile := func(name string) fs.Object {
		item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime}
		_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
		require.NotNil(t, obj)
		return obj
	}
	billyObj := newFile("billy")

	billyChunkName := func(chunkNo int) string {
		return f.makeChunkName(billyObj.Remote(), chunkNo, "", -1)
	}

	err := f.Mkdir(ctx, billyChunkName(1))
	assertOverlapError(err)

	_, err = f.Move(ctx, newFile("silly1"), billyChunkName(2))
	assert.Error(t, err)
	assert.True(t, err == fs.ErrorCantMove || (err != nil && strings.Contains(err.Error(), overlapMessage)))

	_, err = f.Copy(ctx, newFile("silly2"), billyChunkName(3))
	assert.Error(t, err)
	assert.True(t, err == fs.ErrorCantCopy || (err != nil && strings.Contains(err.Error(), overlapMessage)))

	// accessing chunks in strict mode is prohibited
	f.opt.FailHard = true
	billyChunk4Name := billyChunkName(4)
	billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
	assertOverlapError(err)

	f.opt.FailHard = false
	billyChunk4, err = f.NewObject(ctx, billyChunk4Name)
	assert.NoError(t, err)
	require.NotNil(t, billyChunk4)

	f.opt.FailHard = true
	_, err = f.Put(ctx, bytes.NewBufferString(contents), billyChunk4)
	assertOverlapError(err)

	// you can freely read chunks (if you have an object)
	r, err := billyChunk4.Open(ctx)
	assert.NoError(t, err)
	var chunkContents []byte
	assert.NotPanics(t, func() {
		chunkContents, err = ioutil.ReadAll(r)
		_ = r.Close()
	})
	assert.NoError(t, err)
	assert.NotEqual(t, contents, string(chunkContents))

	// but you can't change them
	err = billyChunk4.Update(ctx, bytes.NewBufferString(contents), newFile("silly3"))
	assertOverlapError(err)

	// Remove isn't special, you can't corrupt files even if you have an object
	err = billyChunk4.Remove(ctx)
	assertOverlapError(err)

	// recreate billy in case it was anyhow corrupted
	willyObj := newFile("willy")
	willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", -1)
	f.opt.FailHard = false
	willyChunk, err := f.NewObject(ctx, willyChunkName)
	f.opt.FailHard = true
	assert.NoError(t, err)
	require.NotNil(t, willyChunk)

	_, err = operations.Copy(ctx, f, willyChunk, willyChunkName, newFile("silly4"))
	assertOverlapError(err)

	// operations.Move will return error when chunker's Move refused
	// to corrupt target file, but reverts to copy/delete method
	// still trying to delete target chunk. Chunker must come to rescue.
	_, err = operations.Move(ctx, f, willyChunk, willyChunkName, newFile("silly5"))
	assertOverlapError(err)
	r, err = willyChunk.Open(ctx)
	assert.NoError(t, err)
	assert.NotPanics(t, func() {
		_, err = ioutil.ReadAll(r)
		_ = r.Close()
	})
	assert.NoError(t, err)
}
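
Every collision in this test comes from the chunker's chunk naming scheme. Assuming the default format, roughly "<name>.rclone_chunk.<number>" (the real format is configurable), the guard being provoked can be sketched as:

// chunkNameRe assumes the chunker's default chunk naming; treat the
// pattern as an approximation since the format is configurable.
var chunkNameRe = regexp.MustCompile(`^(.+)\.rclone_chunk\.([0-9]+)$`)

// wouldOverlap reports whether creating remote would collide with a
// chunk of another composite file, the condition behind the
// "chunk overlap" errors asserted above. Illustrative sketch only.
func wouldOverlap(remote string) (parent string, overlap bool) {
	m := chunkNameRe.FindStringSubmatch(remote)
	if m == nil {
		return "", false
	}
	return m[1], true
}
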
func testChunkNumberOverflow(t *testing.T, f *Fs) {
	if f.opt.ChunkSize > 50 {
		t.Skip("this test requires small chunks")
	}
	const dir = "wreaked"
	const wreakNumber = 10200300
	ctx := context.Background()
	saveOpt := f.opt
	defer func() {
		f.opt.FailHard = false
		_ = operations.Purge(ctx, f.base, dir)
		f.opt = saveOpt
	}()

	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
	contents := random.String(100)

	newFile := func(f fs.Fs, name string) (fs.Object, string) {
		filename := path.Join(dir, name)
		item := fstest.Item{Path: filename, ModTime: modTime}
		_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
		require.NotNil(t, obj)
		return obj, filename
	}

	f.opt.FailHard = false
	file, fileName := newFile(f, "wreaker")
	wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", -1))

	f.opt.FailHard = false
	fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
	_, err := f.NewObject(ctx, fileName)
	assert.Error(t, err)

	f.opt.FailHard = true
	_, err = f.List(ctx, dir)
	assert.Error(t, err)
	_, err = f.NewObject(ctx, fileName)
	assert.Error(t, err)

	f.opt.FailHard = false
	_ = wreak.Remove(ctx)
	_ = file.Remove(ctx)
}
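
Planting a chunk numbered 10200300 makes the composite look damaged, so listing and NewObject must fail cleanly rather than wrap or truncate the number. The kind of bounds check involved, as a sketch (the limit constant and function are invented for illustration; imports strconv and fmt assumed):

// maxSaneChunks is an invented illustration limit, not the backend's.
const maxSaneChunks = 1 << 20

// parseChunkNumber parses a decimal chunk suffix and rejects values
// outside a plausible range instead of letting them overflow.
func parseChunkNumber(suffix string) (int, error) {
	n, err := strconv.Atoi(suffix)
	if err != nil {
		return 0, fmt.Errorf("malformed chunk number %q: %v", suffix, err)
	}
	if n < 0 || n > maxSaneChunks {
		return 0, fmt.Errorf("chunk number %d out of range", n)
	}
	return n, nil
}
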
func testMetadataInput(t *testing.T, f *Fs) {
	const minChunkForTest = 50
	if f.opt.ChunkSize < minChunkForTest {
		t.Skip("this test requires chunks that fit metadata")
	}

	const dir = "usermeta"
	ctx := context.Background()
	saveOpt := f.opt
	defer func() {
		f.opt.FailHard = false
		_ = operations.Purge(ctx, f.base, dir)
		f.opt = saveOpt
	}()
	f.opt.FailHard = false

	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")

	putFile := func(f fs.Fs, name, contents, message string, check bool) fs.Object {
		item := fstest.Item{Path: name, ModTime: modTime}
		_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
		assert.NotNil(t, obj, message)
		return obj
	}

	runSubtest := func(contents, name string) {
		description := fmt.Sprintf("file with %s metadata", name)
		filename := path.Join(dir, name)
		require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")

		part := putFile(f.base, f.makeChunkName(filename, 0, "", -1), "oops", "", true)
		_ = putFile(f, filename, contents, "upload "+description, false)

		obj, err := f.NewObject(ctx, filename)
		assert.NoError(t, err, "access "+description)
		assert.NotNil(t, obj)
		assert.Equal(t, int64(len(contents)), obj.Size(), "size "+description)

		o, ok := obj.(*Object)
		assert.NotNil(t, ok)
		if o != nil {
			assert.True(t, o.isComposite() && len(o.chunks) == 1, description+" is forced composite")
			o = nil
		}

		defer func() {
			_ = obj.Remove(ctx)
			_ = part.Remove(ctx)
		}()

		r, err := obj.Open(ctx)
		assert.NoError(t, err, "open "+description)
		assert.NotNil(t, r, "open stream of "+description)
		if err == nil && r != nil {
			data, err := ioutil.ReadAll(r)
			assert.NoError(t, err, "read all of "+description)
			assert.Equal(t, contents, string(data), description+" contents is ok")
			_ = r.Close()
		}
	}

	metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "")
	require.NoError(t, err)
	todaysMeta := string(metaData)
	runSubtest(todaysMeta, "today")

	pastMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":1`)
	pastMeta = regexp.MustCompile(`"size":[0-9]+`).ReplaceAllLiteralString(pastMeta, `"size":0`)
	runSubtest(pastMeta, "past")

	futureMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":999`)
	futureMeta = regexp.MustCompile(`"nchunks":[0-9]+`).ReplaceAllLiteralString(futureMeta, `"nchunks":0,"x":"y"`)
	runSubtest(futureMeta, "future")
}
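
The subtests fabricate user uploads that happen to look like the chunker's own simple JSON metadata. Judging only by the keys the regexps rewrite above, the object has roughly this shape; the hash fields are an assumption added for illustration:

// metaSimple approximates the object emitted by marshalSimpleJSON. The
// "ver", "size" and "nchunks" keys come from the regexp rewrites in
// testMetadataInput; md5/sha1 are assumed fields, not confirmed here.
type metaSimple struct {
	Ver     int    `json:"ver"`
	Size    int64  `json:"size"`
	NChunks int    `json:"nchunks"`
	MD5     string `json:"md5,omitempty"`
	SHA1    string `json:"sha1,omitempty"`
}

// encodeMeta renders the metadata the way the tests consume it: as the
// whole contents of the small meta object stored beside the chunks.
func encodeMeta(m metaSimple) ([]byte, error) {
	return json.Marshal(m)
}
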
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
	t.Run("PutLarge", func(t *testing.T) {
		if *UploadKilobytes <= 0 {
			t.Skip("-upload-kilobytes is not set")
		}
		testPutLarge(t, f, *UploadKilobytes)
	})
	t.Run("ChunkNameFormat", func(t *testing.T) {
		testChunkNameFormat(t, f)
	})
	t.Run("SmallFileInternals", func(t *testing.T) {
		testSmallFileInternals(t, f)
	})
	t.Run("PreventCorruption", func(t *testing.T) {
		testPreventCorruption(t, f)
	})
	t.Run("ChunkNumberOverflow", func(t *testing.T) {
		testChunkNumberOverflow(t, f)
	})
	t.Run("MetadataInput", func(t *testing.T) {
		testMetadataInput(t, f)
	})
}

var _ fstests.InternalTester = (*Fs)(nil)
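
The compile-time assertion above is what makes these tests reachable from the generic backend test driver. A sketch of the dispatch it implies (the driver's actual wiring may differ):

// runInternalTests shows how a test driver can discover and invoke
// (*Fs).InternalTest through the fstests.InternalTester interface.
func runInternalTests(t *testing.T, f fs.Fs) {
	if it, ok := f.(fstests.InternalTester); ok {
		t.Run("Internal", it.InternalTest)
	}
}
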
@@ -1,58 +0,0 @@
// Test the Chunker filesystem interface
package chunker_test

import (
	"flag"
	"os"
	"path/filepath"
	"testing"

	_ "github.com/rclone/rclone/backend/all" // for integration tests
	"github.com/rclone/rclone/backend/chunker"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
)

// Command line flags
var (
	// Invalid characters are not supported by some remotes, eg. Mailru.
	// We enable testing with invalid characters when -remote is not set, so
	// chunker overlays a local directory, but invalid characters are disabled
	// by default when -remote is set, eg. when test_all runs backend tests.
	// You can still test with invalid characters using the below flag.
	UseBadChars = flag.Bool("bad-chars", false, "Set to test bad characters in file names when -remote is set")
)

// TestIntegration runs integration tests against a concrete remote
// set by the -remote flag. If the flag is not set, it creates a
// dynamic chunker overlay wrapping a local temporary directory.
func TestIntegration(t *testing.T) {
	opt := fstests.Opt{
		RemoteName:               *fstest.RemoteName,
		NilObject:                (*chunker.Object)(nil),
		SkipBadWindowsCharacters: !*UseBadChars,
		UnimplementableObjectMethods: []string{
			"MimeType",
			"GetTier",
			"SetTier",
		},
		UnimplementableFsMethods: []string{
			"PublicLink",
			"OpenWriterAt",
			"MergeDirs",
			"DirCacheFlush",
			"UserInfo",
			"Disconnect",
		},
	}
	if *fstest.RemoteName == "" {
		name := "TestChunker"
		opt.RemoteName = name + ":"
		tempDir := filepath.Join(os.TempDir(), "rclone-chunker-test-standard")
		opt.ExtraConfig = []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "chunker"},
			{Name: name, Key: "remote", Value: tempDir},
		}
	}
	fstests.Run(t, &opt)
}
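
To point TestIntegration at a concrete backend instead of the self-configured overlay, the usual rclone test invocation applies; the remote name below is an example and must exist in the rclone config:

// Assumed invocation for running against a real remote:
//
//	go test -v ./backend/chunker -remote TestChunker:
//
// Without -remote the test builds the dynamic chunker overlay above.
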
@@ -2,7 +2,6 @@ package crypt
 
 import (
 	"bytes"
-	"context"
 	"crypto/aes"
 	gocipher "crypto/cipher"
 	"crypto/rand"
@@ -14,13 +13,15 @@ import (
 	"sync"
 	"unicode/utf8"
 
+	"github.com/ncw/rclone/backend/crypt/pkcs7"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/accounting"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/backend/crypt/pkcs7"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/accounting"
-	"github.com/rfjakob/eme"
 	"golang.org/x/crypto/nacl/secretbox"
 	"golang.org/x/crypto/scrypt"
+
+	"github.com/rfjakob/eme"
 )
 
 // Constants
@@ -42,7 +43,6 @@ var (
 	ErrorBadDecryptControlChar   = errors.New("bad decryption - contains control chars")
 	ErrorNotAMultipleOfBlocksize = errors.New("not a multiple of blocksize")
 	ErrorTooShortAfterDecode     = errors.New("too short after base32 decode")
-	ErrorTooLongAfterDecode      = errors.New("too long after base32 decode")
 	ErrorEncryptedFileTooShort   = errors.New("file is too short to be encrypted")
 	ErrorEncryptedFileBadHeader  = errors.New("file has truncated block header")
 	ErrorEncryptedBadMagic       = errors.New("not an encrypted file - bad magic string")
@@ -69,7 +69,7 @@ type ReadSeekCloser interface {
 }
 
 // OpenRangeSeek opens the file handle at the offset with the limit given
-type OpenRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error)
+type OpenRangeSeek func(offset, limit int64) (io.ReadCloser, error)
 
 // Cipher is used to swap out the encryption implementations
 type Cipher interface {
@@ -86,7 +86,7 @@ type Cipher interface {
 	// DecryptData
 	DecryptData(io.ReadCloser) (io.ReadCloser, error)
 	// DecryptDataSeek decrypt at a given position
-	DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
+	DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
 	// EncryptedSize calculates the size of the data when encrypted
 	EncryptedSize(int64) int64
 	// DecryptedSize calculates the size of the data when decrypted
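
Both signatures of OpenRangeSeek carry the same contract: return a reader positioned at offset and bounded by limit, with limit < 0 meaning unbounded. A minimal in-memory implementation against the context-free v1.43.1 form, mirroring the open helper in the tests further down (a sketch only; imports bytes, io and io/ioutil assumed):

// sliceOpen builds an OpenRangeSeek-style opener over an in-memory
// ciphertext; offsets are clamped rather than validated, which is fine
// for a sketch but not for production code.
func sliceOpen(ciphertext []byte) func(offset, limit int64) (io.ReadCloser, error) {
	return func(offset, limit int64) (io.ReadCloser, error) {
		end := int64(len(ciphertext))
		if offset > end {
			offset = end
		}
		if limit >= 0 && offset+limit < end {
			end = offset + limit
		}
		return ioutil.NopCloser(bytes.NewReader(ciphertext[offset:end])), nil
	}
}
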
@@ -208,6 +208,21 @@ func (c *cipher) putBlock(buf []byte) {
 	c.buffers.Put(buf)
 }
 
+// check to see if the byte string is valid with no control characters
+// from 0x00 to 0x1F and is a valid UTF-8 string
+func checkValidString(buf []byte) error {
+	for i := range buf {
+		c := buf[i]
+		if c >= 0x00 && c < 0x20 || c == 0x7F {
+			return ErrorBadDecryptControlChar
+		}
+	}
+	if !utf8.Valid(buf) {
+		return ErrorBadDecryptUTF8
+	}
+	return nil
+}
+
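
A quick illustration of the guard's accept/reject behaviour, drawn from the TestValidString table further down; note that it also rejects DEL (0x7F) in addition to the 0x00-0x1F range its comment mentions:

// validStringExamples collects representative checkValidString results.
func validStringExamples() []error {
	return []error{
		checkValidString([]byte("ordinary name.txt")), // nil
		checkValidString([]byte("bad\x01name")),       // ErrorBadDecryptControlChar
		checkValidString([]byte("del\x7f")),           // ErrorBadDecryptControlChar (DEL)
		checkValidString([]byte("\xc3\x28")),          // ErrorBadDecryptUTF8 (broken UTF-8)
	}
}
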
 // encodeFileName encodes a filename using a modified version of
 // standard base32 as described in RFC4648
 //
@@ -271,14 +286,15 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
 		// not possible if decodeFilename() working correctly
 		return "", ErrorTooShortAfterDecode
 	}
-	if len(rawCiphertext) > 2048 {
-		return "", ErrorTooLongAfterDecode
-	}
 	paddedPlaintext := eme.Transform(c.block, c.nameTweak[:], rawCiphertext, eme.DirectionDecrypt)
 	plaintext, err := pkcs7.Unpad(nameCipherBlockSize, paddedPlaintext)
 	if err != nil {
 		return "", err
 	}
+	err = checkValidString(plaintext)
+	if err != nil {
+		return "", err
+	}
 	return string(plaintext), err
 }
 
@@ -445,7 +461,7 @@ func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
 			if int(newRune) < base {
 				newRune += 256
 			}
-			_, _ = result.WriteRune(newRune)
+			_, _ = result.WriteRune(rune(newRune))
 
 		default:
 			_, _ = result.WriteRune(runeValue)
@@ -730,29 +746,29 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
 	if !bytes.Equal(readBuf[:fileMagicSize], fileMagicBytes) {
 		return nil, fh.finishAndClose(ErrorEncryptedBadMagic)
 	}
-	// retrieve the nonce
+	// retreive the nonce
 	fh.nonce.fromBuf(readBuf[fileMagicSize:])
 	fh.initialNonce = fh.nonce
 	return fh, nil
 }
 
 // newDecrypterSeek creates a new file handle decrypting on the fly
-func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
+func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
 	var rc io.ReadCloser
 	doRangeSeek := false
 	setLimit := false
 	// Open initially with no seek
 	if offset == 0 && limit < 0 {
 		// If no offset or limit then open whole file
-		rc, err = open(ctx, 0, -1)
+		rc, err = open(0, -1)
 	} else if offset == 0 {
 		// If no offset open the header + limit worth of the file
 		_, underlyingLimit, _, _ := calculateUnderlying(offset, limit)
-		rc, err = open(ctx, 0, int64(fileHeaderSize)+underlyingLimit)
+		rc, err = open(0, int64(fileHeaderSize)+underlyingLimit)
 		setLimit = true
 	} else {
 		// Otherwise just read the header to start with
-		rc, err = open(ctx, 0, int64(fileHeaderSize))
+		rc, err = open(0, int64(fileHeaderSize))
 		doRangeSeek = true
 	}
 	if err != nil {
@@ -765,7 +781,7 @@ func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offse
 	}
 	fh.open = open // will be called by fh.RangeSeek
 	if doRangeSeek {
-		_, err = fh.RangeSeek(ctx, offset, io.SeekStart, limit)
+		_, err = fh.RangeSeek(offset, io.SeekStart, limit)
 		if err != nil {
 			_ = fh.Close()
 			return nil, err
@@ -885,7 +901,7 @@ func calculateUnderlying(offset, limit int64) (underlyingOffset, underlyingLimit
 // limiting the total length to limit.
 //
 // RangeSeek with a limit of < 0 is equivalent to a regular Seek.
-func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, limit int64) (int64, error) {
+func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, error) {
 	fh.mu.Lock()
 	defer fh.mu.Unlock()
 
@@ -912,7 +928,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
 	// Can we seek underlying stream directly?
 	if do, ok := fh.rc.(fs.RangeSeeker); ok {
 		// Seek underlying stream directly
-		_, err := do.RangeSeek(ctx, underlyingOffset, 0, underlyingLimit)
+		_, err := do.RangeSeek(underlyingOffset, 0, underlyingLimit)
 		if err != nil {
 			return 0, fh.finish(err)
 		}
@@ -922,7 +938,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
 		fh.rc = nil
 
 		// Re-open the underlying object with the offset given
-		rc, err := fh.open(ctx, underlyingOffset, underlyingLimit)
+		rc, err := fh.open(underlyingOffset, underlyingLimit)
 		if err != nil {
 			return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
 		}
@@ -951,7 +967,7 @@ func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, li
 
 // Seek implements the io.Seeker interface
 func (fh *decrypter) Seek(offset int64, whence int) (int64, error) {
-	return fh.RangeSeek(context.TODO(), offset, whence, -1)
+	return fh.RangeSeek(offset, whence, -1)
 }
 
 // finish sets the final error and tidies up
@@ -1025,8 +1041,8 @@ func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
 // The open function must return a ReadCloser opened to the offset supplied
 //
 // You must use this form of DecryptData if you might want to Seek the file handle
-func (c *cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
-	out, err := c.newDecrypterSeek(ctx, open, offset, limit)
+func (c *cipher) DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
+	out, err := c.newDecrypterSeek(open, offset, limit)
 	if err != nil {
 		return nil, err
 	}
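
DecryptDataSeek ties the pieces together: it opens the ciphertext via the supplied OpenRangeSeek and returns a handle that re-opens the underlying stream when seeked. A hedged usage sketch against the v1.43.1 signatures, reusing the sliceOpen sketch from earlier (c is a Cipher from NewCipher; imports io/ioutil assumed):

// readDecryptedRange decrypts limit plaintext bytes starting at offset.
func readDecryptedRange(c Cipher, ciphertext []byte, offset, limit int64) ([]byte, error) {
	rc, err := c.DecryptDataSeek(sliceOpen(ciphertext), offset, limit)
	if err != nil {
		return nil, err
	}
	defer func() { _ = rc.Close() }()
	return ioutil.ReadAll(rc)
}
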
@@ -2,7 +2,6 @@ package crypt
 
 import (
 	"bytes"
-	"context"
 	"encoding/base32"
 	"fmt"
 	"io"
@@ -10,8 +9,8 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/ncw/rclone/backend/crypt/pkcs7"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/backend/crypt/pkcs7"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -44,6 +43,69 @@ func TestNewNameEncryptionModeString(t *testing.T) {
 	assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3")
 }
 
+func TestValidString(t *testing.T) {
+	for _, test := range []struct {
+		in       string
+		expected error
+	}{
+		{"", nil},
+		{"\x01", ErrorBadDecryptControlChar},
+		{"a\x02", ErrorBadDecryptControlChar},
+		{"abc\x03", ErrorBadDecryptControlChar},
+		{"abc\x04def", ErrorBadDecryptControlChar},
+		{"\x05d", ErrorBadDecryptControlChar},
+		{"\x06def", ErrorBadDecryptControlChar},
+		{"\x07", ErrorBadDecryptControlChar},
+		{"\x08", ErrorBadDecryptControlChar},
+		{"\x09", ErrorBadDecryptControlChar},
+		{"\x0A", ErrorBadDecryptControlChar},
+		{"\x0B", ErrorBadDecryptControlChar},
+		{"\x0C", ErrorBadDecryptControlChar},
+		{"\x0D", ErrorBadDecryptControlChar},
+		{"\x0E", ErrorBadDecryptControlChar},
+		{"\x0F", ErrorBadDecryptControlChar},
+		{"\x10", ErrorBadDecryptControlChar},
+		{"\x11", ErrorBadDecryptControlChar},
+		{"\x12", ErrorBadDecryptControlChar},
+		{"\x13", ErrorBadDecryptControlChar},
+		{"\x14", ErrorBadDecryptControlChar},
+		{"\x15", ErrorBadDecryptControlChar},
+		{"\x16", ErrorBadDecryptControlChar},
+		{"\x17", ErrorBadDecryptControlChar},
+		{"\x18", ErrorBadDecryptControlChar},
+		{"\x19", ErrorBadDecryptControlChar},
+		{"\x1A", ErrorBadDecryptControlChar},
+		{"\x1B", ErrorBadDecryptControlChar},
+		{"\x1C", ErrorBadDecryptControlChar},
+		{"\x1D", ErrorBadDecryptControlChar},
+		{"\x1E", ErrorBadDecryptControlChar},
+		{"\x1F", ErrorBadDecryptControlChar},
+		{"\x20", nil},
+		{"\x7E", nil},
+		{"\x7F", ErrorBadDecryptControlChar},
+		{"£100", nil},
+		{`hello? sausage/êé/Hello, 世界/ " ' @ < > & ?/z.txt`, nil},
+		{"£100", nil},
+		// Following tests from https://secure.php.net/manual/en/reference.pcre.pattern.modifiers.php#54805
+		{"a", nil},                                         // Valid ASCII
+		{"\xc3\xb1", nil},                                  // Valid 2 Octet Sequence
+		{"\xc3\x28", ErrorBadDecryptUTF8},                  // Invalid 2 Octet Sequence
+		{"\xa0\xa1", ErrorBadDecryptUTF8},                  // Invalid Sequence Identifier
+		{"\xe2\x82\xa1", nil},                              // Valid 3 Octet Sequence
+		{"\xe2\x28\xa1", ErrorBadDecryptUTF8},              // Invalid 3 Octet Sequence (in 2nd Octet)
+		{"\xe2\x82\x28", ErrorBadDecryptUTF8},              // Invalid 3 Octet Sequence (in 3rd Octet)
+		{"\xf0\x90\x8c\xbc", nil},                          // Valid 4 Octet Sequence
+		{"\xf0\x28\x8c\xbc", ErrorBadDecryptUTF8},          // Invalid 4 Octet Sequence (in 2nd Octet)
+		{"\xf0\x90\x28\xbc", ErrorBadDecryptUTF8},          // Invalid 4 Octet Sequence (in 3rd Octet)
+		{"\xf0\x28\x8c\x28", ErrorBadDecryptUTF8},          // Invalid 4 Octet Sequence (in 4th Octet)
+		{"\xf8\xa1\xa1\xa1\xa1", ErrorBadDecryptUTF8},      // Valid 5 Octet Sequence (but not Unicode!)
+		{"\xfc\xa1\xa1\xa1\xa1\xa1", ErrorBadDecryptUTF8},  // Valid 6 Octet Sequence (but not Unicode!)
+	} {
+		actual := checkValidString([]byte(test.in))
+		assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
+	}
+}
+
 func TestEncodeFileName(t *testing.T) {
 	for _, test := range []struct {
 		in string
@@ -132,10 +194,6 @@ func TestEncryptSegment(t *testing.T) {
 
 func TestDecryptSegment(t *testing.T) {
 	// We've tested the forwards above, now concentrate on the errors
-	longName := make([]byte, 3328)
-	for i := range longName {
-		longName[i] = 'a'
-	}
 	c, _ := newCipher(NameEncryptionStandard, "", "", true)
 	for _, test := range []struct {
 		in string
@@ -143,10 +201,11 @@ func TestDecryptSegment(t *testing.T) {
 	}{
 		{"64=", ErrorBadBase32Encoding},
 		{"!", base32.CorruptInputError(0)},
-		{string(longName), ErrorTooLongAfterDecode},
 		{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
 		{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
 		{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
+		{c.encryptSegment("\x01"), ErrorBadDecryptControlChar},
+		{c.encryptSegment("\xc3\x28"), ErrorBadDecryptUTF8},
 	} {
 		actual, actualErr := c.decryptSegment(test.in)
 		assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
@@ -640,16 +699,16 @@ var (
 
 // Test test infrastructure first!
 func TestRandomSource(t *testing.T) {
-	source := newRandomSource(1e8)
-	sink := newRandomSource(1e8)
+	source := newRandomSource(1E8)
+	sink := newRandomSource(1E8)
 	n, err := io.Copy(sink, source)
 	assert.NoError(t, err)
-	assert.Equal(t, int64(1e8), n)
+	assert.Equal(t, int64(1E8), n)
 
-	source = newRandomSource(1e8)
+	source = newRandomSource(1E8)
 	buf := make([]byte, 16)
 	_, _ = source.Read(buf)
-	sink = newRandomSource(1e8)
+	sink = newRandomSource(1E8)
 	_, err = io.Copy(sink, source)
 	assert.Error(t, err, "Error in stream")
 }
@@ -689,23 +748,23 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
 }
 
 func TestEncryptDecrypt1(t *testing.T) {
-	testEncryptDecrypt(t, 1, 1e7)
+	testEncryptDecrypt(t, 1, 1E7)
}
 
 func TestEncryptDecrypt32(t *testing.T) {
-	testEncryptDecrypt(t, 32, 1e8)
+	testEncryptDecrypt(t, 32, 1E8)
 }
 
 func TestEncryptDecrypt4096(t *testing.T) {
-	testEncryptDecrypt(t, 4096, 1e8)
+	testEncryptDecrypt(t, 4096, 1E8)
 }
 
 func TestEncryptDecrypt65536(t *testing.T) {
-	testEncryptDecrypt(t, 65536, 1e8)
+	testEncryptDecrypt(t, 65536, 1E8)
 }
 
 func TestEncryptDecrypt65537(t *testing.T) {
-	testEncryptDecrypt(t, 65537, 1e8)
+	testEncryptDecrypt(t, 65537, 1E8)
 }
 
 var (
@@ -738,7 +797,7 @@ func TestEncryptData(t *testing.T) {
 	} {
 		c, err := newCipher(NameEncryptionStandard, "", "", true)
 		assert.NoError(t, err)
-		c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
+		c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
 
 		// Check encode works
 		buf := bytes.NewBuffer(test.in)
@@ -761,7 +820,7 @@ func TestEncryptData(t *testing.T) {
 func TestNewEncrypter(t *testing.T) {
 	c, err := newCipher(NameEncryptionStandard, "", "", true)
 	assert.NoError(t, err)
-	c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
+	c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
 
 	z := &zeroes{}
 
@@ -788,7 +847,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
 	fh, err := c.newEncrypter(in, nil)
 	assert.NoError(t, err)
 
-	n, err := io.CopyN(ioutil.Discard, fh, 1e6)
+	n, err := io.CopyN(ioutil.Discard, fh, 1E6)
 	assert.Equal(t, io.ErrUnexpectedEOF, err)
 	assert.Equal(t, int64(32), n)
 }
@@ -820,7 +879,7 @@ func (c *closeDetector) Close() error {
 func TestNewDecrypter(t *testing.T) {
 	c, err := newCipher(NameEncryptionStandard, "", "", true)
 	assert.NoError(t, err)
-	c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
+	c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
 
 	cd := newCloseDetector(bytes.NewBuffer(file0))
 	fh, err := c.newDecrypter(cd)
@@ -871,7 +930,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
 	fh, err := c.newDecrypter(in)
 	assert.NoError(t, err)
 
-	n, err := io.CopyN(ioutil.Discard, fh, 1e6)
+	n, err := io.CopyN(ioutil.Discard, fh, 1E6)
 	assert.Equal(t, io.ErrUnexpectedEOF, err)
 	assert.Equal(t, int64(16), n)
 }
@@ -901,7 +960,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 
 	// Open stream with a seek of underlyingOffset
 	var reader io.ReadCloser
-	open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
+	open := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
 		end := len(ciphertext)
 		if underlyingLimit >= 0 {
 			end = int(underlyingOffset + underlyingLimit)
@@ -942,7 +1001,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 			if offset+limit > len(plaintext) {
 				continue
 			}
-			rc, err := c.DecryptDataSeek(context.Background(), open, int64(offset), int64(limit))
+			rc, err := c.DecryptDataSeek(open, int64(offset), int64(limit))
 			assert.NoError(t, err)
 
 			check(rc, offset, limit)
@@ -950,14 +1009,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 	}
 
 	// Try decoding it with a single open and lots of seeks
-	fh, err := c.DecryptDataSeek(context.Background(), open, 0, -1)
+	fh, err := c.DecryptDataSeek(open, 0, -1)
 	assert.NoError(t, err)
 	for _, offset := range trials {
 		for _, limit := range limits {
 			if offset+limit > len(plaintext) {
 				continue
 			}
-			_, err := fh.RangeSeek(context.Background(), int64(offset), io.SeekStart, int64(limit))
+			_, err := fh.RangeSeek(int64(offset), io.SeekStart, int64(limit))
 			assert.NoError(t, err)
 
 			check(fh, offset, limit)
@@ -1008,7 +1067,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 	} {
 		what := fmt.Sprintf("offset = %d, limit = %d", test.offset, test.limit)
 		callCount := 0
-		testOpen := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
+		testOpen := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
 			switch callCount {
 			case 0:
 				assert.Equal(t, int64(0), underlyingOffset, what)
@@ -1020,11 +1079,11 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 				t.Errorf("Too many calls %d for %s", callCount+1, what)
 			}
 			callCount++
-			return open(ctx, underlyingOffset, underlyingLimit)
+			return open(underlyingOffset, underlyingLimit)
 		}
-		fh, err := c.DecryptDataSeek(context.Background(), testOpen, 0, -1)
+		fh, err := c.DecryptDataSeek(testOpen, 0, -1)
 		assert.NoError(t, err)
-		gotOffset, err := fh.RangeSeek(context.Background(), test.offset, io.SeekStart, test.limit)
+		gotOffset, err := fh.RangeSeek(test.offset, io.SeekStart, test.limit)
 		assert.NoError(t, err)
 		assert.Equal(t, gotOffset, test.offset)
 	}
@@ -2,20 +2,19 @@
 package crypt
 
 import (
-	"context"
 	"fmt"
 	"io"
+	"path"
 	"strings"
 	"time"
 
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/accounting"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
+	"github.com/ncw/rclone/fs/config/obscure"
+	"github.com/ncw/rclone/fs/hash"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/accounting"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/config/obscure"
-	"github.com/rclone/rclone/fs/fspath"
-	"github.com/rclone/rclone/fs/hash"
 )
 
 // Globals
@@ -68,16 +67,8 @@ func init() {
 			Help:       "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
 			IsPassword: true,
 		}, {
 			Name: "show_mapping",
-			Help: `For all files listed show how the names encrypt.
-
-If this flag is set then for each file that the remote is asked to
-list, it will log (at level INFO) a line stating the decrypted file
-name and the encrypted file name.
-
-This is so you can work out which encrypted names are which decrypted
-names just in case you need to do something with the encrypted file
-names, or for debugging purposes.`,
+			Help:     "For all files listed show how the names encrypt.",
 			Default:  false,
 			Hide:     fs.OptionHideConfigurator,
 			Advanced: true,
@@ -123,7 +114,7 @@ func NewCipher(m configmap.Mapper) (Cipher, error) {
 	return newCipherForConfig(opt)
 }
 
-// NewFs constructs an Fs from the path, container:path
+// NewFs contstructs an Fs from the path, container:path
 func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
@@ -139,20 +130,16 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 	if strings.HasPrefix(remote, name+":") {
 		return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
 	}
-	wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
-	}
 	// Look for a file first
-	remotePath := fspath.JoinRootPath(wPath, cipher.EncryptFileName(rpath))
-	wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
+	remotePath := path.Join(remote, cipher.EncryptFileName(rpath))
+	wrappedFs, err := fs.NewFs(remotePath)
 	// if that didn't produce a file, look for a directory
 	if err != fs.ErrorIsFile {
-		remotePath = fspath.JoinRootPath(wPath, cipher.EncryptDirName(rpath))
-		wrappedFs, err = wInfo.NewFs(wName, remotePath, wConfig)
+		remotePath = path.Join(remote, cipher.EncryptDirName(rpath))
+		wrappedFs, err = fs.NewFs(remotePath)
 	}
 	if err != fs.ErrorIsFile && err != nil {
-		return nil, errors.Wrapf(err, "failed to make remote %s:%q to wrap", wName, remotePath)
+		return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remotePath)
 	}
 	f := &Fs{
 		Fs: wrappedFs,
@@ -170,10 +157,23 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 		WriteMimeType:           false,
 		BucketBased:             true,
 		CanHaveEmptyDirectories: true,
-		SetTier:                 true,
-		GetTier:                 true,
 	}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
 
+	doChangeNotify := wrappedFs.Features().ChangeNotify
+	if doChangeNotify != nil {
+		f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
+			wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
+				decrypted, err := f.DecryptFileName(path)
+				if err != nil {
+					fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
+					return
+				}
+				notifyFunc(decrypted, entryType)
+			}
+			return doChangeNotify(wrappedNotifyFunc, pollInterval)
+		}
+	}
+
 	return f, err
 }
 
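
The closure above is the whole trick: events arrive from the wrapped backend with encrypted names and are re-published with decrypted ones. A sketch of consuming it through the crypt Fs (the poll interval and log line are illustrative):

// watchDecrypted subscribes to change notifications on a crypt-wrapped
// backend; names reaching the callback are already decrypted by the
// wrapper installed in NewFs above. Illustrative sketch only.
func watchDecrypted(f fs.Fs) chan bool {
	notify := f.Features().ChangeNotify
	if notify == nil {
		return nil // wrapped backend offers no change notifications
	}
	return notify(func(name string, entryType fs.EntryType) {
		fs.Infof(nil, "change in %q (%v)", name, entryType)
	}, 10*time.Second)
}
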
@@ -190,7 +190,6 @@ type Options struct {
|
|||||||
// Fs represents a wrapped fs.Fs
|
// Fs represents a wrapped fs.Fs
|
||||||
type Fs struct {
|
type Fs struct {
|
||||||
fs.Fs
|
fs.Fs
|
||||||
wrapper fs.Fs
|
|
||||||
name string
|
name string
|
||||||
root string
|
root string
|
||||||
opt Options
|
opt Options
|
||||||
@@ -233,7 +232,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Encrypt an directory file name to entries.
|
// Encrypt an directory file name to entries.
|
||||||
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
|
func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
|
||||||
remote := dir.Remote()
|
remote := dir.Remote()
|
||||||
decryptedRemote, err := f.cipher.DecryptDirName(remote)
|
decryptedRemote, err := f.cipher.DecryptDirName(remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -243,18 +242,18 @@ func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Director
|
|||||||
if f.opt.ShowMapping {
|
if f.opt.ShowMapping {
|
||||||
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
|
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
|
||||||
}
|
}
|
||||||
*entries = append(*entries, f.newDir(ctx, dir))
|
*entries = append(*entries, f.newDir(dir))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Encrypt some directory entries. This alters entries returning it as newEntries.
|
// Encrypt some directory entries. This alters entries returning it as newEntries.
|
||||||
func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
|
func (f *Fs) encryptEntries(entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
|
||||||
newEntries = entries[:0] // in place filter
|
newEntries = entries[:0] // in place filter
|
||||||
for _, entry := range entries {
|
for _, entry := range entries {
|
||||||
switch x := entry.(type) {
|
switch x := entry.(type) {
|
||||||
case fs.Object:
|
case fs.Object:
|
||||||
f.add(&newEntries, x)
|
f.add(&newEntries, x)
|
||||||
case fs.Directory:
|
case fs.Directory:
|
||||||
f.addDir(ctx, &newEntries, x)
|
f.addDir(&newEntries, x)
|
||||||
default:
|
default:
|
||||||
return nil, errors.Errorf("Unknown object type %T", entry)
|
return nil, errors.Errorf("Unknown object type %T", entry)
|
||||||
}
|
}
|
||||||
@@ -271,12 +270,12 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
|
|||||||
//
|
//
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
// found.
|
// found.
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||||
entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
|
entries, err = f.Fs.List(f.cipher.EncryptDirName(dir))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return f.encryptEntries(ctx, entries)
|
return f.encryptEntries(entries)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListR lists the objects and directories of the Fs starting
|
// ListR lists the objects and directories of the Fs starting
|
||||||
@@ -295,9 +294,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
//
|
//
|
||||||
// Don't implement this unless you have a more efficient way
|
// Don't implement this unless you have a more efficient way
|
||||||
// of listing recursively that doing a directory traversal.
|
// of listing recursively that doing a directory traversal.
|
||||||
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
||||||
return f.Fs.Features().ListR(ctx, f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
|
return f.Fs.Features().ListR(f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
|
||||||
newEntries, err := f.encryptEntries(ctx, entries)
|
newEntries, err := f.encryptEntries(entries)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -306,18 +305,18 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewObject finds the Object at remote.
|
// NewObject finds the Object at remote.
|
||||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||||
o, err := f.Fs.NewObject(ctx, f.cipher.EncryptFileName(remote))
|
o, err := f.Fs.NewObject(f.cipher.EncryptFileName(remote))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return f.newObject(o), nil
|
return f.newObject(o), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
|
type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
|
||||||
|
|
||||||
// put implements Put or PutStream
|
// put implements Put or PutStream
|
||||||
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
|
func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
|
||||||
// Encrypt the data into wrappedIn
|
// Encrypt the data into wrappedIn
|
||||||
wrappedIn, err := f.cipher.EncryptData(in)
|
wrappedIn, err := f.cipher.EncryptData(in)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -343,7 +342,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Transfer the data
|
// Transfer the data
|
||||||
o, err := put(ctx, wrappedIn, f.newObjectInfo(src), options...)
|
o, err := put(wrappedIn, f.newObjectInfo(src), options...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -352,13 +351,13 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
|
|||||||
if ht != hash.None && hasher != nil {
|
if ht != hash.None && hasher != nil {
|
||||||
srcHash := hasher.Sums()[ht]
|
srcHash := hasher.Sums()[ht]
|
||||||
var dstHash string
|
var dstHash string
|
||||||
dstHash, err = o.Hash(ctx, ht)
|
dstHash, err = o.Hash(ht)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to read destination hash")
|
return nil, errors.Wrap(err, "failed to read destination hash")
|
||||||
}
|
}
|
||||||
if srcHash != "" && dstHash != "" && srcHash != dstHash {
|
if srcHash != "" && dstHash != "" && srcHash != dstHash {
|
||||||
// remove object
|
// remove object
|
||||||
err = o.Remove(ctx)
|
err = o.Remove()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
||||||
}
|
}
|
||||||
@@ -374,13 +373,13 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
|
|||||||
// May create the object even if it returns an error - if so
|
// May create the object even if it returns an error - if so
|
||||||
// will return the object and the error, otherwise will return
|
// will return the object and the error, otherwise will return
|
||||||
// nil and the error
|
// nil and the error
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
return f.put(ctx, in, src, options, f.Fs.Put)
|
return f.put(in, src, options, f.Fs.Put)
|
||||||
}
|
}
|
||||||
|
|
||||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
return f.put(ctx, in, src, options, f.Fs.Features().PutStream)
|
return f.put(in, src, options, f.Fs.Features().PutStream)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Hashes returns the supported hash sets.
|
// Hashes returns the supported hash sets.
|
||||||
@@ -391,15 +390,15 @@ func (f *Fs) Hashes() hash.Set {
|
|||||||
// Mkdir makes the directory (container, bucket)
|
// Mkdir makes the directory (container, bucket)
|
||||||
//
|
//
|
||||||
// Shouldn't return an error if it already exists
|
// Shouldn't return an error if it already exists
|
||||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
func (f *Fs) Mkdir(dir string) error {
|
||||||
return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
|
return f.Fs.Mkdir(f.cipher.EncryptDirName(dir))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Rmdir removes the directory (container, bucket) if empty
|
// Rmdir removes the directory (container, bucket) if empty
|
||||||
//
|
//
|
||||||
// Return an error if it doesn't exist or isn't empty
|
// Return an error if it doesn't exist or isn't empty
|
||||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
func (f *Fs) Rmdir(dir string) error {
|
||||||
return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir))
|
return f.Fs.Rmdir(f.cipher.EncryptDirName(dir))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Purge all files in the root and the root directory
|
// Purge all files in the root and the root directory
|
||||||
@@ -408,12 +407,12 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
|||||||
// quicker than just running Remove() on the result of List()
|
// quicker than just running Remove() on the result of List()
|
||||||
//
|
//
|
||||||
// Return an error if it doesn't exist
|
// Return an error if it doesn't exist
|
||||||
func (f *Fs) Purge(ctx context.Context) error {
|
func (f *Fs) Purge() error {
|
||||||
do := f.Fs.Features().Purge
|
do := f.Fs.Features().Purge
|
||||||
if do == nil {
|
if do == nil {
|
||||||
return fs.ErrorCantPurge
|
return fs.ErrorCantPurge
|
||||||
}
|
}
|
||||||
return do(ctx)
|
return do()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copy src to this remote using server side copy operations.
|
// Copy src to this remote using server side copy operations.
|
||||||
@@ -425,7 +424,7 @@ func (f *Fs) Purge(ctx context.Context) error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 	do := f.Fs.Features().Copy
 	if do == nil {
 		return nil, fs.ErrorCantCopy
@@ -434,7 +433,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	if !ok {
 		return nil, fs.ErrorCantCopy
 	}
-	oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote))
+	oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
 	if err != nil {
 		return nil, err
 	}
@@ -450,7 +449,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 	do := f.Fs.Features().Move
 	if do == nil {
 		return nil, fs.ErrorCantMove
@@ -459,7 +458,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	if !ok {
 		return nil, fs.ErrorCantMove
 	}
-	oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote))
+	oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
 	if err != nil {
 		return nil, err
 	}
@@ -474,7 +473,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 	do := f.Fs.Features().DirMove
 	if do == nil {
 		return fs.ErrorCantDirMove
@@ -484,14 +483,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 		fs.Debugf(srcFs, "Can't move directory - not same remote type")
 		return fs.ErrorCantDirMove
 	}
-	return do(ctx, srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
+	return do(srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
 }
 
 // PutUnchecked uploads the object
 //
 // This will create a duplicate if we upload a new file without
 // checking to see if there is one already - use Put() for that.
-func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	do := f.Fs.Features().PutUnchecked
 	if do == nil {
 		return nil, errors.New("can't PutUnchecked")
@@ -500,7 +499,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
 	if err != nil {
 		return nil, err
 	}
-	o, err := do(ctx, wrappedIn, f.newObjectInfo(src))
+	o, err := do(wrappedIn, f.newObjectInfo(src))
 	if err != nil {
 		return nil, err
 	}
@@ -511,21 +510,21 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
 //
 // Implement this if you have a way of emptying the trash or
 // otherwise cleaning up old versions of files.
-func (f *Fs) CleanUp(ctx context.Context) error {
+func (f *Fs) CleanUp() error {
 	do := f.Fs.Features().CleanUp
 	if do == nil {
 		return errors.New("can't CleanUp")
 	}
-	return do(ctx)
+	return do()
 }
 
 // About gets quota information from the Fs
-func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
+func (f *Fs) About() (*fs.Usage, error) {
 	do := f.Fs.Features().About
 	if do == nil {
 		return nil, errors.New("About not supported")
 	}
-	return do(ctx)
+	return do()
 }
 
 // UnWrap returns the Fs that this Fs is wrapping
@@ -533,16 +532,6 @@ func (f *Fs) UnWrap() fs.Fs {
 	return f.Fs
 }
 
-// WrapFs returns the Fs that is wrapping this Fs
-func (f *Fs) WrapFs() fs.Fs {
-	return f.wrapper
-}
-
-// SetWrapper sets the Fs that is wrapping this Fs
-func (f *Fs) SetWrapper(wrapper fs.Fs) {
-	f.wrapper = wrapper
-}
-
 // EncryptFileName returns an encrypted file name
 func (f *Fs) EncryptFileName(fileName string) string {
 	return f.cipher.EncryptFileName(fileName)
@@ -554,13 +543,13 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
 }
 
 // ComputeHash takes the nonce from o, and encrypts the contents of
-// src with it, and calculates the hash given by HashType on the fly
+// src with it, and calcuates the hash given by HashType on the fly
 //
 // Note that we break lots of encapsulation in this function.
-func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
+func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
 	// Read the nonce - opening the file is sufficient to read the nonce in
 	// use a limited read so we only read the header
-	in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
+	in, err := o.Object.Open(&fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
 	if err != nil {
 		return "", errors.Wrap(err, "failed to open object to read nonce")
 	}
@@ -590,7 +579,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
 	}
 
 	// Open the src for input
-	in, err = src.Open(ctx)
+	in, err = src.Open()
 	if err != nil {
 		return "", errors.Wrap(err, "failed to open src")
 	}
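Editorial note: the ComputeHash hunks only change the signature, but the technique is worth spelling out: read the nonce from the stored object's header (hence the limited `RangeOption` read), encrypt the plaintext source under that same nonce, and hash the ciphertext as it streams. A rough sketch of the "hash while encrypting" step, assuming a hypothetical `encryptWithNonce` function standing in for the crypt data cipher (the real code hashes with whatever hash type was requested; SHA-1 here is just for illustration):

```go
package cryptsketch

import (
	"crypto/sha1"
	"encoding/hex"
	"io"
)

// hashEncrypted re-encrypts in under nonce and hashes the ciphertext on the
// fly, so the result can be compared with the remote's stored checksum.
func hashEncrypted(in io.Reader, nonce []byte,
	encryptWithNonce func(io.Reader, []byte) (io.Reader, error)) (string, error) {
	enc, err := encryptWithNonce(in, nonce)
	if err != nil {
		return "", err
	}
	h := sha1.New()
	// Stream the ciphertext straight into the hash - nothing is buffered.
	if _, err := io.Copy(h, enc); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}
```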
@@ -615,75 +604,6 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
 	return m.Sums()[hashType], nil
 }
 
-// MergeDirs merges the contents of all the directories passed
-// in into the first one and rmdirs the other directories.
-func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
-	do := f.Fs.Features().MergeDirs
-	if do == nil {
-		return errors.New("MergeDirs not supported")
-	}
-	out := make([]fs.Directory, len(dirs))
-	for i, dir := range dirs {
-		out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
-	}
-	return do(ctx, out)
-}
-
-// DirCacheFlush resets the directory cache - used in testing
-// as an optional interface
-func (f *Fs) DirCacheFlush() {
-	do := f.Fs.Features().DirCacheFlush
-	if do != nil {
-		do()
-	}
-}
-
-// PublicLink generates a public link to the remote path (usually readable by anyone)
-func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
-	do := f.Fs.Features().PublicLink
-	if do == nil {
-		return "", errors.New("PublicLink not supported")
-	}
-	o, err := f.NewObject(ctx, remote)
-	if err != nil {
-		// assume it is a directory
-		return do(ctx, f.cipher.EncryptDirName(remote))
-	}
-	return do(ctx, o.(*Object).Object.Remote())
-}
-
-// ChangeNotify calls the passed function with a path
-// that has had changes. If the implementation
-// uses polling, it should adhere to the given interval.
-func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
-	do := f.Fs.Features().ChangeNotify
-	if do == nil {
-		return
-	}
-	wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
-		// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
-		var (
-			err       error
-			decrypted string
-		)
-		switch entryType {
-		case fs.EntryDirectory:
-			decrypted, err = f.cipher.DecryptDirName(path)
-		case fs.EntryObject:
-			decrypted, err = f.cipher.DecryptFileName(path)
-		default:
-			fs.Errorf(path, "crypt ChangeNotify: ignoring unknown EntryType %d", entryType)
-			return
-		}
-		if err != nil {
-			fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
-			return
-		}
-		notifyFunc(decrypted, entryType)
-	}
-	do(ctx, wrappedNotifyFunc, pollIntervalChan)
-}
-
 // Object describes a wrapped for being read from the Fs
 //
 // This decrypts the remote name and decrypts the data
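Editorial note: every method removed in the hunk above follows one idiom, look the optional feature up on the wrapped remote's feature table, bail out if it is absent, otherwise delegate with names translated. A condensed sketch of that guard with hypothetical types (the `Features` struct here is an illustration, not rclone's actual type):

```go
package cryptsketch

import "errors"

// Features is a hypothetical stand-in for the wrapped remote's optional
// feature table; a nil function pointer means "not supported".
type Features struct {
	CleanUp func() error
}

func cleanUp(feat *Features) error {
	do := feat.CleanUp
	if do == nil {
		// The wrapped remote doesn't implement the feature, so neither do we.
		return errors.New("can't CleanUp")
	}
	return do()
}
```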
@@ -734,7 +654,7 @@ func (o *Object) Size() int64 {
 
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
-func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
+func (o *Object) Hash(ht hash.Type) (string, error) {
 	return "", hash.ErrUnsupported
 }
 
@@ -744,7 +664,7 @@ func (o *Object) UnWrap() fs.Object {
 }
 
 // Open opens the file for read. Call Close() on the returned io.ReadCloser
-func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
+func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
 	var openOptions []fs.OpenOption
 	var offset, limit int64 = 0, -1
 	for _, option := range options {
@@ -758,10 +678,10 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 			openOptions = append(openOptions, option)
 		}
 	}
-	rc, err = o.f.cipher.DecryptDataSeek(ctx, func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
+	rc, err = o.f.cipher.DecryptDataSeek(func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
 		if underlyingOffset == 0 && underlyingLimit < 0 {
 			// Open with no seek
-			return o.Object.Open(ctx, openOptions...)
+			return o.Object.Open(openOptions...)
 		}
 		// Open stream with a range of underlyingOffset, underlyingLimit
 		end := int64(-1)
@@ -772,7 +692,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 			}
 		}
 		newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end})
-		return o.Object.Open(ctx, newOpenOptions...)
+		return o.Object.Open(newOpenOptions...)
 	}, offset, limit)
 	if err != nil {
 		return nil, err
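Editorial note: the `Open` hunks show `DecryptDataSeek` handing the backend an underlying offset and limit, i.e. a plaintext range request is translated into a ciphertext range on the stored object. The arithmetic behind that translation, sketched with assumed sizes (a 32-byte file header, 64 KiB plaintext blocks, 16 bytes of per-block overhead; treat these constants as assumptions about the on-disk format, not verified values):

```go
package cryptsketch

const (
	fileHeaderSize  = 32        // assumed: magic + nonce
	blockDataSize   = 64 * 1024 // assumed: plaintext bytes per block
	blockOverhead   = 16        // assumed: per-block MAC bytes
	blockOnDiskSize = blockDataSize + blockOverhead
)

// underlyingOffset maps a plaintext offset to the ciphertext offset of the
// start of the block containing it; the decrypter then discards the first
// plaintextOffset % blockDataSize plaintext bytes after decryption.
func underlyingOffset(plaintextOffset int64) int64 {
	block := plaintextOffset / blockDataSize
	return fileHeaderSize + block*blockOnDiskSize
}
```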
@@ -781,17 +701,17 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 }
 
 // Update in to the object with the modTime given of the given size
-func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	update := func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-		return o.Object, o.Object.Update(ctx, in, src, options...)
+func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+	update := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+		return o.Object, o.Object.Update(in, src, options...)
 	}
-	_, err := o.f.put(ctx, in, src, options, update)
+	_, err := o.f.put(in, src, options, update)
 	return err
 }
 
 // newDir returns a dir with the Name decrypted
-func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
-	newDir := fs.NewDirCopy(ctx, dir)
+func (f *Fs) newDir(dir fs.Directory) fs.Directory {
+	newDir := fs.NewDirCopy(dir)
 	remote := dir.Remote()
 	decryptedRemote, err := f.cipher.DecryptDirName(remote)
 	if err != nil {
@@ -802,24 +722,6 @@ func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
 	return newDir
 }
 
-// UserInfo returns info about the connected user
-func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
-	do := f.Fs.Features().UserInfo
-	if do == nil {
-		return nil, fs.ErrorNotImplemented
-	}
-	return do(ctx)
-}
-
-// Disconnect the current user
-func (f *Fs) Disconnect(ctx context.Context) error {
-	do := f.Fs.Features().Disconnect
-	if do == nil {
-		return fs.ErrorNotImplemented
-	}
-	return do(ctx)
-}
-
 // ObjectInfo describes a wrapped fs.ObjectInfo for being the source
 //
 // This encrypts the remote name and adjusts the size
@@ -856,38 +758,10 @@ func (o *ObjectInfo) Size() int64 {
 
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
-func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
+func (o *ObjectInfo) Hash(hash hash.Type) (string, error) {
 	return "", nil
 }
 
-// ID returns the ID of the Object if known, or "" if not
-func (o *Object) ID() string {
-	do, ok := o.Object.(fs.IDer)
-	if !ok {
-		return ""
-	}
-	return do.ID()
-}
-
-// SetTier performs changing storage tier of the Object if
-// multiple storage classes supported
-func (o *Object) SetTier(tier string) error {
-	do, ok := o.Object.(fs.SetTierer)
-	if !ok {
-		return errors.New("crypt: underlying remote does not support SetTier")
-	}
-	return do.SetTier(tier)
-}
-
-// GetTier returns storage tier or class of the Object
-func (o *Object) GetTier() string {
-	do, ok := o.Object.(fs.GetTierer)
-	if !ok {
-		return ""
-	}
-	return do.GetTier()
-}
-
 // Check the interfaces are satisfied
 var (
 	_ fs.Fs = (*Fs)(nil)
@@ -901,17 +775,7 @@ var (
 	_ fs.UnWrapper       = (*Fs)(nil)
 	_ fs.ListRer         = (*Fs)(nil)
 	_ fs.Abouter         = (*Fs)(nil)
-	_ fs.Wrapper         = (*Fs)(nil)
-	_ fs.MergeDirser     = (*Fs)(nil)
-	_ fs.DirCacheFlusher = (*Fs)(nil)
-	_ fs.ChangeNotifier  = (*Fs)(nil)
-	_ fs.PublicLinker    = (*Fs)(nil)
-	_ fs.UserInfoer      = (*Fs)(nil)
-	_ fs.Disconnecter    = (*Fs)(nil)
 	_ fs.ObjectInfo      = (*ObjectInfo)(nil)
 	_ fs.Object          = (*Object)(nil)
 	_ fs.ObjectUnWrapper = (*Object)(nil)
-	_ fs.IDer            = (*Object)(nil)
-	_ fs.SetTierer       = (*Object)(nil)
-	_ fs.GetTierer       = (*Object)(nil)
 )
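Editorial note: the trimmed `var` block above is the standard Go compile-time check that a type satisfies a set of interfaces; each dropped line drops a build-time guarantee. The idiom in isolation, with generic stand-in types:

```go
package cryptsketch

import "io"

type Object struct{ /* ... */ }

func (o *Object) Read(p []byte) (int, error) { return 0, io.EOF }
func (o *Object) Close() error               { return nil }

// Compile-time assertions: if *Object stops satisfying an interface listed
// here, the package fails to build instead of failing at runtime.
var (
	_ io.Reader     = (*Object)(nil)
	_ io.ReadCloser = (*Object)(nil)
)
```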
@@ -6,33 +6,14 @@ import (
 	"path/filepath"
 	"testing"
 
-	"github.com/rclone/rclone/backend/crypt"
-	_ "github.com/rclone/rclone/backend/drive" // for integration tests
-	_ "github.com/rclone/rclone/backend/local"
-	_ "github.com/rclone/rclone/backend/swift" // for integration tests
-	"github.com/rclone/rclone/fs/config/obscure"
-	"github.com/rclone/rclone/fstest"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/backend/crypt"
+	_ "github.com/ncw/rclone/backend/local"
+	"github.com/ncw/rclone/fs/config/obscure"
+	"github.com/ncw/rclone/fstest/fstests"
 )
 
-// TestIntegration runs integration tests against the remote
-func TestIntegration(t *testing.T) {
-	if *fstest.RemoteName == "" {
-		t.Skip("Skipping as -remote not set")
-	}
-	fstests.Run(t, &fstests.Opt{
-		RemoteName:                   *fstest.RemoteName,
-		NilObject:                    (*crypt.Object)(nil),
-		UnimplementableFsMethods:     []string{"OpenWriterAt"},
-		UnimplementableObjectMethods: []string{"MimeType"},
-	})
-}
-
 // TestStandard runs integration tests against the remote
 func TestStandard(t *testing.T) {
-	if *fstest.RemoteName != "" {
-		t.Skip("Skipping as -remote set")
-	}
 	tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
 	name := "TestCrypt"
 	fstests.Run(t, &fstests.Opt{
@@ -44,16 +25,11 @@ func TestStandard(t *testing.T) {
 			{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
 			{Name: name, Key: "filename_encryption", Value: "standard"},
 		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt"},
-		UnimplementableObjectMethods: []string{"MimeType"},
 	})
 }
 
 // TestOff runs integration tests against the remote
 func TestOff(t *testing.T) {
-	if *fstest.RemoteName != "" {
-		t.Skip("Skipping as -remote set")
-	}
 	tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-off")
 	name := "TestCrypt2"
 	fstests.Run(t, &fstests.Opt{
@@ -65,16 +41,11 @@ func TestOff(t *testing.T) {
 			{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
 			{Name: name, Key: "filename_encryption", Value: "off"},
 		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt"},
-		UnimplementableObjectMethods: []string{"MimeType"},
 	})
 }
 
 // TestObfuscate runs integration tests against the remote
 func TestObfuscate(t *testing.T) {
-	if *fstest.RemoteName != "" {
-		t.Skip("Skipping as -remote set")
-	}
 	tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
 	name := "TestCrypt3"
 	fstests.Run(t, &fstests.Opt{
@@ -86,8 +57,6 @@ func TestObfuscate(t *testing.T) {
 			{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
 			{Name: name, Key: "filename_encryption", Value: "obfuscate"},
 		},
 		SkipBadWindowsCharacters: true,
-		UnimplementableFsMethods:     []string{"OpenWriterAt"},
-		UnimplementableObjectMethods: []string{"MimeType"},
 	})
 }
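Editorial note: this test file drives the shared fstests suite against one crypt configuration per filename-encryption mode (standard, off, obfuscate), each with its own temp directory and pre-seeded config. A stripped-down sketch of that matrix pattern, with a hypothetical `RunSuite` helper standing in for `fstests.Run`:

```go
package cryptsketch

import (
	"os"
	"path/filepath"
	"testing"
)

// RunSuite is a hypothetical stand-in for a shared integration-test driver
// that takes a remote name and backend configuration.
func RunSuite(t *testing.T, name string, config map[string]string) { /* ... */ }

func TestFilenameEncryptionModes(t *testing.T) {
	for _, mode := range []string{"standard", "off", "obfuscate"} {
		t.Run(mode, func(t *testing.T) {
			// One isolated temp directory per mode keeps the runs independent.
			tempdir := filepath.Join(os.TempDir(), "crypt-test-"+mode)
			RunSuite(t, "TestCrypt", map[string]string{
				"type":                "crypt",
				"remote":              tempdir,
				"filename_encryption": mode,
			})
		})
	}
}
```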
File diff suppressed because it is too large
@@ -1,81 +1,62 @@
 package drive
 
 import (
-	"bytes"
-	"context"
 	"encoding/json"
-	"io"
-	"io/ioutil"
-	"mime"
-	"path/filepath"
-	"strings"
 	"testing"
 
-	"github.com/pkg/errors"
-	_ "github.com/rclone/rclone/backend/local"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/operations"
-	"github.com/rclone/rclone/fstest/fstests"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
 	"google.golang.org/api/drive/v3"
 
+	"github.com/pkg/errors"
+	"github.com/stretchr/testify/assert"
 )
 
-func TestDriveScopes(t *testing.T) {
-	for _, test := range []struct {
-		in       string
-		want     []string
-		wantFlag bool
-	}{
-		{"", []string{
-			"https://www.googleapis.com/auth/drive",
-		}, false},
-		{" drive.file , drive.readonly", []string{
-			"https://www.googleapis.com/auth/drive.file",
-			"https://www.googleapis.com/auth/drive.readonly",
-		}, false},
-		{" drive.file , drive.appfolder", []string{
-			"https://www.googleapis.com/auth/drive.file",
-			"https://www.googleapis.com/auth/drive.appfolder",
-		}, true},
-	} {
-		got := driveScopes(test.in)
-		assert.Equal(t, test.want, got, test.in)
-		gotFlag := driveScopesContainsAppFolder(got)
-		assert.Equal(t, test.wantFlag, gotFlag, test.in)
-	}
-}
-
-/*
-var additionalMimeTypes = map[string]string{
-	"application/vnd.ms-excel.sheet.macroenabled.12":              ".xlsm",
-	"application/vnd.ms-excel.template.macroenabled.12":           ".xltm",
-	"application/vnd.ms-powerpoint.presentation.macroenabled.12":  ".pptm",
-	"application/vnd.ms-powerpoint.slideshow.macroenabled.12":     ".ppsm",
-	"application/vnd.ms-powerpoint.template.macroenabled.12":      ".potm",
-	"application/vnd.ms-powerpoint":                               ".ppt",
-	"application/vnd.ms-word.document.macroenabled.12":            ".docm",
-	"application/vnd.ms-word.template.macroenabled.12":            ".dotm",
-	"application/vnd.openxmlformats-officedocument.presentationml.template": ".potx",
-	"application/vnd.openxmlformats-officedocument.spreadsheetml.template":  ".xltx",
-	"application/vnd.openxmlformats-officedocument.wordprocessingml.template": ".dotx",
-	"application/vnd.sun.xml.writer":                              ".sxw",
-	"text/richtext":                                               ".rtf",
-}
-*/
+const exampleExportFormats = `{
+	"application/vnd.google-apps.document": [
+		"application/rtf",
+		"application/vnd.oasis.opendocument.text",
+		"text/html",
+		"application/pdf",
+		"application/epub+zip",
+		"application/zip",
+		"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+		"text/plain"
+	],
+	"application/vnd.google-apps.spreadsheet": [
+		"application/x-vnd.oasis.opendocument.spreadsheet",
+		"text/tab-separated-values",
+		"application/pdf",
+		"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+		"text/csv",
+		"application/zip",
+		"application/vnd.oasis.opendocument.spreadsheet"
+	],
+	"application/vnd.google-apps.jam": [
+		"application/pdf"
+	],
+	"application/vnd.google-apps.script": [
+		"application/vnd.google-apps.script+json"
+	],
+	"application/vnd.google-apps.presentation": [
+		"application/vnd.oasis.opendocument.presentation",
+		"application/pdf",
+		"application/vnd.openxmlformats-officedocument.presentationml.presentation",
+		"text/plain"
+	],
+	"application/vnd.google-apps.form": [
+		"application/zip"
+	],
+	"application/vnd.google-apps.drawing": [
+		"image/svg+xml",
+		"image/png",
+		"application/pdf",
+		"image/jpeg"
+	]
+}`
 
 // Load the example export formats into exportFormats for testing
-func TestInternalLoadExampleFormats(t *testing.T) {
-	fetchFormatsOnce.Do(func() {})
-	buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
-	var about struct {
-		ExportFormats map[string][]string `json:"exportFormats,omitempty"`
-		ImportFormats map[string][]string `json:"importFormats,omitempty"`
-	}
-	require.NoError(t, err)
-	require.NoError(t, json.Unmarshal(buf, &about))
-	_exportFormats = fixMimeTypeMap(about.ExportFormats)
-	_importFormats = fixMimeTypeMap(about.ImportFormats)
+func TestInternalLoadExampleExportFormats(t *testing.T) {
+	exportFormatsOnce.Do(func() {})
+	assert.NoError(t, json.Unmarshal([]byte(exampleExportFormats), &_exportFormats))
 }
 
 func TestInternalParseExtensions(t *testing.T) {
@@ -84,24 +65,27 @@ func TestInternalParseExtensions(t *testing.T) {
 		want    []string
 		wantErr error
 	}{
-		{"doc", []string{".doc"}, nil},
-		{" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
-		{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
-		{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
+		{"doc", []string{"doc"}, nil},
+		{" docx ,XLSX, pptx,svg", []string{"docx", "xlsx", "pptx", "svg"}, nil},
+		{"docx,svg,Docx", []string{"docx", "svg"}, nil},
+		{"docx,potato,docx", []string{"docx"}, errors.New(`couldn't find mime type for extension "potato"`)},
 	} {
-		extensions, _, gotErr := parseExtensions(test.in)
+		f := new(Fs)
+		gotErr := f.parseExtensions(test.in)
 		if test.wantErr == nil {
 			assert.NoError(t, gotErr)
 		} else {
 			assert.EqualError(t, gotErr, test.wantErr.Error())
 		}
-		assert.Equal(t, test.want, extensions)
+		assert.Equal(t, test.want, f.extensions)
 	}
 
 	// Test it is appending
-	extensions, _, gotErr := parseExtensions("docx,svg", "docx,svg,xlsx")
-	assert.NoError(t, gotErr)
-	assert.Equal(t, []string{".docx", ".svg", ".xlsx"}, extensions)
+	f := new(Fs)
+	assert.Nil(t, f.parseExtensions("docx,svg"))
+	assert.Nil(t, f.parseExtensions("docx,svg,xlsx"))
+	assert.Equal(t, []string{"docx", "svg", "xlsx"}, f.extensions)
 
 }
 
func TestInternalFindExportFormat(t *testing.T) {
@@ -115,17 +99,17 @@ func TestInternalFindExportFormat(t *testing.T) {
 		wantMimeType  string
 	}{
 		{[]string{}, "", ""},
-		{[]string{".pdf"}, ".pdf", "application/pdf"},
-		{[]string{".pdf", ".rtf", ".xls"}, ".pdf", "application/pdf"},
-		{[]string{".xls", ".rtf", ".pdf"}, ".rtf", "application/rtf"},
-		{[]string{".xls", ".csv", ".svg"}, "", ""},
+		{[]string{"pdf"}, "pdf", "application/pdf"},
+		{[]string{"pdf", "rtf", "xls"}, "pdf", "application/pdf"},
+		{[]string{"xls", "rtf", "pdf"}, "rtf", "application/rtf"},
+		{[]string{"xls", "csv", "svg"}, "", ""},
 	} {
 		f := new(Fs)
-		f.exportExtensions = test.extensions
+		f.extensions = test.extensions
 		gotExtension, gotFilename, gotMimeType, gotIsDocument := f.findExportFormat(item)
 		assert.Equal(t, test.wantExtension, gotExtension)
 		if test.wantExtension != "" {
-			assert.Equal(t, item.Name+gotExtension, gotFilename)
+			assert.Equal(t, item.Name+"."+gotExtension, gotFilename)
 		} else {
 			assert.Equal(t, "", gotFilename)
 		}
@@ -133,155 +117,3 @@ func TestInternalFindExportFormat(t *testing.T) {
 		assert.Equal(t, true, gotIsDocument)
 	}
 }
-
-func TestMimeTypesToExtension(t *testing.T) {
-	for mimeType, extension := range _mimeTypeToExtension {
-		extensions, err := mime.ExtensionsByType(mimeType)
-		assert.NoError(t, err)
-		assert.Contains(t, extensions, extension)
-	}
-}
-
-func TestExtensionToMimeType(t *testing.T) {
-	for mimeType, extension := range _mimeTypeToExtension {
-		gotMimeType := mime.TypeByExtension(extension)
-		mediatype, _, err := mime.ParseMediaType(gotMimeType)
-		assert.NoError(t, err)
-		assert.Equal(t, mimeType, mediatype)
-	}
-}
-
-func TestExtensionsForExportFormats(t *testing.T) {
-	if _exportFormats == nil {
-		t.Error("exportFormats == nil")
-	}
-	for fromMT, toMTs := range _exportFormats {
-		for _, toMT := range toMTs {
-			if !isInternalMimeType(toMT) {
-				extensions, err := mime.ExtensionsByType(toMT)
-				assert.NoError(t, err, "invalid MIME type %q", toMT)
-				assert.NotEmpty(t, extensions, "No extension found for %q (from: %q)", fromMT, toMT)
-			}
-		}
-	}
-}
-
-func TestExtensionsForImportFormats(t *testing.T) {
-	t.Skip()
-	if _importFormats == nil {
-		t.Error("_importFormats == nil")
-	}
-	for fromMT := range _importFormats {
-		if !isInternalMimeType(fromMT) {
-			extensions, err := mime.ExtensionsByType(fromMT)
-			assert.NoError(t, err, "invalid MIME type %q", fromMT)
-			assert.NotEmpty(t, extensions, "No extension found for %q", fromMT)
-		}
-	}
-}
-
-func (f *Fs) InternalTestDocumentImport(t *testing.T) {
-	oldAllow := f.opt.AllowImportNameChange
-	f.opt.AllowImportNameChange = true
-	defer func() {
-		f.opt.AllowImportNameChange = oldAllow
-	}()
-
-	testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
-	require.NoError(t, err)
-
-	testFilesFs, err := fs.NewFs(testFilesPath)
-	require.NoError(t, err)
-
-	_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
-	require.NoError(t, err)
-
-	err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.doc", "example2.doc")
-	require.NoError(t, err)
-}
-
-func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
-	testFilesPath, err := filepath.Abs(filepath.FromSlash("test/files"))
-	require.NoError(t, err)
-
-	testFilesFs, err := fs.NewFs(testFilesPath)
-	require.NoError(t, err)
-
-	_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
-	require.NoError(t, err)
-
-	err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.xlsx", "example1.ods")
-	require.NoError(t, err)
-}
-
-func (f *Fs) InternalTestDocumentExport(t *testing.T) {
-	var buf bytes.Buffer
-	var err error
-
-	f.exportExtensions, _, err = parseExtensions("txt")
-	require.NoError(t, err)
-
-	obj, err := f.NewObject(context.Background(), "example2.txt")
-	require.NoError(t, err)
-
-	rc, err := obj.Open(context.Background())
-	require.NoError(t, err)
-	defer func() { require.NoError(t, rc.Close()) }()
-
-	_, err = io.Copy(&buf, rc)
-	require.NoError(t, err)
-	text := buf.String()
-
-	for _, excerpt := range []string{
-		"Lorem ipsum dolor sit amet, consectetur",
-		"porta at ultrices in, consectetur at augue.",
-	} {
-		require.Contains(t, text, excerpt)
-	}
-}
-
-func (f *Fs) InternalTestDocumentLink(t *testing.T) {
-	var buf bytes.Buffer
-	var err error
-
-	f.exportExtensions, _, err = parseExtensions("link.html")
-	require.NoError(t, err)
-
-	obj, err := f.NewObject(context.Background(), "example2.link.html")
-	require.NoError(t, err)
-
-	rc, err := obj.Open(context.Background())
-	require.NoError(t, err)
-	defer func() { require.NoError(t, rc.Close()) }()
-
-	_, err = io.Copy(&buf, rc)
-	require.NoError(t, err)
-	text := buf.String()
-
-	require.True(t, strings.HasPrefix(text, "<html>"))
-	require.True(t, strings.HasSuffix(text, "</html>\n"))
-	for _, excerpt := range []string{
-		`<meta http-equiv="refresh"`,
-		`Loading <a href="`,
-	} {
-		require.Contains(t, text, excerpt)
-	}
-}
-
-func (f *Fs) InternalTest(t *testing.T) {
-	// These tests all depend on each other so run them as nested tests
-	t.Run("DocumentImport", func(t *testing.T) {
-		f.InternalTestDocumentImport(t)
-		t.Run("DocumentUpdate", func(t *testing.T) {
-			f.InternalTestDocumentUpdate(t)
-			t.Run("DocumentExport", func(t *testing.T) {
-				f.InternalTestDocumentExport(t)
-				t.Run("DocumentLink", func(t *testing.T) {
-					f.InternalTestDocumentLink(t)
-				})
-			})
-		})
-	})
-}
-
-var _ fstests.InternalTester = (*Fs)(nil)
@@ -1,35 +1,17 @@
 // Test Drive filesystem interface
-package drive
+package drive_test
 
 import (
 	"testing"
 
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/backend/drive"
+	"github.com/ncw/rclone/fstest/fstests"
 )
 
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestDrive:",
-		NilObject:  (*Object)(nil),
-		ChunkedUpload: fstests.ChunkedUploadConfig{
-			MinChunkSize:  minChunkSize,
-			CeilChunkSize: fstests.NextPowerOfTwo,
-		},
+		NilObject:  (*drive.Object)(nil),
 	})
 }
-
-func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-	return f.setUploadChunkSize(cs)
-}
-
-func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-	return f.setUploadCutoff(cs)
-}
-
-var (
-	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
-	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
-)
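Editorial note: the removed ChunkedUpload config references `fstests.NextPowerOfTwo`, a ceiling function the test harness uses to round candidate chunk sizes up to a granularity the backend accepts. One way such a helper can be written (a sketch, not necessarily rclone's implementation):

```go
package drivesketch

// nextPowerOfTwo rounds n up to the nearest power of two (for n >= 1), so
// chunk-size probing in tests always lands on a size the backend will accept.
func nextPowerOfTwo(n int64) int64 {
	p := int64(1)
	for p < n {
		p <<= 1
	}
	return p
}
```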
@@ -1,178 +0,0 @@
-{
-    "importFormats": {
-        "text/tab-separated-values": [
-            "application/vnd.google-apps.spreadsheet"
-        ],
-        "application/x-vnd.oasis.opendocument.presentation": [
-            "application/vnd.google-apps.presentation"
-        ],
-        "image/jpeg": [
-            "application/vnd.google-apps.document"
-        ],
-        "image/bmp": [
-            "application/vnd.google-apps.document"
-        ],
-        "image/gif": [
-            "application/vnd.google-apps.document"
-        ],
-        "application/vnd.ms-excel.sheet.macroenabled.12": [
-            "application/vnd.google-apps.spreadsheet"
-        ],
-        "application/vnd.openxmlformats-officedocument.wordprocessingml.template": [
-            "application/vnd.google-apps.document"
-        ],
-        "application/vnd.ms-powerpoint.presentation.macroenabled.12": [
-            "application/vnd.google-apps.presentation"
-        ],
-        "application/vnd.ms-word.template.macroenabled.12": [
-            "application/vnd.google-apps.document"
-        ],
-        "application/vnd.openxmlformats-officedocument.wordprocessingml.document": [
-            "application/vnd.google-apps.document"
-        ],
-        "image/pjpeg": [
-            "application/vnd.google-apps.document"
-        ],
-        "application/vnd.google-apps.script+text/plain": [
-            "application/vnd.google-apps.script"
-        ],
-        "application/vnd.ms-excel": [
-            "application/vnd.google-apps.spreadsheet"
-        ],
-        "application/vnd.sun.xml.writer": [
-            "application/vnd.google-apps.document"
-        ],
-        "application/vnd.ms-word.document.macroenabled.12": [
-            "application/vnd.google-apps.document"
-        ],
-        "application/vnd.ms-powerpoint.slideshow.macroenabled.12": [
-            "application/vnd.google-apps.presentation"
-        ],
-        "text/rtf": [
-            "application/vnd.google-apps.document"
-        ],
-        "text/plain": [
-            "application/vnd.google-apps.document"
-        ],
-        "application/vnd.oasis.opendocument.spreadsheet": [
-            "application/vnd.google-apps.spreadsheet"
-        ],
-        "application/x-vnd.oasis.opendocument.spreadsheet": [
-            "application/vnd.google-apps.spreadsheet"
-        ],
-        "image/png": [
-            "application/vnd.google-apps.document"
-        ],
-        "application/x-vnd.oasis.opendocument.text": [
-            "application/vnd.google-apps.document"
-        ],
-        "application/msword": [
-            "application/vnd.google-apps.document"
-        ],
-        "application/pdf": [
-            "application/vnd.google-apps.document"
-        ],
-        "application/json": [
-            "application/vnd.google-apps.script"
-        ],
-        "application/x-msmetafile": [
-            "application/vnd.google-apps.drawing"
-        ],
-        "application/vnd.openxmlformats-officedocument.spreadsheetml.template": [
-            "application/vnd.google-apps.spreadsheet"
-        ],
-        "application/vnd.ms-powerpoint": [
-            "application/vnd.google-apps.presentation"
-        ],
-        "application/vnd.ms-excel.template.macroenabled.12": [
-            "application/vnd.google-apps.spreadsheet"
-        ],
-        "image/x-bmp": [
-            "application/vnd.google-apps.document"
-        ],
-        "application/rtf": [
-            "application/vnd.google-apps.document"
-        ],
-        "application/vnd.openxmlformats-officedocument.presentationml.template": [
-            "application/vnd.google-apps.presentation"
-        ],
-        "image/x-png": [
-            "application/vnd.google-apps.document"
-        ],
-        "text/html": [
-            "application/vnd.google-apps.document"
-        ],
-        "application/vnd.oasis.opendocument.text": [
-            "application/vnd.google-apps.document"
-        ],
-        "application/vnd.openxmlformats-officedocument.presentationml.presentation": [
-            "application/vnd.google-apps.presentation"
-        ],
-        "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": [
-            "application/vnd.google-apps.spreadsheet"
-        ],
-        "application/vnd.google-apps.script+json": [
-            "application/vnd.google-apps.script"
-        ],
-        "application/vnd.openxmlformats-officedocument.presentationml.slideshow": [
-            "application/vnd.google-apps.presentation"
-        ],
-        "application/vnd.ms-powerpoint.template.macroenabled.12": [
-            "application/vnd.google-apps.presentation"
-        ],
-        "text/csv": [
-            "application/vnd.google-apps.spreadsheet"
-        ],
-        "application/vnd.oasis.opendocument.presentation": [
-            "application/vnd.google-apps.presentation"
-        ],
-        "image/jpg": [
-            "application/vnd.google-apps.document"
-        ],
-        "text/richtext": [
-            "application/vnd.google-apps.document"
-        ]
-    },
-    "exportFormats": {
-        "application/vnd.google-apps.document": [
-            "application/rtf",
-            "application/vnd.oasis.opendocument.text",
-            "text/html",
-            "application/pdf",
-            "application/epub+zip",
-            "application/zip",
-            "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
-            "text/plain"
-        ],
-        "application/vnd.google-apps.spreadsheet": [
-            "application/x-vnd.oasis.opendocument.spreadsheet",
-            "text/tab-separated-values",
-            "application/pdf",
-            "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
-            "text/csv",
-            "application/zip",
-            "application/vnd.oasis.opendocument.spreadsheet"
-        ],
-        "application/vnd.google-apps.jam": [
-            "application/pdf"
-        ],
-        "application/vnd.google-apps.script": [
-            "application/vnd.google-apps.script+json"
-        ],
-        "application/vnd.google-apps.presentation": [
-            "application/vnd.oasis.opendocument.presentation",
-            "application/pdf",
-            "application/vnd.openxmlformats-officedocument.presentationml.presentation",
-            "text/plain"
-        ],
-        "application/vnd.google-apps.form": [
-            "application/zip"
-        ],
-        "application/vnd.google-apps.drawing": [
-            "image/svg+xml",
-            "image/png",
-            "application/pdf",
-            "image/jpeg"
-        ]
-    }
-}
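Editorial note: on the newer side of this compare the same data lives in this test/about.json fixture, which the internal test reads and unmarshals (see `TestInternalLoadExampleFormats` above). A sketch of that load step, assuming the file layout shown in the deleted JSON:

```go
package drivesketch

import (
	"encoding/json"
	"io/ioutil"
)

type aboutFormats struct {
	ExportFormats map[string][]string `json:"exportFormats,omitempty"`
	ImportFormats map[string][]string `json:"importFormats,omitempty"`
}

// loadFormats reads the fixture and returns the MIME-type conversion maps.
func loadFormats(path string) (export, imp map[string][]string, err error) {
	buf, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, nil, err
	}
	var about aboutFormats
	if err := json.Unmarshal(buf, &about); err != nil {
		return nil, nil, err
	}
	return about.ExportFormats, about.ImportFormats, nil
}
```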
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -11,7 +11,6 @@
 package drive
 
 import (
-	"context"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -20,10 +19,10 @@ import (
 	"regexp"
 	"strconv"
 
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/lib/readers"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/fserrors"
-	"github.com/rclone/rclone/lib/readers"
 	"google.golang.org/api/drive/v3"
 	"google.golang.org/api/googleapi"
 )
@@ -51,13 +50,14 @@ type resumableUpload struct {
 }
 
 // Upload the io.Reader in of size bytes with contentType and info
-func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
-	params := url.Values{
-		"alt":        {"json"},
-		"uploadType": {"resumable"},
-		"fields":     {partialFields},
+func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string, info *drive.File, remote string) (*drive.File, error) {
+	params := make(url.Values)
+	params.Set("alt", "json")
+	params.Set("uploadType", "resumable")
+	params.Set("fields", partialFields)
+	if f.isTeamDrive {
+		params.Set("supportsTeamDrives", "true")
 	}
-	params.Set("supportsAllDrives", "true")
 	if f.opt.KeepRevisionForever {
 		params.Set("keepRevisionForever", "true")
 	}
@@ -82,7 +82,6 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
 	if err != nil {
 		return false, err
 	}
-	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 	googleapi.Expand(req.URL, map[string]string{
 		"fileId": fileID,
 	})
@@ -108,13 +107,12 @@ func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType,
 		MediaType:     contentType,
 		ContentLength: size,
 	}
-	return rx.Upload(ctx)
+	return rx.Upload()
 }
 
 // Make an http.Request for the range passed in
-func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request {
+func (rx *resumableUpload) makeRequest(start int64, body io.ReadSeeker, reqSize int64) *http.Request {
 	req, _ := http.NewRequest("POST", rx.URI, body)
-	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 	req.ContentLength = reqSize
 	if reqSize != 0 {
 		req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
@@ -132,8 +130,8 @@ var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
 // Query drive for the amount transferred so far
 //
 // If error is nil, then start should be valid
-func (rx *resumableUpload) transferStatus(ctx context.Context) (start int64, err error) {
-	req := rx.makeRequest(ctx, 0, nil, 0)
+func (rx *resumableUpload) transferStatus() (start int64, err error) {
+	req := rx.makeRequest(0, nil, 0)
 	res, err := rx.f.client.Do(req)
 	if err != nil {
 		return 0, err
@@ -160,9 +158,9 @@ func (rx *resumableUpload) transferStatus(ctx context.Context) (start int64, err
 }
 
 // Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
-func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
+func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
 	_, _ = chunk.Seek(0, io.SeekStart)
-	req := rx.makeRequest(ctx, start, chunk, chunkSize)
+	req := rx.makeRequest(start, chunk, chunkSize)
 	res, err := rx.f.client.Do(req)
 	if err != nil {
 		return 599, err
@@ -184,7 +182,7 @@ func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk
 	// been 200 OK.
 	//
 	// So parse the response out of the body. We aren't expecting
-	// any other 2xx codes, so we parse it unconditionally on
+	// any other 2xx codes, so we parse it unconditionaly on
 	// StatusCode
 	if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
 		return 598, err
@@ -195,7 +193,7 @@ func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk
 
 // Upload uploads the chunks from the input
 // It retries each chunk using the pacer and --low-level-retries
-func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
+func (rx *resumableUpload) Upload() (*drive.File, error) {
 	start := int64(0)
 	var StatusCode int
 	var err error
@@ -210,7 +208,7 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
 		// Transfer the chunk
 		err = rx.f.pacer.Call(func() (bool, error) {
 			fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
-			StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
+			StatusCode, err = rx.transferChunk(start, chunk, reqSize)
 			again, err := shouldRetry(err)
 			if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
 				again = false
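Editorial note: the makeRequest/transferChunk hunks implement the Google resumable-upload protocol, in which each chunk is POSTed to a session URI with a Content-Range header describing where it sits in the whole file, and a zero-length request probes how much the server has already received. A minimal sketch of building the chunk request (standard library only; the session URI is whatever the initial upload handshake returned, and the probe case's exact header is an assumption here):

```go
package drivesketch

import (
	"fmt"
	"io"
	"net/http"
)

// makeChunkRequest builds one resumable-upload chunk request. A nil body with
// reqSize 0 acts as a status probe in the protocol this code follows.
func makeChunkRequest(uri string, start, reqSize, total int64, body io.Reader) (*http.Request, error) {
	req, err := http.NewRequest("POST", uri, body)
	if err != nil {
		return nil, err
	}
	req.ContentLength = reqSize
	if reqSize != 0 {
		// e.g. "bytes 0-8388607/33554432" for the first 8 MiB of a 32 MiB file.
		req.Header.Set("Content-Range",
			fmt.Sprintf("bytes %d-%d/%d", start, start+reqSize-1, total))
	}
	return req, nil
}
```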
@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/rclone/rclone/backend/dropbox/dbhash"
+	"github.com/ncw/rclone/backend/dropbox/dbhash"
 	"github.com/stretchr/testify/assert"
 )
 
|||||||
@@ -22,7 +22,6 @@ of path_display and all will be well.
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"log"
|
"log"
|
||||||
@@ -32,30 +31,24 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
|
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
|
||||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
|
|
||||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
|
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
|
||||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
|
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
|
||||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
|
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
|
||||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
|
|
||||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
|
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
|
||||||
|
"github.com/ncw/rclone/fs"
|
||||||
|
"github.com/ncw/rclone/fs/config"
|
||||||
|
"github.com/ncw/rclone/fs/config/configmap"
|
||||||
|
"github.com/ncw/rclone/fs/config/configstruct"
|
||||||
|
"github.com/ncw/rclone/fs/config/obscure"
|
||||||
|
"github.com/ncw/rclone/fs/fserrors"
|
||||||
|
"github.com/ncw/rclone/fs/hash"
|
||||||
|
"github.com/ncw/rclone/lib/oauthutil"
|
||||||
|
"github.com/ncw/rclone/lib/pacer"
|
||||||
|
"github.com/ncw/rclone/lib/readers"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/rclone/rclone/backend/dropbox/dbhash"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/config"
|
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
|
||||||
"github.com/rclone/rclone/fs/config/obscure"
|
|
||||||
"github.com/rclone/rclone/fs/encodings"
|
|
||||||
"github.com/rclone/rclone/fs/fserrors"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
"github.com/rclone/rclone/lib/oauthutil"
|
|
||||||
"github.com/rclone/rclone/lib/pacer"
|
|
||||||
"github.com/rclone/rclone/lib/readers"
|
|
||||||
"golang.org/x/oauth2"
|
"golang.org/x/oauth2"
|
||||||
)
|
)
|
||||||
|
|
||||||
const enc = encodings.Dropbox
|
|
||||||
|
|
||||||
// Constants
|
// Constants
|
||||||
const (
|
const (
|
||||||
rcloneClientID = "5jcck7diasz0rqy"
|
rcloneClientID = "5jcck7diasz0rqy"
|
||||||
@@ -86,8 +79,8 @@ const (
|
|||||||
// Choose 48MB which is 91% of Maximum speed. rclone by
|
// Choose 48MB which is 91% of Maximum speed. rclone by
|
||||||
// default does 4 transfers so this should use 4*48MB = 192MB
|
// default does 4 transfers so this should use 4*48MB = 192MB
|
||||||
// by default.
|
// by default.
|
||||||
defaultChunkSize = 48 * fs.MebiByte
|
defaultChunkSize = 48 * 1024 * 1024
|
||||||
maxChunkSize = 150 * fs.MebiByte
|
maxChunkSize = 150 * 1024 * 1024
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
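Editorial note: the chunk-size hunk swaps raw `48 * 1024 * 1024` arithmetic for the `fs.MebiByte` constant, which keeps the units explicit and lets the value print in human-readable form through the SizeSuffix type. The underlying idea in miniature (a sketch; rclone's actual SizeSuffix also handles parsing and more units):

```go
package sizesketch

import "fmt"

// SizeSuffix is a byte count that prints with a unit suffix.
type SizeSuffix int64

const MebiByte SizeSuffix = 1 << 20

func (s SizeSuffix) String() string {
	return fmt.Sprintf("%dM", int64(s)/int64(MebiByte))
}

// defaultChunkSize prints as "48M" instead of the opaque 50331648.
var defaultChunkSize = 48 * MebiByte
```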
@@ -106,14 +99,10 @@ var (
 	// A regexp matching path names for files Dropbox ignores
 	// See https://www.dropbox.com/en/help/145 - Ignored files
 	ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
-
-	// DbHashType is the hash.Type for Dropbox
-	DbHashType hash.Type
 )
 
 // Register with Fs
 func init() {
-	DbHashType = hash.RegisterHash("Dropbox", 64, dbhash.New)
 	fs.Register(&fs.RegInfo{
 		Name: "dropbox",
 		Description: "Dropbox",
@@ -131,21 +120,9 @@ func init() {
 			Name: config.ConfigClientSecret,
 			Help: "Dropbox App Client Secret\nLeave blank normally.",
 		}, {
 			Name: "chunk_size",
-			Help: fmt.Sprintf(`Upload chunk size. (< %v).
-
-Any files larger than this will be uploaded in chunks of this size.
-
-Note that chunks are buffered in memory (one at a time) so rclone can
-deal with retries. Setting this larger will increase the speed
-slightly (at most 10%% for 128MB in tests) at the cost of using more
-memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
-			Default: defaultChunkSize,
-			Advanced: true,
-		}, {
-			Name: "impersonate",
-			Help: "Impersonate this user when using a business account.",
-			Default: "",
+			Help: fmt.Sprintf("Upload chunk size. Max %v.", fs.SizeSuffix(maxChunkSize)),
+			Default: fs.SizeSuffix(defaultChunkSize),
 			Advanced: true,
 		}},
 	})
@@ -153,8 +130,7 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
 
 // Options defines the configuration for this backend
 type Options struct {
 	ChunkSize fs.SizeSuffix `config:"chunk_size"`
-	Impersonate string `config:"impersonate"`
 }
 
 // Fs represents a remote dropbox server
@@ -166,10 +142,9 @@ type Fs struct {
 	srv files.Client // the connection to the dropbox server
 	sharing sharing.Client // as above, but for generating sharing links
 	users users.Client // as above, but for accessing user information
-	team team.Client // for the Teams API
 	slashRoot string // root with "/" prefix, lowercase
 	slashRootSlash string // root with "/" prefix and postfix, lowercase
-	pacer *fs.Pacer // To pace the API calls
+	pacer *pacer.Pacer // To pace the API calls
 	ns string // The namespace we are using or "" for none
 }
 
@@ -213,42 +188,14 @@ func shouldRetry(err error) (bool, error) {
 		return false, err
 	}
 	baseErrString := errors.Cause(err).Error()
-	// handle any official Retry-After header from Dropbox's SDK first
-	switch e := err.(type) {
-	case auth.RateLimitAPIError:
-		if e.RateLimitError.RetryAfter > 0 {
-			fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
-			err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
-		}
-		return true, err
-	}
-	// Keep old behavior for backward compatibility
-	if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
+	// FIXME there is probably a better way of doing this!
+	if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
 		return true, err
 	}
 	return fserrors.ShouldRetry(err), err
 }
 
-func checkUploadChunkSize(cs fs.SizeSuffix) error {
-	const minChunkSize = fs.Byte
-	if cs < minChunkSize {
-		return errors.Errorf("%s is less than %s", cs, minChunkSize)
-	}
-	if cs > maxChunkSize {
-		return errors.Errorf("%s is greater than %s", cs, maxChunkSize)
-	}
-	return nil
-}
-
-func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
-	err = checkUploadChunkSize(cs)
-	if err == nil {
-		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
-	}
-	return
-}
-
-// NewFs constructs an Fs from the path, container:path
+// NewFs contstructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
@@ -256,9 +203,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if err != nil {
 		return nil, err
 	}
-	err = checkUploadChunkSize(opt.ChunkSize)
-	if err != nil {
-		return nil, errors.Wrap(err, "dropbox: chunk size")
+	if opt.ChunkSize > maxChunkSize {
+		return nil, errors.Errorf("chunk size too big, must be < %v", maxChunkSize)
 	}
 
 	// Convert the old token if it exists. The old token was just
@@ -282,36 +228,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	f := &Fs{
 		name: name,
 		opt: *opt,
-		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
 	}
 	config := dropbox.Config{
 		LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
 		Client: oAuthClient, // maybe???
 		HeaderGenerator: f.headerGenerator,
 	}
-
-	// NOTE: needs to be created pre-impersonation so we can look up the impersonated user
-	f.team = team.New(config)
-
-	if opt.Impersonate != "" {
-
-		user := team.UserSelectorArg{
-			Email: opt.Impersonate,
-		}
-		user.Tag = "email"
-
-		members := []*team.UserSelectorArg{&user}
-		args := team.NewMembersGetInfoArgs(members)
-
-		memberIds, err := f.team.MembersGetInfo(args)
-
-		if err != nil {
-			return nil, errors.Wrapf(err, "invalid dropbox team member: %q", opt.Impersonate)
-		}
-
-		config.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
-	}
-
 	f.srv = files.New(config)
 	f.sharing = sharing.New(config)
 	f.users = users.New(config)
@@ -380,15 +303,14 @@ func (f *Fs) setRoot(root string) {
 // getMetadata gets the metadata for a file or directory
 func (f *Fs) getMetadata(objPath string) (entry files.IsMetadata, notFound bool, err error) {
 	err = f.pacer.Call(func() (bool, error) {
-		entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
-			Path: enc.FromStandardPath(objPath),
-		})
+		entry, err = f.srv.GetMetadata(&files.GetMetadataArg{Path: objPath})
 		return shouldRetry(err)
 	})
 	if err != nil {
 		switch e := err.(type) {
 		case files.GetMetadataAPIError:
-			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
+			switch e.EndpointError.Path.Tag {
+			case files.LookupErrorNotFound:
 				notFound = true
 				err = nil
 			}
@@ -451,7 +373,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje
 
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+func (f *Fs) NewObject(remote string) (fs.Object, error) {
 	return f.newObjectWithInfo(remote, nil)
 }
 
@@ -464,7 +386,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 	root := f.slashRoot
 	if dir != "" {
 		root += "/" + dir
@@ -475,7 +397,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	for {
 		if !started {
 			arg := files.ListFolderArg{
-				Path: enc.FromStandardPath(root),
+				Path: root,
 				Recursive: false,
 			}
 			if root == "/" {
@@ -488,7 +410,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		if err != nil {
 			switch e := err.(type) {
 			case files.ListFolderAPIError:
-				if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
+				switch e.EndpointError.Path.Tag {
+				case files.LookupErrorNotFound:
 					err = fs.ErrorDirNotFound
 				}
 			}
@@ -525,7 +448,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 
 		// Only the last element is reliably cased in PathDisplay
 		entryPath := metadata.PathDisplay
-		leaf := enc.ToStandardName(path.Base(entryPath))
+		leaf := path.Base(entryPath)
 		remote := path.Join(dir, leaf)
 		if folderInfo != nil {
 			d := fs.NewDir(remote, time.Now())
@@ -550,22 +473,22 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	// Temporary Object under construction
 	o := &Object{
 		fs: f,
 		remote: src.Remote(),
 	}
-	return o, o.Update(ctx, in, src, options...)
+	return o, o.Update(in, src, options...)
 }
 
 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.Put(ctx, in, src, options...)
+func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	return f.Put(in, src, options...)
 }
 
 // Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+func (f *Fs) Mkdir(dir string) error {
 	root := path.Join(f.slashRoot, dir)
 
 	// can't create or run metadata on root
@@ -583,7 +506,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 
 	// create it
 	arg2 := files.CreateFolderArg{
-		Path: enc.FromStandardPath(root),
+		Path: root,
 	}
 	err = f.pacer.Call(func() (bool, error) {
 		_, err = f.srv.CreateFolderV2(&arg2)
@@ -595,7 +518,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 // Rmdir deletes the container
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+func (f *Fs) Rmdir(dir string) error {
 	root := path.Join(f.slashRoot, dir)
 
 	// can't remove root
@@ -609,7 +532,6 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 		return errors.Wrap(err, "Rmdir")
 	}
 
-	root = enc.FromStandardPath(root)
 	// check directory empty
 	arg := files.ListFolderArg{
 		Path: root,
@@ -652,7 +574,7 @@ func (f *Fs) Precision() time.Duration {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't copy - not same remote type")
@@ -666,12 +588,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 
 	// Copy
-	arg := files.RelocationArg{
-		RelocationPath: files.RelocationPath{
-			FromPath: enc.FromStandardPath(srcObj.remotePath()),
-			ToPath: enc.FromStandardPath(dstObj.remotePath()),
-		},
-	}
+	arg := files.RelocationArg{}
+	arg.FromPath = srcObj.remotePath()
+	arg.ToPath = dstObj.remotePath()
 	var err error
 	var result *files.RelocationResult
 	err = f.pacer.Call(func() (bool, error) {
@@ -700,12 +619,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge(ctx context.Context) (err error) {
+func (f *Fs) Purge() (err error) {
 	// Let dropbox delete the filesystem tree
 	err = f.pacer.Call(func() (bool, error) {
-		_, err = f.srv.DeleteV2(&files.DeleteArg{
-			Path: enc.FromStandardPath(f.slashRoot),
-		})
+		_, err = f.srv.DeleteV2(&files.DeleteArg{Path: f.slashRoot})
 		return shouldRetry(err)
 	})
 	return err
@@ -720,7 +637,7 @@ func (f *Fs) Purge(ctx context.Context) (err error) {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't move - not same remote type")
@@ -734,12 +651,9 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 
 	// Do the move
-	arg := files.RelocationArg{
-		RelocationPath: files.RelocationPath{
-			FromPath: enc.FromStandardPath(srcObj.remotePath()),
-			ToPath: enc.FromStandardPath(dstObj.remotePath()),
-		},
-	}
+	arg := files.RelocationArg{}
+	arg.FromPath = srcObj.remotePath()
+	arg.ToPath = dstObj.remotePath()
 	var err error
 	var result *files.RelocationResult
 	err = f.pacer.Call(func() (bool, error) {
@@ -763,8 +677,8 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 
 // PublicLink adds a "readable by anyone with link" permission on the given file or folder.
-func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
-	absPath := enc.FromStandardPath(path.Join(f.slashRoot, remote))
+func (f *Fs) PublicLink(remote string) (link string, err error) {
+	absPath := "/" + path.Join(f.Root(), remote)
 	fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
 	createArg := sharing.CreateSharedLinkWithSettingsArg{
 		Path: absPath,
@@ -775,8 +689,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
 		return shouldRetry(err)
 	})
 
-	if err != nil && strings.Contains(err.Error(),
-		sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
+	if err != nil && strings.Contains(err.Error(), sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
 		fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
 		listArg := sharing.ListSharedLinksArg{
 			Path: absPath,
@@ -817,7 +730,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 	srcFs, ok := src.(*Fs)
 	if !ok {
 		fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -838,12 +751,9 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	// ...apparently not necessary
 
 	// Do the move
-	arg := files.RelocationArg{
-		RelocationPath: files.RelocationPath{
-			FromPath: enc.FromStandardPath(srcPath),
-			ToPath: enc.FromStandardPath(dstPath),
-		},
-	}
+	arg := files.RelocationArg{}
+	arg.FromPath = srcPath
+	arg.ToPath = dstPath
 	err = f.pacer.Call(func() (bool, error) {
 		_, err = f.srv.MoveV2(&arg)
 		return shouldRetry(err)
@@ -856,7 +766,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 }
 
 // About gets quota information
-func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
+func (f *Fs) About() (usage *fs.Usage, err error) {
 	var q *users.SpaceUsage
 	err = f.pacer.Call(func() (bool, error) {
 		q, err = f.users.GetSpaceUsage()
@@ -884,7 +794,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-	return hash.Set(DbHashType)
+	return hash.Set(hash.Dropbox)
 }
 
 // ------------------------------------------------------------
@@ -908,8 +818,8 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns the dropbox special hash
-func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
-	if t != DbHashType {
+func (o *Object) Hash(t hash.Type) (string, error) {
+	if t != hash.Dropbox {
 		return "", hash.ErrUnsupported
 	}
 	err := o.readMetaData()
@@ -970,7 +880,7 @@ func (o *Object) readMetaData() (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime(ctx context.Context) time.Time {
+func (o *Object) ModTime() time.Time {
 	err := o.readMetaData()
 	if err != nil {
 		fs.Debugf(o, "Failed to read metadata: %v", err)
@@ -982,7 +892,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
 // SetModTime sets the modification time of the local fs object
 //
 // Commits the datastore
-func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
+func (o *Object) SetModTime(modTime time.Time) error {
 	// Dropbox doesn't have a way of doing this so returning this
 	// error will cause the file to be deleted first then
 	// re-uploaded to set the time.
@@ -995,13 +905,9 @@ func (o *Object) Storable() bool {
 }
 
 // Open an object for read
-func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
-	fs.FixRangeOption(options, o.bytes)
+func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	headers := fs.OpenOptionHeaders(options)
-	arg := files.DownloadArg{
-		Path: enc.FromStandardPath(o.remotePath()),
-		ExtraHeaders: headers,
-	}
+	arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers}
 	err = o.fs.pacer.Call(func() (bool, error) {
 		_, in, err = o.fs.srv.Download(&arg)
 		return shouldRetry(err)
@@ -1010,7 +916,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		switch e := err.(type) {
 		case files.DownloadAPIError:
 			// Don't attempt to retry copyright violation errors
-			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
+			if e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
 				return nil, fserrors.NoRetryError(err)
 			}
 		}
@@ -1125,15 +1031,16 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
 // Copy the reader into the object updating modTime and size
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 	remote := o.remotePath()
 	if ignoredFiles.MatchString(remote) {
-		return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
+		fs.Logf(o, "File name disallowed - not uploading")
+		return nil
 	}
-	commitInfo := files.NewCommitInfo(enc.FromStandardPath(o.remotePath()))
+	commitInfo := files.NewCommitInfo(o.remotePath())
 	commitInfo.Mode.Tag = "overwrite"
 	// The Dropbox API only accepts timestamps in UTC with second precision.
-	commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
+	commitInfo.ClientModified = src.ModTime().UTC().Round(time.Second)
 
 	size := src.Size()
 	var err error
@@ -1153,11 +1060,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 }
 
 // Remove an object
-func (o *Object) Remove(ctx context.Context) (err error) {
+func (o *Object) Remove() (err error) {
 	err = o.fs.pacer.Call(func() (bool, error) {
-		_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
-			Path: enc.FromStandardPath(o.remotePath()),
-		})
+		_, err = o.fs.srv.DeleteV2(&files.DeleteArg{Path: o.remotePath()})
 		return shouldRetry(err)
 	})
 	return err
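The main behavioural change in the shouldRetry hunk above is on its left-hand side, which inspects the SDK's auth.RateLimitAPIError and converts the server-supplied Retry-After value into a pacer.RetryAfterError, so the pacer waits at least as long as Dropbox asks before the next attempt. A minimal, self-contained Go sketch of that pacing idea follows; it is illustrative only and does not reproduce rclone's actual lib/pacer API.

package main

import (
	"errors"
	"fmt"
	"time"
)

// call retries fn until it reports success or attempts run out. waitHint
// lets fn pass along a server-directed minimum sleep (e.g. Retry-After).
func call(fn func() (retry bool, waitHint time.Duration, err error), attempts int) error {
	sleep := 10 * time.Millisecond
	for i := 0; i < attempts; i++ {
		retry, hint, err := fn()
		if !retry {
			return err
		}
		if hint > sleep {
			sleep = hint // honour the server's Retry-After over our own backoff
		}
		time.Sleep(sleep)
		sleep *= 2 // exponential backoff when the server gives no hint
	}
	return errors.New("attempts exhausted")
}

func main() {
	n := 0
	err := call(func() (bool, time.Duration, error) {
		n++
		if n < 3 {
			// pretend the API answered 429 with Retry-After equivalent to 20ms
			return true, 20 * time.Millisecond, errors.New("too_many_requests")
		}
		return false, 0, nil
	}, 5)
	fmt.Println("finished after", n, "tries, err =", err)
}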
@@ -1,26 +1,17 @@
 // Test Dropbox filesystem interface
-package dropbox
+package dropbox_test
 
 import (
 	"testing"
 
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/backend/dropbox"
+	"github.com/ncw/rclone/fstest/fstests"
 )
 
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestDropbox:",
-		NilObject: (*Object)(nil),
-		ChunkedUpload: fstests.ChunkedUploadConfig{
-			MaxChunkSize: maxChunkSize,
-		},
+		NilObject: (*dropbox.Object)(nil),
 	})
 }
-
-func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-	return f.setUploadChunkSize(cs)
-}
-
-var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
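The test hunk above also explains why the ChunkedUpload settings vanish: the left-hand test is compiled into package dropbox and can reference the unexported maxChunkSize and Fs, while the right-hand test lives in the external package dropbox_test and only sees exported names such as dropbox.Object. A hypothetical in-package test file makes the visibility difference concrete (the file name and test body are invented for illustration):

// chunk_size_internal_test.go (hypothetical) - compiled into package
// dropbox, so unexported identifiers are visible here.
package dropbox

import "testing"

func TestChunkSizeBounds(t *testing.T) {
	// maxChunkSize and defaultChunkSize are unexported; an external
	// dropbox_test package could not reference either of them.
	if defaultChunkSize > maxChunkSize {
		t.Fatalf("default chunk size %v exceeds maximum %v", defaultChunkSize, maxChunkSize)
	}
}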
@@ -1,396 +0,0 @@
-package fichier
-
-import (
-	"context"
-	"io"
-	"net/http"
-	"regexp"
-	"strconv"
-	"time"
-
-	"github.com/pkg/errors"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/fserrors"
-	"github.com/rclone/rclone/lib/rest"
-)
-
-// retryErrorCodes is a slice of error codes that we will retry
-var retryErrorCodes = []int{
-	429, // Too Many Requests.
-	500, // Internal Server Error
-	502, // Bad Gateway
-	503, // Service Unavailable
-	504, // Gateway Timeout
-	509, // Bandwidth Limit Exceeded
-}
-
-// shouldRetry returns a boolean as to whether this resp and err
-// deserve to be retried. It returns the err as a convenience
-func shouldRetry(resp *http.Response, err error) (bool, error) {
-	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
-}
-
-var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString
-
-func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
-	request := DownloadRequest{
-		URL: url,
-		Single: 1,
-	}
-	opts := rest.Opts{
-		Method: "POST",
-		Path: "/download/get_token.cgi",
-	}
-
-	var token GetTokenResponse
-	err := f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
-		return shouldRetry(resp, err)
-	})
-	if err != nil {
-		return nil, errors.Wrap(err, "couldn't list files")
-	}
-
-	return &token, nil
-}
-
-func fileFromSharedFile(file *SharedFile) File {
-	return File{
-		URL: file.Link,
-		Filename: file.Filename,
-		Size: file.Size,
-	}
-}
-
-func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
-	opts := rest.Opts{
-		Method: "GET",
-		RootURL: "https://1fichier.com/dir/",
-		Path: id,
-		Parameters: map[string][]string{"json": {"1"}},
-	}
-
-	var sharedFiles SharedFolderResponse
-	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(ctx, &opts, nil, &sharedFiles)
-		return shouldRetry(resp, err)
-	})
-	if err != nil {
-		return nil, errors.Wrap(err, "couldn't list files")
-	}
-
-	entries = make([]fs.DirEntry, len(sharedFiles))
-
-	for i, sharedFile := range sharedFiles {
-		entries[i] = f.newObjectFromFile(ctx, "", fileFromSharedFile(&sharedFile))
-	}
-
-	return entries, nil
-}
-
-func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesList, err error) {
-	// fs.Debugf(f, "Requesting files for dir `%s`", directoryID)
-	request := ListFilesRequest{
-		FolderID: directoryID,
-	}
-
-	opts := rest.Opts{
-		Method: "POST",
-		Path: "/file/ls.cgi",
-	}
-
-	filesList = &FilesList{}
-	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(ctx, &opts, &request, filesList)
-		return shouldRetry(resp, err)
-	})
-	if err != nil {
-		return nil, errors.Wrap(err, "couldn't list files")
-	}
-	for i := range filesList.Items {
-		item := &filesList.Items[i]
-		item.Filename = enc.ToStandardName(item.Filename)
-	}
-
-	return filesList, nil
-}
-
-func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *FoldersList, err error) {
-	// fs.Debugf(f, "Requesting folders for id `%s`", directoryID)
-
-	request := ListFolderRequest{
-		FolderID: directoryID,
-	}
-
-	opts := rest.Opts{
-		Method: "POST",
-		Path: "/folder/ls.cgi",
-	}
-
-	foldersList = &FoldersList{}
-	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(ctx, &opts, &request, foldersList)
-		return shouldRetry(resp, err)
-	})
-	if err != nil {
-		return nil, errors.Wrap(err, "couldn't list folders")
-	}
-	foldersList.Name = enc.ToStandardName(foldersList.Name)
-	for i := range foldersList.SubFolders {
-		folder := &foldersList.SubFolders[i]
-		folder.Name = enc.ToStandardName(folder.Name)
-	}
-
-	// fs.Debugf(f, "Got FoldersList for id `%s`", directoryID)
-
-	return foldersList, err
-}
-
-func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	err = f.dirCache.FindRoot(ctx, false)
-	if err != nil {
-		return nil, err
-	}
-
-	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
-	if err != nil {
-		return nil, err
-	}
-
-	folderID, err := strconv.Atoi(directoryID)
-	if err != nil {
-		return nil, err
-	}
-
-	files, err := f.listFiles(ctx, folderID)
-	if err != nil {
-		return nil, err
-	}
-
-	folders, err := f.listFolders(ctx, folderID)
-	if err != nil {
-		return nil, err
-	}
-
-	entries = make([]fs.DirEntry, len(files.Items)+len(folders.SubFolders))
-
-	for i, item := range files.Items {
-		entries[i] = f.newObjectFromFile(ctx, dir, item)
-	}
-
-	for i, folder := range folders.SubFolders {
-		createDate, err := time.Parse("2006-01-02 15:04:05", folder.CreateDate)
-		if err != nil {
-			return nil, err
-		}
-
-		fullPath := getRemote(dir, folder.Name)
-		folderID := strconv.Itoa(folder.ID)
-
-		entries[len(files.Items)+i] = fs.NewDir(fullPath, createDate).SetID(folderID)
-
-		// fs.Debugf(f, "Put Path `%s` for id `%d` into dircache", fullPath, folder.ID)
-		f.dirCache.Put(fullPath, folderID)
-	}
-
-	return entries, nil
-}
-
-func (f *Fs) newObjectFromFile(ctx context.Context, dir string, item File) *Object {
-	return &Object{
-		fs: f,
-		remote: getRemote(dir, item.Filename),
-		file: item,
-	}
-}
-
-func getRemote(dir, fileName string) string {
-	if dir == "" {
-		return fileName
-	}
-
-	return dir + "/" + fileName
-}
-
-func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (response *MakeFolderResponse, err error) {
-	name := enc.FromStandardName(leaf)
-	// fs.Debugf(f, "Creating folder `%s` in id `%s`", name, directoryID)
-
-	request := MakeFolderRequest{
-		FolderID: folderID,
-		Name: name,
-	}
-
-	opts := rest.Opts{
-		Method: "POST",
-		Path: "/folder/mkdir.cgi",
-	}
-
-	response = &MakeFolderResponse{}
-	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(ctx, &opts, &request, response)
-		return shouldRetry(resp, err)
-	})
-	if err != nil {
-		return nil, errors.Wrap(err, "couldn't create folder")
-	}
-
-	// fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID)
-
-	return response, err
-}
-
-func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (response *GenericOKResponse, err error) {
-	// fs.Debugf(f, "Removing folder with id `%s`", directoryID)
-
-	request := &RemoveFolderRequest{
-		FolderID: folderID,
-	}
-
-	opts := rest.Opts{
-		Method: "POST",
-		Path: "/folder/rm.cgi",
-	}
-
-	response = &GenericOKResponse{}
-	var resp *http.Response
-	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.rest.CallJSON(ctx, &opts, request, response)
-		return shouldRetry(resp, err)
-	})
-	if err != nil {
-		return nil, errors.Wrap(err, "couldn't remove folder")
-	}
-	if response.Status != "OK" {
-		return nil, errors.New("Can't remove non-empty dir")
-	}
-
-	// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
-
-	return response, nil
-}
-
-func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKResponse, err error) {
-	request := &RemoveFileRequest{
-		Files: []RmFile{
-			{url},
-		},
-	}
-
-	opts := rest.Opts{
-		Method: "POST",
-		Path: "/file/rm.cgi",
-	}
-
-	response = &GenericOKResponse{}
-	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(ctx, &opts, request, response)
-		return shouldRetry(resp, err)
-	})
-
-	if err != nil {
-		return nil, errors.Wrap(err, "couldn't remove file")
-	}
-
-	// fs.Debugf(f, "Removed file with url `%s`", url)
-
-	return response, nil
-}
-
-func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
-	// fs.Debugf(f, "Requesting Upload node")
-
-	opts := rest.Opts{
-		Method: "GET",
-		ContentType: "application/json", // 1Fichier API is bad
-		Path: "/upload/get_upload_server.cgi",
-	}
-
-	response = &GetUploadNodeResponse{}
-	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
-		return shouldRetry(resp, err)
-	})
-	if err != nil {
-		return nil, errors.Wrap(err, "didnt got an upload node")
-	}
-
-	// fs.Debugf(f, "Got Upload node")
-
-	return response, err
-}
-
-func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
-	// fs.Debugf(f, "Uploading File `%s`", fileName)
-
-	fileName = enc.FromStandardName(fileName)
-
-	if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
-		return nil, errors.New("Invalid UploadID")
-	}
-
-	opts := rest.Opts{
-		Method: "POST",
-		Path: "/upload.cgi",
-		Parameters: map[string][]string{
-			"id": {uploadID},
-		},
-		NoResponse: true,
-		Body: in,
-		ContentLength: &size,
-		MultipartContentName: "file[]",
-		MultipartFileName: fileName,
-		MultipartParams: map[string][]string{
-			"did": {folderID},
-		},
-	}
-
-	if node != "" {
-		opts.RootURL = "https://" + node
-	}
-
-	err = f.pacer.CallNoRetry(func() (bool, error) {
-		resp, err := f.rest.CallJSON(ctx, &opts, nil, nil)
-		return shouldRetry(resp, err)
-	})
-
-	if err != nil {
-		return nil, errors.Wrap(err, "couldn't upload file")
-	}
-
-	// fs.Debugf(f, "Uploaded File `%s`", fileName)
-
-	return response, err
-}
-
-func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
-	// fs.Debugf(f, "Ending File Upload `%s`", uploadID)
-
-	if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
-		return nil, errors.New("Invalid UploadID")
-	}
-
-	opts := rest.Opts{
-		Method: "GET",
-		Path: "/end.pl",
-		RootURL: "https://" + nodeurl,
-		Parameters: map[string][]string{
-			"xid": {uploadID},
-		},
-		ExtraHeaders: map[string]string{
-			"JSON": "1",
-		},
-	}
-
-	response = &EndFileUploadResponse{}
-	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
-		return shouldRetry(resp, err)
-	})
-
-	if err != nil {
-		return nil, errors.Wrap(err, "couldn't finish file upload")
-	}
-
-	return response, err
-}
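Every call in the deleted api.go above funnels through the same retry recipe: build a rest.Opts, run the request inside f.pacer.Call, and let shouldRetry decide from the error or the HTTP status whether the pacer should try again. A condensed, dependency-free sketch of just the status-code gate (not the fserrors implementation) looks like this:

package main

import (
	"fmt"
	"net/http"
)

// retryCodes mirrors retryErrorCodes in the deleted file: statuses that are
// usually transient. 509 is non-standard but used by some hosts for
// bandwidth-limit responses.
var retryCodes = map[int]bool{429: true, 500: true, 502: true, 503: true, 504: true, 509: true}

// shouldRetryHTTP reports whether the response status alone justifies a retry.
func shouldRetryHTTP(resp *http.Response) bool {
	return resp != nil && retryCodes[resp.StatusCode]
}

func main() {
	fmt.Println(shouldRetryHTTP(&http.Response{StatusCode: 503})) // true
	fmt.Println(shouldRetryHTTP(&http.Response{StatusCode: 404})) // false
}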
@@ -1,413 +0,0 @@
|
|||||||
package fichier
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/rclone/rclone/fs"
|
|
||||||
"github.com/rclone/rclone/fs/config/configmap"
|
|
||||||
"github.com/rclone/rclone/fs/config/configstruct"
|
|
||||||
"github.com/rclone/rclone/fs/encodings"
|
|
||||||
"github.com/rclone/rclone/fs/fshttp"
|
|
||||||
"github.com/rclone/rclone/fs/hash"
|
|
||||||
"github.com/rclone/rclone/lib/dircache"
|
|
||||||
"github.com/rclone/rclone/lib/pacer"
|
|
||||||
"github.com/rclone/rclone/lib/rest"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
rootID = "0"
|
|
||||||
apiBaseURL = "https://api.1fichier.com/v1"
|
|
||||||
minSleep = 334 * time.Millisecond // 3 API calls per second is recommended
|
|
||||||
maxSleep = 5 * time.Second
|
|
||||||
decayConstant = 2 // bigger for slower decay, exponential
|
|
||||||
)
|
|
||||||
|
|
||||||
const enc = encodings.Fichier
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
fs.Register(&fs.RegInfo{
|
|
||||||
Name: "fichier",
|
|
||||||
Description: "1Fichier",
|
|
||||||
Config: func(name string, config configmap.Mapper) {
|
|
||||||
},
|
|
||||||
NewFs: NewFs,
|
|
||||||
Options: []fs.Option{
|
|
||||||
{
|
|
||||||
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
|
|
||||||
Name: "api_key",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Help: "If you want to download a shared folder, add this parameter",
|
|
||||||
Name: "shared_folder",
|
|
||||||
Required: false,
|
|
||||||
Advanced: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Options defines the configuration for this backend
|
|
||||||
type Options struct {
|
|
||||||
APIKey string `config:"api_key"`
|
|
||||||
SharedFolder string `config:"shared_folder"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fs is the interface a cloud storage system must provide
|
|
||||||
type Fs struct {
|
|
||||||
root string
|
|
||||||
name string
|
|
||||||
features *fs.Features
|
|
||||||
dirCache *dircache.DirCache
|
|
||||||
baseClient *http.Client
|
|
||||||
options *Options
|
|
||||||
pacer *fs.Pacer
|
|
||||||
rest *rest.Client
|
|
||||||
}
|
|
||||||
|
|
||||||
// FindLeaf finds a directory of name leaf in the folder with ID pathID
|
|
||||||
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
|
|
||||||
folderID, err := strconv.Atoi(pathID)
|
|
||||||
if err != nil {
|
|
||||||
return "", false, err
|
|
||||||
}
|
|
||||||
folders, err := f.listFolders(ctx, folderID)
|
|
||||||
if err != nil {
|
|
||||||
return "", false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, folder := range folders.SubFolders {
|
|
||||||
if folder.Name == leaf {
|
|
||||||
pathIDOut := strconv.Itoa(folder.ID)
|
|
||||||
return pathIDOut, true, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateDir makes a directory with pathID as parent and name leaf
|
|
||||||
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
|
|
||||||
folderID, err := strconv.Atoi(pathID)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
resp, err := f.makeFolder(ctx, leaf, folderID)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return strconv.Itoa(resp.FolderID), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Name of the remote (as passed into NewFs)
|
|
||||||
func (f *Fs) Name() string {
|
|
||||||
return f.name
|
|
||||||
}
|
|
||||||
|
|
||||||
// Root of the remote (as passed into NewFs)
|
|
||||||
func (f *Fs) Root() string {
|
|
||||||
return f.root
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a description of the FS
|
|
||||||
func (f *Fs) String() string {
|
|
||||||
return fmt.Sprintf("1Fichier root '%s'", f.root)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Precision of the ModTimes in this Fs
|
|
||||||
func (f *Fs) Precision() time.Duration {
|
|
||||||
return fs.ModTimeNotSupported
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hashes returns the supported hash types of the filesystem
|
|
||||||
func (f *Fs) Hashes() hash.Set {
|
|
||||||
return hash.Set(hash.Whirlpool)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Features returns the optional features of this Fs
|
|
||||||
func (f *Fs) Features() *fs.Features {
|
|
||||||
return f.features
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFs makes a new Fs object from the path
|
|
||||||
//
|
|
||||||
// The path is of the form remote:path
|
|
||||||
//
|
|
||||||
// Remotes are looked up in the config file. If the remote isn't
|
|
||||||
// found then NotFoundInConfigFile will be returned.
|
|
||||||
//
|
|
||||||
// On Windows avoid single character remote names as they can be mixed
|
|
||||||
// up with drive letters.
|
|
||||||
func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
|
|
||||||
opt := new(Options)
|
|
||||||
err := configstruct.Set(config, opt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// If using a Shared Folder override root
|
|
||||||
if opt.SharedFolder != "" {
|
|
||||||
root = ""
|
|
||||||
}
|
|
||||||
|
|
||||||
//workaround for wonky parser
|
|
||||||
root = strings.Trim(root, "/")
|
|
||||||
|
|
||||||
f := &Fs{
|
|
||||||
name: name,
|
|
||||||
root: root,
|
|
||||||
options: opt,
|
|
||||||
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
|
||||||
baseClient: &http.Client{},
|
|
||||||
}
|
|
||||||
|
|
||||||
f.features = (&fs.Features{
|
|
||||||
DuplicateFiles: true,
|
|
||||||
CanHaveEmptyDirectories: true,
|
|
||||||
}).Fill(f)
|
|
||||||
|
|
||||||
client := fshttp.NewClient(fs.Config)
|
|
||||||
|
|
||||||
f.rest = rest.NewClient(client).SetRoot(apiBaseURL)
|
|
||||||
|
|
||||||
f.rest.SetHeader("Authorization", "Bearer "+f.options.APIKey)
|
|
||||||
|
|
||||||
f.dirCache = dircache.New(root, rootID, f)
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
// Find the current root
|
|
||||||
err = f.dirCache.FindRoot(ctx, false)
|
|
||||||
if err != nil {
|
|
||||||
// Assume it is a file
|
|
||||||
newRoot, remote := dircache.SplitPath(root)
|
|
||||||
tempF := *f
|
|
||||||
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
|
|
||||||
tempF.root = newRoot
|
|
||||||
// Make new Fs which is the parent
|
|
||||||
err = tempF.dirCache.FindRoot(ctx, false)
|
|
||||||
if err != nil {
|
|
||||||
// No root so return old f
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
_, err := tempF.NewObject(ctx, remote)
|
|
||||||
if err != nil {
|
|
||||||
if err == fs.ErrorObjectNotFound {
|
|
||||||
// File doesn't exist so return old f
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
f.features.Fill(&tempF)
|
|
||||||
// XXX: update the old f here instead of returning tempF, since
|
|
||||||
// `features` were already filled with functions having *f as a receiver.
|
|
||||||
// See https://github.com/rclone/rclone/issues/2182
|
|
||||||
f.dirCache = tempF.dirCache
|
|
||||||
f.root = tempF.root
|
|
||||||
// return an error with an fs which points to the parent
|
|
||||||
return f, fs.ErrorIsFile
|
|
||||||
}
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// List the objects and directories in dir into entries. The
|
|
||||||
// entries can be returned in any order but should be for a
|
|
||||||
// complete directory.
|
|
||||||
//
|
|
||||||
// dir should be "" to list the root, and should not have
|
|
||||||
// trailing slashes.
|
|
||||||
//
|
|
||||||
// This should return ErrDirNotFound if the directory isn't
|
|
||||||
// found.
|
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
|
||||||
if f.options.SharedFolder != "" {
|
|
||||||
return f.listSharedFiles(ctx, f.options.SharedFolder)
|
|
||||||
}
|
|
||||||
|
|
||||||
dirContent, err := f.listDir(ctx, dir)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return dirContent, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewObject finds the Object at remote. If it can't be found
|
|
||||||
// it returns the error ErrorObjectNotFound.
|
|
||||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
|
||||||
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false)
|
|
||||||
if err != nil {
|
|
||||||
if err == fs.ErrorDirNotFound {
|
|
||||||
return nil, fs.ErrorObjectNotFound
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
folderID, err := strconv.Atoi(directoryID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
files, err := f.listFiles(ctx, folderID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, file := range files.Items {
|
|
||||||
if file.Filename == leaf {
|
|
||||||
path, ok := f.dirCache.GetInv(directoryID)
|
|
||||||
|
|
||||||
if !ok {
|
|
||||||
return nil, errors.New("Cannot find dir in dircache")
|
|
||||||
}
|
|
||||||
|
|
||||||
return f.newObjectFromFile(ctx, path, file), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, fs.ErrorObjectNotFound
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put in to the remote path with the modTime given of the given size
|
|
||||||
//
|
|
||||||
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
|
|
||||||
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
|
|
||||||
// return an error or upload it properly (rather than e.g. calling panic).
|
|
||||||
//
|
|
||||||
// May create the object even if it returns an error - if so
|
|
||||||
// will return the object and the error, otherwise will return
|
|
||||||
// nil and the error
|
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
|
||||||
exisitingObj, err := f.NewObject(ctx, src.Remote())
|
|
||||||
switch err {
|
|
||||||
case nil:
|
|
||||||
return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
|
|
||||||
case fs.ErrorObjectNotFound:
|
|
||||||
// Not found so create it
|
|
||||||
return f.PutUnchecked(ctx, in, src, options...)
|
|
||||||
default:
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// putUnchecked uploads the object with the given name and size
|
|
||||||
//
|
|
||||||
// This will create a duplicate if we upload a new file without
|
|
||||||
// checking to see if there is one already - use Put() for that.
|
|
||||||
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
|
|
||||||
if size > int64(100e9) {
|
|
||||||
return nil, errors.New("File too big, cant upload")
|
|
||||||
} else if size == 0 {
|
|
||||||
return nil, fs.ErrorCantUploadEmptyFiles
|
|
||||||
}
|
|
||||||
|
|
||||||
nodeResponse, err := f.getUploadNode(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
fileUploadResponse, err := f.endUpload(ctx, nodeResponse.ID, nodeResponse.URL)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(fileUploadResponse.Links) != 1 {
|
|
||||||
return nil, errors.New("unexpected amount of files")
|
|
||||||
}
|
|
||||||
|
|
||||||
link := fileUploadResponse.Links[0]
|
|
||||||
fileSize, err := strconv.ParseInt(link.Size, 10, 64)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &Object{
|
|
||||||
fs: f,
|
|
||||||
remote: remote,
|
|
||||||
file: File{
|
|
||||||
ACL: 0,
|
|
||||||
CDN: 0,
|
|
||||||
Checksum: link.Whirlpool,
|
|
||||||
ContentType: "",
|
|
||||||
Date: time.Now().Format("2006-01-02 15:04:05"),
|
|
||||||
Filename: link.Filename,
|
|
||||||
Pass: 0,
|
|
||||||
Size: fileSize,
|
|
||||||
URL: link.Download,
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.putUnchecked(ctx, in, src.Remote(), src.Size(), options...)
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	err := f.dirCache.FindRoot(ctx, true)
	if err != nil {
		return err
	}
	if dir != "" {
		_, err = f.dirCache.FindDir(ctx, dir, true)
	}
	return err
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	err := f.dirCache.FindRoot(ctx, false)
	if err != nil {
		return err
	}

	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return err
	}

	folderID, err := strconv.Atoi(directoryID)
	if err != nil {
		return err
	}

	_, err = f.removeFolder(ctx, dir, folderID)
	if err != nil {
		return err
	}

	f.dirCache.FlushDir(dir)

	return nil
}
// Check the interfaces are satisfied
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.PutUncheckeder  = (*Fs)(nil)
	_ dircache.DirCacher = (*Fs)(nil)
)
@@ -1,17 +0,0 @@
// Test 1Fichier filesystem interface
package fichier

import (
	"testing"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fs.Config.LogLevel = fs.LogLevelDebug
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestFichier:",
	})
}
@@ -1,158 +0,0 @@
package fichier

import (
	"context"
	"io"
	"net/http"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/rest"
)

// Object is a filesystem-like object provided by an Fs
type Object struct {
	fs     *Fs
	remote string
	file   File
}

// String returns a description of the Object
func (o *Object) String() string {
	return o.file.Filename
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(ctx context.Context) time.Time {
	modTime, err := time.Parse("2006-01-02 15:04:05", o.file.Date)
	if err != nil {
		return time.Now()
	}
	return modTime
}
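Go parses timestamps against the fixed reference time Mon Jan 2 15:04:05 MST 2006, so the layout string above is a literal sample of the expected format, not a placeholder. A quick illustration:

	t, err := time.Parse("2006-01-02 15:04:05", "2019-07-28 09:30:00")
	// err == nil and t is 2019-07-28 09:30:00 UTC; a malformed Date
	// string makes ModTime above fall back to time.Now()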
// Size returns the size of the file
func (o *Object) Size() int64 {
	return o.file.Size
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.Whirlpool {
		return "", hash.ErrUnsupported
	}
	return o.file.Checksum, nil
}

// Storable says whether this object can be stored
func (o *Object) Storable() bool {
	return true
}

// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(context.Context, time.Time) error {
	return fs.ErrorCantSetModTime
	//return errors.New("setting modtime is not supported for 1fichier remotes")
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	fs.FixRangeOption(options, o.file.Size)
	downloadToken, err := o.fs.getDownloadToken(ctx, o.file.URL)
	if err != nil {
		return nil, err
	}

	var resp *http.Response
	opts := rest.Opts{
		Method:  "GET",
		RootURL: downloadToken.URL,
		Options: options,
	}

	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.rest.Call(ctx, &opts)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}
	return resp.Body, err
}
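Callers can pass open options to read a sub-range of the file; assuming rclone's fs.RangeOption as used elsewhere in the codebase, the rest client translates it into an HTTP Range header:

	// Hypothetical usage sketch: read only the first KiB of the object.
	rc, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})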
// Update the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Update should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	if src.Size() < 0 {
		return errors.New("refusing to update with unknown size")
	}

	// upload with new size but old name; this creates a duplicate,
	// so the old version is deleted only after the upload succeeds
	info, err := o.fs.putUnchecked(ctx, in, o.Remote(), src.Size(), options...)
	if err != nil {
		return err
	}

	// Delete duplicate after successful upload
	err = o.Remove(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to remove old version")
	}

	// Replace guts of old object with new one
	*o = *info.(*Object)

	return nil
}

// Remove removes this object
func (o *Object) Remove(ctx context.Context) error {
	// fs.Debugf(f, "Removing file `%s` with url `%s`", o.file.Filename, o.file.URL)

	_, err := o.fs.deleteFile(ctx, o.file.URL)
	if err != nil {
		return err
	}

	return nil
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	return o.file.ContentType
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	return o.file.URL
}

// Check the interfaces are satisfied
var (
	_ fs.Object    = (*Object)(nil)
	_ fs.MimeTyper = (*Object)(nil)
	_ fs.IDer      = (*Object)(nil)
)
@@ -1,120 +0,0 @@
package fichier

// ListFolderRequest is the request structure of the corresponding request
type ListFolderRequest struct {
	FolderID int `json:"folder_id"`
}

// ListFilesRequest is the request structure of the corresponding request
type ListFilesRequest struct {
	FolderID int `json:"folder_id"`
}

// DownloadRequest is the request structure of the corresponding request
type DownloadRequest struct {
	URL    string `json:"url"`
	Single int    `json:"single"`
}

// RemoveFolderRequest is the request structure of the corresponding request
type RemoveFolderRequest struct {
	FolderID int `json:"folder_id"`
}

// RemoveFileRequest is the request structure of the corresponding request
type RemoveFileRequest struct {
	Files []RmFile `json:"files"`
}

// RmFile is the request structure of the corresponding request
type RmFile struct {
	URL string `json:"url"`
}

// GenericOKResponse is the response structure of the corresponding request
type GenericOKResponse struct {
	Status  string `json:"status"`
	Message string `json:"message"`
}

// MakeFolderRequest is the request structure of the corresponding request
type MakeFolderRequest struct {
	Name     string `json:"name"`
	FolderID int    `json:"folder_id"`
}

// MakeFolderResponse is the response structure of the corresponding request
type MakeFolderResponse struct {
	Name     string `json:"name"`
	FolderID int    `json:"folder_id"`
}

// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
	ID  string `json:"id"`
	URL string `json:"url"`
}

// GetTokenResponse is the response structure of the corresponding request
type GetTokenResponse struct {
	URL     string `json:"url"`
	Status  string `json:"Status"`
	Message string `json:"Message"`
}

// SharedFolderResponse is the response structure of the corresponding request
type SharedFolderResponse []SharedFile

// SharedFile is the structure in which 1Fichier returns a shared File
type SharedFile struct {
	Filename string `json:"filename"`
	Link     string `json:"link"`
	Size     int64  `json:"size"`
}

// EndFileUploadResponse is the response structure of the corresponding request
type EndFileUploadResponse struct {
	Incoming int `json:"incoming"`
	Links    []struct {
		Download  string `json:"download"`
		Filename  string `json:"filename"`
		Remove    string `json:"remove"`
		Size      string `json:"size"`
		Whirlpool string `json:"whirlpool"`
	} `json:"links"`
}

// File is the structure in which 1Fichier returns a File
type File struct {
	ACL         int    `json:"acl"`
	CDN         int    `json:"cdn"`
	Checksum    string `json:"checksum"`
	ContentType string `json:"content-type"`
	Date        string `json:"date"`
	Filename    string `json:"filename"`
	Pass        int    `json:"pass"`
	Size        int64  `json:"size"`
	URL         string `json:"url"`
}

// FilesList is the structure in which 1Fichier returns a list of files
type FilesList struct {
	Items  []File `json:"items"`
	Status string `json:"Status"`
}

// Folder is the structure in which 1Fichier returns a Folder
type Folder struct {
	CreateDate string `json:"create_date"`
	ID         int    `json:"id"`
	Name       string `json:"name"`
	Pass       int    `json:"pass"`
}

// FoldersList is the structure in which 1Fichier returns a list of Folders
type FoldersList struct {
	FolderID   int      `json:"folder_id"`
	Name       string   `json:"name"`
	Status     string   `json:"Status"`
	SubFolders []Folder `json:"sub_folders"`
}
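These structs map 1Fichier's JSON wire format directly; note that EndFileUploadResponse carries the size as a string. A minimal decoding sketch, assuming the encoding/json and fmt imports (the payload is illustrative, not a captured server response):

	var resp EndFileUploadResponse
	payload := []byte(`{"incoming":0,"links":[{"download":"https://1fichier.com/?xyz","filename":"a.txt","size":"5","whirlpool":"abc"}]}`)
	if err := json.Unmarshal(payload, &resp); err == nil {
		fmt.Println(resp.Links[0].Size) // "5" - a string, not a number
	}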
@@ -2,8 +2,6 @@
 package ftp

 import (
-	"context"
-	"crypto/tls"
 	"io"
 	"net/textproto"
 	"os"
@@ -12,19 +10,15 @@ import (
 	"time"

 	"github.com/jlaffaye/ftp"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
+	"github.com/ncw/rclone/fs/config/obscure"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/lib/readers"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/config/obscure"
-	"github.com/rclone/rclone/fs/encodings"
-	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/lib/pacer"
-	"github.com/rclone/rclone/lib/readers"
 )

-const enc = encodings.FTP
-
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{
@@ -51,25 +45,6 @@ func init() {
 			Help:       "FTP password",
 			IsPassword: true,
 			Required:   true,
-		}, {
-			Name:    "tls",
-			Help:    "Use FTP over TLS (Implicit)",
-			Default: false,
-		}, {
-			Name:     "concurrency",
-			Help:     "Maximum number of FTP simultaneous connections, 0 for unlimited",
-			Default:  0,
-			Advanced: true,
-		}, {
-			Name:     "no_check_certificate",
-			Help:     "Do not verify the TLS certificate of the server",
-			Default:  false,
-			Advanced: true,
-		}, {
-			Name:     "disable_epsv",
-			Help:     "Disable using EPSV even if server advertises support",
-			Default:  false,
-			Advanced: true,
 		},
 	},
 	})
@@ -77,14 +52,10 @@ func init() {

 // Options defines the configuration for this backend
 type Options struct {
 	Host string `config:"host"`
 	User string `config:"user"`
 	Pass string `config:"pass"`
 	Port string `config:"port"`
-	TLS               bool `config:"tls"`
-	Concurrency       int  `config:"concurrency"`
-	SkipVerifyTLSCert bool `config:"no_check_certificate"`
-	DisableEPSV       bool `config:"disable_epsv"`
 }

 // Fs represents a remote FTP server
@@ -99,7 +70,6 @@ type Fs struct {
 	dialAddr string
 	poolMu   sync.Mutex
 	pool     []*ftp.ServerConn
-	tokens   *pacer.TokenDispenser
 }

 // Object describes an FTP file
@@ -142,18 +112,7 @@ func (f *Fs) Features() *fs.Features {
 // Open a new connection to the FTP server.
 func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
 	fs.Debugf(f, "Connecting to FTP server")
-	ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)}
-	if f.opt.TLS {
-		tlsConfig := &tls.Config{
-			ServerName:         f.opt.Host,
-			InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
-		}
-		ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
-	}
-	if f.opt.DisableEPSV {
-		ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
-	}
-	c, err := ftp.Dial(f.dialAddr, ftpConfig...)
+	c, err := ftp.DialTimeout(f.dialAddr, fs.Config.ConnectTimeout)
 	if err != nil {
 		fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
 		return nil, errors.Wrap(err, "ftpConnection Dial")
@@ -169,9 +128,6 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {

 // Get an FTP connection from the pool, or open a new one
 func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
-	if f.opt.Concurrency > 0 {
-		f.tokens.Get()
-	}
 	f.poolMu.Lock()
 	if len(f.pool) > 0 {
 		c = f.pool[0]
@@ -191,9 +147,6 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
 // if err is not nil then it checks the connection is alive using a
 // NOOP request
 func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
-	if f.opt.Concurrency > 0 {
-		defer f.tokens.Put()
-	}
 	c := *pc
 	*pc = nil
 	if err != nil {
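The tokens field removed above caps concurrent FTP connections: Get blocks until a slot is free and Put releases it. A minimal sketch of the same idea with a buffered channel (illustrative only, not rclone's pacer.TokenDispenser implementation):

	type tokenDispenser chan struct{}

	func newTokenDispenser(n int) tokenDispenser { return make(tokenDispenser, n) }
	func (t tokenDispenser) Get()                { t <- struct{}{} } // blocks once n tokens are out
	func (t tokenDispenser) Put()                { <-t }             // returns a token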
@@ -213,9 +166,8 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
 	f.poolMu.Unlock()
 }

-// NewFs constructs an Fs from the path, container:path
+// NewFs contstructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
-	ctx := context.Background()
 	// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
 	// Parse config into Options struct
 	opt := new(Options)
@@ -237,11 +189,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 	}

 	dialAddr := opt.Host + ":" + port
-	protocol := "ftp://"
-	if opt.TLS {
-		protocol = "ftps://"
-	}
-	u := protocol + path.Join(dialAddr+"/", root)
+	u := "ftp://" + path.Join(dialAddr+"/", root)
 	f := &Fs{
 		name: name,
 		root: root,
@@ -250,7 +198,6 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 		user:     user,
 		pass:     pass,
 		dialAddr: dialAddr,
-		tokens:   pacer.NewTokenDispenser(opt.Concurrency),
 	}
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
@@ -268,7 +215,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 	if f.root == "." {
 		f.root = ""
 	}
-	_, err := f.NewObject(ctx, remote)
+	_, err := f.NewObject(remote)
 	if err != nil {
 		if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
 			// File doesn't exist so return old f
@@ -307,37 +254,10 @@ func translateErrorDir(err error) error {
 	return err
 }

-// entryToStandard converts an incoming ftp.Entry to Standard encoding
-func entryToStandard(entry *ftp.Entry) {
-	// Skip . and .. as we don't want these encoded
-	if entry.Name == "." || entry.Name == ".." {
-		return
-	}
-	entry.Name = enc.ToStandardName(entry.Name)
-	entry.Target = enc.ToStandardPath(entry.Target)
-}
-
-// dirFromStandardPath returns dir in encoded form.
-func dirFromStandardPath(dir string) string {
-	// Skip . and .. as we don't want these encoded
-	if dir == "." || dir == ".." {
-		return dir
-	}
-	return enc.FromStandardPath(dir)
-}
-
 // findItem finds a directory entry for the name in its parent directory
 func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
 	// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
 	fullPath := path.Join(f.root, remote)
-	if fullPath == "" || fullPath == "." || fullPath == "/" {
-		// if root, assume exists and synthesize an entry
-		return &ftp.Entry{
-			Name: "",
-			Type: ftp.EntryTypeFolder,
-			Time: time.Now(),
-		}, nil
-	}
 	dir := path.Dir(fullPath)
 	base := path.Base(fullPath)

@@ -345,13 +265,12 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "findItem")
 	}
-	files, err := c.List(dirFromStandardPath(dir))
+	files, err := c.List(dir)
 	f.putFtpConnection(&c, err)
 	if err != nil {
 		return nil, translateErrorFile(err)
 	}
 	for _, file := range files {
-		entryToStandard(file)
 		if file.Name == base {
 			return file, nil
 		}
@@ -361,7 +280,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {

 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
+func (f *Fs) NewObject(remote string) (o fs.Object, err error) {
 	// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
 	entry, err := f.findItem(remote)
 	if err != nil {
@@ -405,42 +324,17 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
+func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+	// defer fs.Trace(dir, "curlevel=%d", curlevel)("")
 	c, err := f.getFtpConnection()
 	if err != nil {
 		return nil, errors.Wrap(err, "list")
 	}
-
-	var listErr error
-	var files []*ftp.Entry
-
-	resultchan := make(chan []*ftp.Entry, 1)
-	errchan := make(chan error, 1)
-	go func() {
-		result, err := c.List(dirFromStandardPath(path.Join(f.root, dir)))
-		f.putFtpConnection(&c, err)
-		if err != nil {
-			errchan <- err
-			return
-		}
-		resultchan <- result
-	}()
-
-	// Wait for List for up to Timeout seconds
-	timer := time.NewTimer(fs.Config.Timeout)
-	select {
-	case listErr = <-errchan:
-		timer.Stop()
-		return nil, translateErrorDir(listErr)
-	case files = <-resultchan:
-		timer.Stop()
-	case <-timer.C:
-		// if timer fired assume no error but connection dead
-		fs.Errorf(f, "Timeout when waiting for List")
-		return nil, errors.New("Timeout when waiting for List")
+	files, err := c.List(path.Join(f.root, dir))
+	f.putFtpConnection(&c, err)
+	if err != nil {
+		return nil, translateErrorDir(err)
 	}
 	// Annoyingly FTP returns success for a directory which
 	// doesn't exist, so check it really doesn't exist if no
 	// entries found.
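The removed List body is the classic goroutine-plus-timer guard around a blocking call. A hedged sketch of the same shape written with a context deadline instead of a raw timer (an alternative formulation, not what either side of this diff does):

	// listWithTimeout wraps a blocking list call with a deadline.
	func listWithTimeout(ctx context.Context, d time.Duration, list func() ([]*ftp.Entry, error)) ([]*ftp.Entry, error) {
		ctx, cancel := context.WithTimeout(ctx, d)
		defer cancel()
		type result struct {
			files []*ftp.Entry
			err   error
		}
		ch := make(chan result, 1) // buffered: the goroutine can always send and exit
		go func() {
			files, err := list()
			ch <- result{files, err}
		}()
		select {
		case r := <-ch:
			return r.files, r.err
		case <-ctx.Done():
			return nil, ctx.Err() // deadline hit: assume the connection is dead
		}
	}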
@@ -455,7 +349,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	}
 	for i := range files {
 		object := files[i]
-		entryToStandard(object)
 		newremote := path.Join(dir, object.Name)
 		switch object.Type {
 		case ftp.EntryTypeFolder:
@@ -496,7 +389,7 @@ func (f *Fs) Precision() time.Duration {
 // May create the object even if it returns an error - if so
 // will return the object and the error, otherwise will return
 // nil and the error
-func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	// fs.Debugf(f, "Trying to put file %s", src.Remote())
 	err := f.mkParentDir(src.Remote())
 	if err != nil {
@@ -506,13 +399,13 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 		fs:     f,
 		remote: src.Remote(),
 	}
-	err = o.Update(ctx, in, src, options...)
+	err = o.Update(in, src, options...)
 	return o, err
 }

 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.Put(ctx, in, src, options...)
+func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	return f.Put(in, src, options...)
 }

 // getInfo reads the FileInfo for a path
@@ -525,21 +418,19 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "getInfo")
 	}
-	files, err := c.List(dirFromStandardPath(dir))
+	files, err := c.List(dir)
 	f.putFtpConnection(&c, err)
 	if err != nil {
 		return nil, translateErrorFile(err)
 	}

 	for i := range files {
-		file := files[i]
-		entryToStandard(file)
-		if file.Name == base {
+		if files[i].Name == base {
 			info := &FileInfo{
 				Name:    remote,
-				Size:    file.Size,
-				ModTime: file.Time,
-				IsDir:   file.Type == ftp.EntryTypeFolder,
+				Size:    files[i].Size,
+				ModTime: files[i].Time,
+				IsDir:   files[i].Type == ftp.EntryTypeFolder,
 			}
 			return info, nil
 		}
@@ -549,7 +440,6 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {

 // mkdir makes the directory and parents using unrooted paths
 func (f *Fs) mkdir(abspath string) error {
-	abspath = path.Clean(abspath)
 	if abspath == "." || abspath == "/" {
 		return nil
 	}
@@ -571,7 +461,7 @@ func (f *Fs) mkdir(abspath string) error {
 	if connErr != nil {
 		return errors.Wrap(connErr, "mkdir")
 	}
-	err = c.MakeDir(dirFromStandardPath(abspath))
+	err = c.MakeDir(abspath)
 	f.putFtpConnection(&c, err)
 	switch errX := err.(type) {
 	case *textproto.Error:
@@ -593,7 +483,7 @@ func (f *Fs) mkParentDir(remote string) error {
 }

 // Mkdir creates the directory if it doesn't exist
-func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
+func (f *Fs) Mkdir(dir string) (err error) {
 	// defer fs.Trace(dir, "")("err=%v", &err)
 	root := path.Join(f.root, dir)
 	return f.mkdir(root)
@@ -602,18 +492,18 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
-func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+func (f *Fs) Rmdir(dir string) error {
 	c, err := f.getFtpConnection()
 	if err != nil {
 		return errors.Wrap(translateErrorFile(err), "Rmdir")
 	}
-	err = c.RemoveDir(dirFromStandardPath(path.Join(f.root, dir)))
+	err = c.RemoveDir(path.Join(f.root, dir))
 	f.putFtpConnection(&c, err)
 	return translateErrorDir(err)
 }

 // Move renames a remote file object
-func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't move - not same remote type")
@@ -628,14 +518,14 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		return nil, errors.Wrap(err, "Move")
 	}
 	err = c.Rename(
-		enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
-		enc.FromStandardPath(path.Join(f.root, remote)),
+		path.Join(srcObj.fs.root, srcObj.remote),
+		path.Join(f.root, remote),
 	)
 	f.putFtpConnection(&c, err)
 	if err != nil {
 		return nil, errors.Wrap(err, "Move Rename failed")
 	}
-	dstObj, err := f.NewObject(ctx, remote)
+	dstObj, err := f.NewObject(remote)
 	if err != nil {
 		return nil, errors.Wrap(err, "Move NewObject failed")
 	}
@@ -650,7 +540,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 	srcFs, ok := src.(*Fs)
 	if !ok {
 		fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -682,8 +572,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 		return errors.Wrap(err, "DirMove")
 	}
 	err = c.Rename(
-		dirFromStandardPath(srcPath),
-		dirFromStandardPath(dstPath),
+		srcPath,
+		dstPath,
 	)
 	f.putFtpConnection(&c, err)
 	if err != nil {
@@ -713,7 +603,7 @@ func (o *Object) Remote() string {
 }

 // Hash returns the hash of an object returning a lowercase hex string
-func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
+func (o *Object) Hash(t hash.Type) (string, error) {
 	return "", hash.ErrUnsupported
 }
@@ -723,12 +613,12 @@ func (o *Object) Size() int64 {
 }

 // ModTime returns the modification time of the object
-func (o *Object) ModTime(ctx context.Context) time.Time {
+func (o *Object) ModTime() time.Time {
 	return o.info.ModTime
 }

 // SetModTime sets the modification time of the object
-func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
+func (o *Object) SetModTime(modTime time.Time) error {
 	return nil
 }

@@ -756,21 +646,7 @@ func (f *ftpReadCloser) Read(p []byte) (n int, err error) {

 // Close the FTP reader and return the connection to the pool
 func (f *ftpReadCloser) Close() error {
-	var err error
-	errchan := make(chan error, 1)
-	go func() {
-		errchan <- f.rc.Close()
-	}()
-	// Wait for Close for up to 60 seconds
-	timer := time.NewTimer(60 * time.Second)
-	select {
-	case err = <-errchan:
-		timer.Stop()
-	case <-timer.C:
-		// if timer fired assume no error but connection dead
-		fs.Errorf(f.f, "Timeout when waiting for connection Close")
-		return nil
-	}
+	err := f.rc.Close()
 	// if errors while reading or closing, dump the connection
 	if err != nil || f.err != nil {
 		_ = f.c.Quit()
@@ -789,7 +665,7 @@ func (f *ftpReadCloser) Close() error {
 }

 // Open an object for read
-func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
+func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
 	// defer fs.Trace(o, "")("rc=%v, err=%v", &rc, &err)
 	path := path.Join(o.fs.root, o.remote)
 	var offset, limit int64 = 0, -1
@@ -809,7 +685,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 	if err != nil {
 		return nil, errors.Wrap(err, "open")
 	}
-	fd, err := c.RetrFrom(enc.FromStandardPath(path), uint64(offset))
+	fd, err := c.RetrFrom(path, uint64(offset))
 	if err != nil {
 		o.fs.putFtpConnection(&c, err)
 		return nil, errors.Wrap(err, "open")
@@ -823,17 +699,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 // Copy the reader into the object updating modTime and size
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 	// defer fs.Trace(o, "src=%v", src)("err=%v", &err)
 	path := path.Join(o.fs.root, o.remote)
 	// remove the file if upload failed
 	remove := func() {
-		// Give the FTP server a chance to get its internal state in order after the error.
-		// The error may have been local in which case we closed the connection. The server
-		// may still be dealing with it for a moment. A sleep isn't ideal but I haven't been
-		// able to think of a better method to find out if the server has finished - ncw
-		time.Sleep(1 * time.Second)
-		removeErr := o.Remove(ctx)
+		removeErr := o.Remove()
 		if removeErr != nil {
 			fs.Debugf(o, "Failed to remove: %v", removeErr)
 		} else {
@@ -844,9 +715,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	if err != nil {
 		return errors.Wrap(err, "Update")
 	}
-	err = c.Stor(enc.FromStandardPath(path), in)
+	err = c.Stor(path, in)
 	if err != nil {
-		_ = c.Quit() // toss this connection to avoid sync errors
+		_ = c.Quit()
 		remove()
 		return errors.Wrap(err, "update stor")
 	}
@@ -859,7 +730,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 }

 // Remove an object
-func (o *Object) Remove(ctx context.Context) (err error) {
+func (o *Object) Remove() (err error) {
 	// defer fs.Trace(o, "")("err=%v", &err)
 	path := path.Join(o.fs.root, o.remote)
 	// Check if it's a directory or a file
@@ -868,13 +739,13 @@ func (o *Object) Remove(ctx context.Context) (err error) {
 		return err
 	}
 	if info.IsDir {
-		err = o.fs.Rmdir(ctx, o.remote)
+		err = o.fs.Rmdir(o.remote)
 	} else {
 		c, err := o.fs.getFtpConnection()
 		if err != nil {
 			return errors.Wrap(err, "Remove")
 		}
-		err = c.Delete(enc.FromStandardPath(path))
+		err = c.Delete(path)
 		o.fs.putFtpConnection(&c, err)
 	}
 	return err
@@ -4,8 +4,8 @@ package ftp_test
 import (
 	"testing"

-	"github.com/rclone/rclone/backend/ftp"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/backend/ftp"
+	"github.com/ncw/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote
@@ -13,7 +13,6 @@ FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 erro
 */

 import (
-	"context"
 	"encoding/base64"
 	"encoding/hex"
 	"fmt"
@@ -23,28 +22,26 @@ import (
 	"net/http"
 	"os"
 	"path"
+	"regexp"
 	"strings"
+	"sync"
 	"time"

+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
+	"github.com/ncw/rclone/fs/config/obscure"
+	"github.com/ncw/rclone/fs/fserrors"
+	"github.com/ncw/rclone/fs/fshttp"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/fs/walk"
+	"github.com/ncw/rclone/lib/oauthutil"
+	"github.com/ncw/rclone/lib/pacer"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/config/obscure"
-	"github.com/rclone/rclone/fs/encodings"
-	"github.com/rclone/rclone/fs/fserrors"
-	"github.com/rclone/rclone/fs/fshttp"
-	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/walk"
-	"github.com/rclone/rclone/lib/bucket"
-	"github.com/rclone/rclone/lib/oauthutil"
-	"github.com/rclone/rclone/lib/pacer"
 	"golang.org/x/oauth2"
 	"golang.org/x/oauth2/google"
 	"google.golang.org/api/googleapi"
-
-	// NOTE: This API is deprecated
 	storage "google.golang.org/api/storage/v1"
 )
@@ -61,7 +58,7 @@ const (
 var (
 	// Description of how to auth for this app
 	storageConfig = &oauth2.Config{
-		Scopes:       []string{storage.DevstorageReadWriteScope},
+		Scopes:       []string{storage.DevstorageFullControlScope},
 		Endpoint:     google.Endpoint,
 		ClientID:     rcloneClientID,
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
@@ -69,8 +66,6 @@ var (
 	}
 )

-const enc = encodings.GoogleCloudStorage
-
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{
@@ -146,22 +141,6 @@ func init() {
 			Value: "publicReadWrite",
 			Help:  "Project team owners get OWNER access, and all Users get WRITER access.",
 		}},
-	}, {
-		Name: "bucket_policy_only",
-		Help: `Access checks should use bucket-level IAM policies.
-
-If you want to upload objects to a bucket with Bucket Policy Only set
-then you will need to set this.
-
-When it is set, rclone:
-
-- ignores ACLs set on buckets
-- ignores ACLs set on objects
-- creates buckets with Bucket Policy Only set
-
-Docs: https://cloud.google.com/storage/docs/bucket-policy-only
-`,
-		Default: false,
 	}, {
 		Name: "location",
 		Help: "Location for the newly created buckets.",
@@ -180,36 +159,21 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
 		}, {
 			Value: "asia-east1",
 			Help:  "Taiwan.",
-		}, {
-			Value: "asia-east2",
-			Help:  "Hong Kong.",
 		}, {
 			Value: "asia-northeast1",
 			Help:  "Tokyo.",
-		}, {
-			Value: "asia-south1",
-			Help:  "Mumbai.",
 		}, {
 			Value: "asia-southeast1",
 			Help:  "Singapore.",
 		}, {
 			Value: "australia-southeast1",
 			Help:  "Sydney.",
-		}, {
-			Value: "europe-north1",
-			Help:  "Finland.",
 		}, {
 			Value: "europe-west1",
 			Help:  "Belgium.",
 		}, {
 			Value: "europe-west2",
 			Help:  "London.",
-		}, {
-			Value: "europe-west3",
-			Help:  "Frankfurt.",
-		}, {
-			Value: "europe-west4",
-			Help:  "Netherlands.",
 		}, {
 			Value: "us-central1",
 			Help:  "Iowa.",
@@ -222,9 +186,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
 		}, {
 			Value: "us-west1",
 			Help:  "Oregon.",
-		}, {
-			Value: "us-west2",
-			Help:  "California.",
 		}},
 	}, {
 		Name: "storage_class",
@@ -259,23 +220,22 @@ type Options struct {
 	ServiceAccountCredentials string `config:"service_account_credentials"`
 	ObjectACL                 string `config:"object_acl"`
 	BucketACL                 string `config:"bucket_acl"`
-	BucketPolicyOnly          bool   `config:"bucket_policy_only"`
 	Location                  string `config:"location"`
 	StorageClass              string `config:"storage_class"`
 }

 // Fs represents a remote storage server
 type Fs struct {
 	name     string           // name of this remote
 	root     string           // the path we are working on if any
 	opt      Options          // parsed options
 	features *fs.Features     // optional features
 	svc      *storage.Service // the connection to the storage server
 	client   *http.Client     // authorized client
-	rootBucket    string        // bucket part of root (if any)
-	rootDirectory string        // directory part of root (if any)
-	cache         *bucket.Cache // cache of bucket status
-	pacer         *fs.Pacer     // To pace the API calls
+	bucket     string       // the bucket we are working on
+	bucketOKMu sync.Mutex   // mutex to protect bucket OK
+	bucketOK   bool         // true if we have created the bucket
+	pacer      *pacer.Pacer // To pace the API calls
 }

 // Object describes a storage object
@@ -300,18 +260,18 @@ func (f *Fs) Name() string {

 // Root of the remote (as passed into NewFs)
 func (f *Fs) Root() string {
-	return f.root
+	if f.root == "" {
+		return f.bucket
+	}
+	return f.bucket + "/" + f.root
 }

 // String converts this Fs to a string
 func (f *Fs) String() string {
-	if f.rootBucket == "" {
-		return fmt.Sprintf("GCS root")
-	}
-	if f.rootDirectory == "" {
-		return fmt.Sprintf("GCS bucket %s", f.rootBucket)
-	}
-	return fmt.Sprintf("GCS bucket %s path %s", f.rootBucket, f.rootDirectory)
+	if f.root == "" {
+		return fmt.Sprintf("Storage bucket %s", f.bucket)
+	}
+	return fmt.Sprintf("Storage bucket %s path %s", f.bucket, f.root)
 }

 // Features returns the optional features of this Fs
@@ -319,7 +279,7 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }

-// shouldRetry determines whether a given err rates being retried
+// shouldRetry determines whehter a given err rates being retried
 func shouldRetry(err error) (again bool, errOut error) {
 	again = false
 	if err != nil {
@@ -343,24 +303,21 @@ func shouldRetry(err error) (again bool, errOut error) {
 	return again, err
 }

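shouldRetry feeds rclone's pacer, which re-invokes the wrapped call while the first return value is true. The call shape, as used later in NewFs (bucketName and objectPath here are placeholders):

	// Sketch of the pacer retry loop.
	err = f.pacer.Call(func() (bool, error) {
		_, err := f.svc.Objects.Get(bucketName, objectPath).Do()
		return shouldRetry(err) // true -> pacer backs off and retries
	})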
-// parsePath parses a remote 'url'
-func parsePath(path string) (root string) {
-	root = strings.Trim(path, "/")
+// Pattern to match a storage path
+var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
+
+// parseParse parses a storage 'url'
+func parsePath(path string) (bucket, directory string, err error) {
+	parts := matcher.FindStringSubmatch(path)
+	if parts == nil {
+		err = errors.Errorf("couldn't find bucket in storage path %q", path)
+	} else {
+		bucket, directory = parts[1], parts[2]
+		directory = strings.Trim(directory, "/")
+	}
 	return
 }

-// split returns bucket and bucketPath from the rootRelativePath
-// relative to f.root
-func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
-	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
-	return enc.FromStandardName(bucketName), enc.FromStandardPath(bucketPath)
-}
-
-// split returns bucket and bucketPath from the object
-func (o *Object) split() (bucket, bucketPath string) {
-	return o.fs.split(o.remote)
-}
-
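The matcher pattern splits on the first path separator: group 1 is everything before the first "/", group 2 the rest. For example:

	// matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
	parts := matcher.FindStringSubmatch("mybucket/photos/2019")
	// parts[1] == "mybucket", parts[2] == "/photos/2019"
	// parsePath then trims the slashes: bucket "mybucket", directory "photos/2019"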
func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
|
func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
|
||||||
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
|
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -370,15 +327,8 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
|
|||||||
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
|
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// setRoot changes the root of the Fs
|
// NewFs contstructs an Fs from the path, bucket:path
|
||||||
func (f *Fs) setRoot(root string) {
|
|
||||||
f.root = parsePath(root)
|
|
||||||
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path, bucket:path
|
|
||||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
ctx := context.TODO()
|
|
||||||
var oAuthClient *http.Client
|
var oAuthClient *http.Client
|
||||||
|
|
||||||
// Parse config into Options struct
|
// Parse config into Options struct
|
||||||
@@ -395,7 +345,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// try loading service account credentials from env variable, then from a file
|
// try loading service account credentials from env variable, then from a file
|
||||||
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
|
if opt.ServiceAccountCredentials != "" && opt.ServiceAccountFile != "" {
|
||||||
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
|
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "error opening service account credentials file")
|
return nil, errors.Wrap(err, "error opening service account credentials file")
|
||||||
@@ -410,27 +360,26 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
|||||||
} else {
|
} else {
|
||||||
oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
|
oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ctx := context.Background()
|
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
|
||||||
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
f := &Fs{
|
bucket, directory, err := parsePath(root)
|
||||||
name: name,
|
if err != nil {
|
||||||
root: root,
|
return nil, err
|
||||||
opt: *opt,
|
}
|
||||||
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
|
|
||||||
cache: bucket.NewCache(),
|
f := &Fs{
|
||||||
|
name: name,
|
||||||
|
bucket: bucket,
|
||||||
|
root: directory,
|
||||||
|
opt: *opt,
|
||||||
|
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
|
||||||
}
|
}
|
||||||
f.setRoot(root)
|
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
ReadMimeType: true,
|
ReadMimeType: true,
|
||||||
WriteMimeType: true,
|
WriteMimeType: true,
|
||||||
BucketBased: true,
|
BucketBased: true,
|
||||||
BucketBasedRootOK: true,
|
|
||||||
}).Fill(f)
|
}).Fill(f)
|
||||||
|
|
||||||
// Create a new authorized Drive client.
|
// Create a new authorized Drive client.
|
||||||
@@ -440,19 +389,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
 	}
 
-	if f.rootBucket != "" && f.rootDirectory != "" {
+	if f.root != "" {
+		f.root += "/"
 		// Check to see if the object exists
-		encodedDirectory := enc.FromStandardPath(f.rootDirectory)
 		err = f.pacer.Call(func() (bool, error) {
-			_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
+			_, err = f.svc.Objects.Get(bucket, directory).Do()
 			return shouldRetry(err)
 		})
 		if err == nil {
-			newRoot := path.Dir(f.root)
-			if newRoot == "." {
-				newRoot = ""
+			f.root = path.Dir(directory)
+			if f.root == "." {
+				f.root = ""
+			} else {
+				f.root += "/"
 			}
-			f.setRoot(newRoot)
 			// return an error with an fs which points to the parent
 			return f, fs.ErrorIsFile
 		}
@@ -463,7 +413,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 // Return an Object from a path
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage.Object) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object, error) {
 	o := &Object{
 		fs:     f,
 		remote: remote,
@@ -471,7 +421,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage
 	if info != nil {
 		o.setMetaData(info)
 	} else {
-		err := o.readMetaData(ctx) // reads info and meta, returning an error
+		err := o.readMetaData() // reads info and meta, returning an error
 		if err != nil {
 			return nil, err
 		}
@@ -481,8 +431,8 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage
 
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-	return f.newObjectWithInfo(ctx, remote, nil)
+func (f *Fs) NewObject(remote string) (fs.Object, error) {
+	return f.newObjectWithInfo(remote, nil)
 }
 
 // listFn is called from list to handle an object.
@@ -493,24 +443,20 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
 // dir is the starting directory, "" for root
 //
 // Set recurse to read sub directories
-//
-// The remote has prefix removed from it and if addBucket is set
-// then it adds the bucket to the start.
-func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) (err error) {
-	if prefix != "" {
-		prefix += "/"
+func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
+	root := f.root
+	rootLength := len(root)
+	if dir != "" {
+		root += dir + "/"
 	}
-	if directory != "" {
-		directory += "/"
-	}
-	list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
+	list := f.svc.Objects.List(f.bucket).Prefix(root).MaxResults(listChunks)
 	if !recurse {
 		list = list.Delimiter("/")
 	}
 	for {
 		var objects *storage.Objects
 		err = f.pacer.Call(func() (bool, error) {
-			objects, err = list.Context(ctx).Do()
+			objects, err = list.Do()
 			return shouldRetry(err)
 		})
 		if err != nil {
@@ -523,38 +469,31 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 		}
 		if !recurse {
 			var object storage.Object
-			for _, remote := range objects.Prefixes {
-				if !strings.HasSuffix(remote, "/") {
+			for _, prefix := range objects.Prefixes {
+				if !strings.HasSuffix(prefix, "/") {
 					continue
 				}
-				remote = enc.ToStandardPath(remote)
-				if !strings.HasPrefix(remote, prefix) {
-					fs.Logf(f, "Odd name received %q", remote)
-					continue
-				}
-				remote = remote[len(prefix) : len(remote)-1]
-				if addBucket {
-					remote = path.Join(bucket, remote)
-				}
-				err = fn(remote, &object, true)
+				err = fn(prefix[rootLength:len(prefix)-1], &object, true)
 				if err != nil {
 					return err
 				}
 			}
 		}
 		for _, object := range objects.Items {
-			remote := enc.ToStandardPath(object.Name)
-			if !strings.HasPrefix(remote, prefix) {
+			if !strings.HasPrefix(object.Name, root) {
 				fs.Logf(f, "Odd name received %q", object.Name)
 				continue
 			}
-			remote = remote[len(prefix):]
-			isDirectory := strings.HasSuffix(remote, "/")
-			if addBucket {
-				remote = path.Join(bucket, remote)
-			}
+			remote := object.Name[rootLength:]
 			// is this a directory marker?
-			if isDirectory && object.Size == 0 {
+			if (strings.HasSuffix(remote, "/") || remote == "") && object.Size == 0 {
+				if recurse && remote != "" {
+					// add a directory in if --fast-list since will have no prefixes
+					err = fn(remote[:len(remote)-1], object, true)
+					if err != nil {
+						return err
+					}
+				}
 				continue // skip directory marker
 			}
 			err = fn(remote, object, false)
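Both sides of the hunk above rely on the same bucket-listing convention: object names are flat strings, a listing root is a prefix ending in "/", and a zero-length object whose name ends in "/" stands in for a directory. A pure-Go sketch of that decoding, separate from the rclone code:

package main

import (
	"fmt"
	"strings"
)

type obj struct {
	name string
	size int64
}

func main() {
	root := "photos/2019/"
	for _, o := range []obj{
		{"photos/2019/a.jpg", 123},
		{"photos/2019/sub/", 0}, // zero-size directory marker
	} {
		if !strings.HasPrefix(o.name, root) {
			continue // odd name outside the listing root
		}
		remote := o.name[len(root):]
		if strings.HasSuffix(remote, "/") && o.size == 0 {
			fmt.Println("dir: ", remote[:len(remote)-1])
			continue
		}
		fmt.Println("file:", remote)
	}
}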
@@ -571,23 +510,32 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 }
 
 // Convert a list item into a DirEntry
-func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
+func (f *Fs) itemToDirEntry(remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
 	if isDirectory {
 		d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size))
 		return d, nil
 	}
-	o, err := f.newObjectWithInfo(ctx, remote, object)
+	o, err := f.newObjectWithInfo(remote, object)
 	if err != nil {
 		return nil, err
 	}
 	return o, nil
 }
 
+// mark the bucket as being OK
+func (f *Fs) markBucketOK() {
+	if f.bucket != "" {
+		f.bucketOKMu.Lock()
+		f.bucketOK = true
+		f.bucketOKMu.Unlock()
+	}
+}
+
 // listDir lists a single directory
-func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
 	// List the objects
-	err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
-		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
+	err = f.list(dir, false, func(remote string, object *storage.Object, isDirectory bool) error {
+		entry, err := f.itemToDirEntry(remote, object, isDirectory)
 		if err != nil {
 			return err
 		}
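The markBucketOK helper added on the older side is just a mutex-guarded memo: once any listing succeeds, the bucket is known to exist and later Mkdir calls become free. A minimal sketch of the pattern (type and method names are illustrative):

package main

import (
	"fmt"
	"sync"
)

type fsLike struct {
	mu sync.Mutex
	ok bool // "this bucket is known to exist"
}

// markOK records a successful listing under the lock.
func (f *fsLike) markOK() {
	f.mu.Lock()
	f.ok = true
	f.mu.Unlock()
}

func main() {
	f := &fsLike{}
	f.markOK()
	fmt.Println(f.ok) // true
}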
@@ -600,12 +548,15 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
 		return nil, err
 	}
 	// bucket must be present if listing succeeded
-	f.cache.MarkOK(bucket)
+	f.markBucketOK()
 	return entries, err
 }
 
 // listBuckets lists the buckets
-func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
+func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
+	if dir != "" {
+		return nil, fs.ErrorListBucketRequired
+	}
 	if f.opt.ProjectNumber == "" {
 		return nil, errors.New("can't list buckets without project number")
 	}
@@ -613,14 +564,14 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
 	for {
 		var buckets *storage.Buckets
 		err = f.pacer.Call(func() (bool, error) {
-			buckets, err = listBuckets.Context(ctx).Do()
+			buckets, err = listBuckets.Do()
 			return shouldRetry(err)
 		})
 		if err != nil {
 			return nil, err
 		}
 		for _, bucket := range buckets.Items {
-			d := fs.NewDir(enc.ToStandardName(bucket.Name), time.Time{})
+			d := fs.NewDir(bucket.Name, time.Time{})
 			entries = append(entries, d)
 		}
 		if buckets.NextPageToken == "" {
@@ -640,15 +591,11 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	bucket, directory := f.split(dir)
-	if bucket == "" {
-		if directory != "" {
-			return nil, fs.ErrorListBucketRequired
-		}
-		return f.listBuckets(ctx)
+func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+	if f.bucket == "" {
+		return f.listBuckets(dir)
 	}
-	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
+	return f.listDir(dir)
 }
 
 // ListR lists the objects and directories of the Fs starting
@@ -667,44 +614,23 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 //
 // Don't implement this unless you have a more efficient way
 // of listing recursively that doing a directory traversal.
-func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
-	bucket, directory := f.split(dir)
+func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
+	if f.bucket == "" {
+		return fs.ErrorListBucketRequired
+	}
 	list := walk.NewListRHelper(callback)
-	listR := func(bucket, directory, prefix string, addBucket bool) error {
-		return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
-			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
-			if err != nil {
-				return err
-			}
-			return list.Add(entry)
-		})
-	}
-	if bucket == "" {
-		entries, err := f.listBuckets(ctx)
+	err = f.list(dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
+		entry, err := f.itemToDirEntry(remote, object, isDirectory)
 		if err != nil {
 			return err
 		}
-		for _, entry := range entries {
-			err = list.Add(entry)
-			if err != nil {
-				return err
-			}
-			bucket := entry.Remote()
-			err = listR(bucket, "", f.rootDirectory, true)
-			if err != nil {
-				return err
-			}
-			// bucket must be present if listing succeeded
-			f.cache.MarkOK(bucket)
-		}
-	} else {
-		err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
-		if err != nil {
-			return err
-		}
-		// bucket must be present if listing succeeded
-		f.cache.MarkOK(bucket)
+		return list.Add(entry)
+	})
+	if err != nil {
+		return err
 	}
+	// bucket must be present if listing succeeded
+	f.markBucketOK()
 	return list.Flush()
 }
 
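Both versions of ListR feed entries through walk.NewListRHelper rather than invoking the callback per entry. My understanding is that the helper buffers entries and delivers them to the callback in chunks, flushing the remainder at the end; a sketch of that shape (the chunk size of 100 is an assumption, and strings stand in for fs.DirEntry):

package main

import "fmt"

type listRHelper struct {
	buf      []string
	callback func([]string) error
}

// Add buffers one entry, flushing when the buffer fills.
func (l *listRHelper) Add(entry string) error {
	l.buf = append(l.buf, entry)
	if len(l.buf) >= 100 {
		return l.Flush()
	}
	return nil
}

// Flush sends any buffered entries to the callback.
func (l *listRHelper) Flush() error {
	if len(l.buf) == 0 {
		return nil
	}
	err := l.callback(l.buf)
	l.buf = nil
	return err
}

func main() {
	h := &listRHelper{callback: func(batch []string) error {
		fmt.Println("batch of", len(batch))
		return nil
	}}
	_ = h.Add("bucket/a")
	_ = h.Flush()
}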
@@ -713,88 +639,83 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	// Temporary Object under construction
 	o := &Object{
 		fs:     f,
 		remote: src.Remote(),
 	}
-	return o, o.Update(ctx, in, src, options...)
+	return o, o.Update(in, src, options...)
 }
 
 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.Put(ctx, in, src, options...)
+func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	return f.Put(in, src, options...)
 }
 
 // Mkdir creates the bucket if it doesn't exist
-func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
-	bucket, _ := f.split(dir)
-	return f.makeBucket(ctx, bucket)
-}
-
-// makeBucket creates the bucket if it doesn't exist
-func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
-	return f.cache.Create(bucket, func() error {
-		// List something from the bucket to see if it exists. Doing it like this enables the use of a
-		// service account that only has the "Storage Object Admin" role. See #2193 for details.
-		err = f.pacer.Call(func() (bool, error) {
-			_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
-			return shouldRetry(err)
-		})
-		if err == nil {
-			// Bucket already exists
-			return nil
-		} else if gErr, ok := err.(*googleapi.Error); ok {
-			if gErr.Code != http.StatusNotFound {
-				return errors.Wrap(err, "failed to get bucket")
-			}
-		} else {
+func (f *Fs) Mkdir(dir string) (err error) {
+	f.bucketOKMu.Lock()
+	defer f.bucketOKMu.Unlock()
+	if f.bucketOK {
+		return nil
+	}
+	// List something from the bucket to see if it exists. Doing it like this enables the use of a
+	// service account that only has the "Storage Object Admin" role. See #2193 for details.
+	err = f.pacer.Call(func() (bool, error) {
+		_, err = f.svc.Objects.List(f.bucket).MaxResults(1).Do()
+		return shouldRetry(err)
+	})
+	if err == nil {
+		// Bucket already exists
+		f.bucketOK = true
+		return nil
+	} else if gErr, ok := err.(*googleapi.Error); ok {
+		if gErr.Code != http.StatusNotFound {
 			return errors.Wrap(err, "failed to get bucket")
 		}
+	} else {
+		return errors.Wrap(err, "failed to get bucket")
+	}
 
-		if f.opt.ProjectNumber == "" {
-			return errors.New("can't make bucket without project number")
-		}
+	if f.opt.ProjectNumber == "" {
+		return errors.New("can't make bucket without project number")
+	}
 
-		bucket := storage.Bucket{
-			Name:         bucket,
-			Location:     f.opt.Location,
-			StorageClass: f.opt.StorageClass,
-		}
-		if f.opt.BucketPolicyOnly {
-			bucket.IamConfiguration = &storage.BucketIamConfiguration{
-				BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
-					Enabled: true,
-				},
-			}
-		}
-		return f.pacer.Call(func() (bool, error) {
-			insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
-			if !f.opt.BucketPolicyOnly {
-				insertBucket.PredefinedAcl(f.opt.BucketACL)
-			}
-			_, err = insertBucket.Context(ctx).Do()
-			return shouldRetry(err)
-		})
-	}, nil)
+	bucket := storage.Bucket{
+		Name:         f.bucket,
+		Location:     f.opt.Location,
+		StorageClass: f.opt.StorageClass,
+	}
+	err = f.pacer.Call(func() (bool, error) {
+		_, err = f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket).PredefinedAcl(f.opt.BucketACL).Do()
+		return shouldRetry(err)
+	})
+	if err == nil {
+		f.bucketOK = true
+	}
+	return err
 }
 
 // Rmdir deletes the bucket if the fs is at the root
 //
 // Returns an error if it isn't empty: Error 409: The bucket you tried
 // to delete was not empty.
-func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
-	bucket, directory := f.split(dir)
-	if bucket == "" || directory != "" {
+func (f *Fs) Rmdir(dir string) (err error) {
+	f.bucketOKMu.Lock()
+	defer f.bucketOKMu.Unlock()
+	if f.root != "" || dir != "" {
 		return nil
 	}
-	return f.cache.Remove(bucket, func() error {
-		return f.pacer.Call(func() (bool, error) {
-			err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
-			return shouldRetry(err)
-		})
+	err = f.pacer.Call(func() (bool, error) {
+		err = f.svc.Buckets.Delete(f.bucket).Do()
+		return shouldRetry(err)
 	})
+	if err == nil {
+		f.bucketOK = false
+	}
+	return err
 }
 
 // Precision returns the precision
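The existence probe both versions of Mkdir share is worth pulling out on its own: listing at most one object needs only the "Storage Object Admin" role, whereas a Buckets.Get would need bucket-level permissions (the #2193 issue referenced in the comment). A sketch using the same google.golang.org/api/storage/v1 calls as the code above:

package gcsprobe

import (
	"net/http"

	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

// bucketExists probes a bucket by listing at most one object in it.
// A nil error means the bucket exists; a 404 means it does not; any
// other error (e.g. permissions) is passed back to the caller.
func bucketExists(svc *storage.Service, bucket string) (bool, error) {
	_, err := svc.Objects.List(bucket).MaxResults(1).Do()
	if err == nil {
		return true, nil
	}
	if gErr, ok := err.(*googleapi.Error); ok && gErr.Code == http.StatusNotFound {
		return false, nil
	}
	return false, err
}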
@@ -811,9 +732,8 @@ func (f *Fs) Precision() time.Duration {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
-	dstBucket, dstPath := f.split(remote)
-	err := f.makeBucket(ctx, dstBucket)
+func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+	err := f.Mkdir("")
 	if err != nil {
 		return nil, err
 	}
@@ -822,7 +742,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
-	srcBucket, srcPath := srcObj.split()
 
 	// Temporary Object under construction
 	dstObj := &Object{
@@ -830,13 +749,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		remote: remote,
 	}
 
+	srcBucket := srcObj.fs.bucket
+	srcObject := srcObj.fs.root + srcObj.remote
+	dstBucket := f.bucket
+	dstObject := f.root + remote
 	var newObject *storage.Object
 	err = f.pacer.Call(func() (bool, error) {
-		copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil)
-		if !f.opt.BucketPolicyOnly {
-			copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
-		}
-		newObject, err = copyObject.Context(ctx).Do()
+		newObject, err = f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
 		return shouldRetry(err)
 	})
 	if err != nil {
@@ -873,7 +792,7 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns the Md5sum of an object returning a lowercase hex string
-func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
+func (o *Object) Hash(t hash.Type) (string, error) {
 	if t != hash.MD5 {
 		return "", hash.ErrUnsupported
 	}
@@ -919,33 +838,24 @@ func (o *Object) setMetaData(info *storage.Object) {
 	}
 }
 
-// readObjectInfo reads the definition for an object
-func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
-	bucket, bucketPath := o.split()
+// readMetaData gets the metadata if it hasn't already been fetched
+//
+// it also sets the info
+func (o *Object) readMetaData() (err error) {
+	if !o.modTime.IsZero() {
+		return nil
+	}
+	var object *storage.Object
 	err = o.fs.pacer.Call(func() (bool, error) {
-		object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
+		object, err = o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
 		return shouldRetry(err)
 	})
 	if err != nil {
 		if gErr, ok := err.(*googleapi.Error); ok {
 			if gErr.Code == http.StatusNotFound {
-				return nil, fs.ErrorObjectNotFound
+				return fs.ErrorObjectNotFound
 			}
 		}
-		return nil, err
-	}
-	return object, nil
-}
-
-// readMetaData gets the metadata if it hasn't already been fetched
-//
-// it also sets the info
-func (o *Object) readMetaData(ctx context.Context) (err error) {
-	if !o.modTime.IsZero() {
-		return nil
-	}
-	object, err := o.readObjectInfo(ctx)
-	if err != nil {
 		return err
 	}
 	o.setMetaData(object)
@@ -956,8 +866,8 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime(ctx context.Context) time.Time {
-	err := o.readMetaData(ctx)
+func (o *Object) ModTime() time.Time {
+	err := o.readMetaData()
 	if err != nil {
 		// fs.Logf(o, "Failed to read metadata: %v", err)
 		return time.Now()
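GCS has no modtime the rclone model can use directly, so metadataFromModTime stores one in the object's user metadata; the metaMtime key and timeFormatOut constant live elsewhere in this file. A self-contained sketch, assuming the key is "mtime" and the format is RFC3339 with nanoseconds (both assumptions, since the constants are not shown here):

package main

import (
	"fmt"
	"time"
)

// metadataFromModTime builds the user-metadata map carrying the
// modification time, as the surrounding code stores it.
func metadataFromModTime(modTime time.Time) map[string]string {
	return map[string]string{
		"mtime": modTime.Format(time.RFC3339Nano), // assumed key/format
	}
}

func main() {
	fmt.Println(metadataFromModTime(time.Now()))
}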
@@ -973,28 +883,16 @@ func metadataFromModTime(modTime time.Time) map[string]string {
 }
 
 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
-	// read the complete existing object first
-	object, err := o.readObjectInfo(ctx)
-	if err != nil {
-		return err
+func (o *Object) SetModTime(modTime time.Time) (err error) {
+	// This only adds metadata so will perserve other metadata
+	object := storage.Object{
+		Bucket:   o.fs.bucket,
+		Name:     o.fs.root + o.remote,
+		Metadata: metadataFromModTime(modTime),
 	}
-	// Add the mtime to the existing metadata
-	mtime := modTime.Format(timeFormatOut)
-	if object.Metadata == nil {
-		object.Metadata = make(map[string]string, 1)
-	}
-	object.Metadata[metaMtime] = mtime
-	// Copy the object to itself to update the metadata
-	// Using PATCH requires too many permissions
-	bucket, bucketPath := o.split()
 	var newObject *storage.Object
 	err = o.fs.pacer.Call(func() (bool, error) {
-		copyObject := o.fs.svc.Objects.Copy(bucket, bucketPath, bucket, bucketPath, object)
-		if !o.fs.opt.BucketPolicyOnly {
-			copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
-		}
-		newObject, err = copyObject.Context(ctx).Do()
+		newObject, err = o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
 		return shouldRetry(err)
 	})
 	if err != nil {
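The two sides of that hunk update metadata by different routes: the older code PATCHes the object, while the newer code copies the object onto itself with the updated metadata because, per its comment, PATCH requires too many permissions. A sketch of the copy-to-self shape against the same storage/v1 client (svc and the prepared object come from the caller):

package gcsmeta

import (
	storage "google.golang.org/api/storage/v1"
)

// setMetadata rewrites an object in place, carrying new metadata,
// instead of issuing a PATCH. The source and destination are the
// same bucket and path.
func setMetadata(svc *storage.Service, bucket, path string, object *storage.Object) (*storage.Object, error) {
	return svc.Objects.Copy(bucket, path, bucket, path, object).Do()
}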
@@ -1010,13 +908,11 @@ func (o *Object) Storable() bool {
 }
 
 // Open an object for read
-func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	req, err := http.NewRequest("GET", o.url, nil)
 	if err != nil {
 		return nil, err
 	}
-	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
-	fs.FixRangeOption(options, o.bytes)
 	fs.OpenOptionAddHTTPHeaders(req.Header, options)
 	var res *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
@@ -1043,27 +939,23 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 // Update the object with the contents of the io.Reader, modTime and size
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	bucket, bucketPath := o.split()
-	err := o.fs.makeBucket(ctx, bucket)
+func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+	err := o.fs.Mkdir("")
 	if err != nil {
 		return err
 	}
-	modTime := src.ModTime(ctx)
+	modTime := src.ModTime()
 
 	object := storage.Object{
-		Bucket:      bucket,
-		Name:        bucketPath,
-		ContentType: fs.MimeType(ctx, src),
+		Bucket:      o.fs.bucket,
+		Name:        o.fs.root + o.remote,
+		ContentType: fs.MimeType(src),
+		Updated:     modTime.Format(timeFormatOut), // Doesn't get set
 		Metadata:    metadataFromModTime(modTime),
 	}
 	var newObject *storage.Object
 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-		insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
-		if !o.fs.opt.BucketPolicyOnly {
-			insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
-		}
-		newObject, err = insertObject.Context(ctx).Do()
+		newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.opt.ObjectACL).Do()
 		return shouldRetry(err)
 	})
 	if err != nil {
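Both versions upload by attaching the reader with Media, which streams the body without needing a declared length; that is why PutStream can simply delegate to Put for objects of indeterminate size. A sketch of the call shape, isolated from the Fs plumbing:

package gcsupload

import (
	"io"

	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

// upload streams in to bucket/object.Name. The empty ContentType
// option lets the service infer the type, as in the code above.
func upload(svc *storage.Service, bucket string, object *storage.Object, in io.Reader) (*storage.Object, error) {
	return svc.Objects.Insert(bucket, object).
		Media(in, googleapi.ContentType("")).
		Name(object.Name).
		Do()
}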
@@ -1075,17 +967,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 }
 
 // Remove an object
-func (o *Object) Remove(ctx context.Context) (err error) {
-	bucket, bucketPath := o.split()
+func (o *Object) Remove() (err error) {
 	err = o.fs.pacer.Call(func() (bool, error) {
-		err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
+		err = o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
 		return shouldRetry(err)
 	})
 	return err
 }
 
 // MimeType of an Object if known, "" otherwise
-func (o *Object) MimeType(ctx context.Context) string {
+func (o *Object) MimeType() string {
 	return o.mimeType
 }
 
backend/googlecloudstorage/googlecloudstorage_test.go:
@@ -1,12 +1,11 @@
 // Test GoogleCloudStorage filesystem interface
 
 package googlecloudstorage_test
 
 import (
 	"testing"
 
-	"github.com/rclone/rclone/backend/googlecloudstorage"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/backend/googlecloudstorage"
+	"github.com/ncw/rclone/fstest/fstests"
 )
 
 // TestIntegration runs integration tests against the remote
backend/googlephotos/albums.go (whole file removed in this comparison):
@@ -1,148 +0,0 @@
-// This file contains the albums abstraction
-
-package googlephotos
-
-import (
-	"path"
-	"strings"
-	"sync"
-
-	"github.com/rclone/rclone/backend/googlephotos/api"
-)
-
-// All the albums
-type albums struct {
-	mu      sync.Mutex
-	dupes   map[string][]*api.Album // duplicated names
-	byID    map[string]*api.Album   //..indexed by ID
-	byTitle map[string]*api.Album   //..indexed by Title
-	path    map[string][]string     // partial album names to directory
-}
-
-// Create a new album
-func newAlbums() *albums {
-	return &albums{
-		dupes:   map[string][]*api.Album{},
-		byID:    map[string]*api.Album{},
-		byTitle: map[string]*api.Album{},
-		path:    map[string][]string{},
-	}
-}
-
-// add an album
-func (as *albums) add(album *api.Album) {
-	// Munge the name of the album into a sensible path name
-	album.Title = path.Clean(album.Title)
-	if album.Title == "." || album.Title == "/" {
-		album.Title = addID("", album.ID)
-	}
-
-	as.mu.Lock()
-	as._add(album)
-	as.mu.Unlock()
-}
-
-// _add an album - call with lock held
-func (as *albums) _add(album *api.Album) {
-	// update dupes by title
-	dupes := as.dupes[album.Title]
-	dupes = append(dupes, album)
-	as.dupes[album.Title] = dupes
-
-	// Dedupe the album name if necessary
-	if len(dupes) >= 2 {
-		// If this is the first dupe, then need to adjust the first one
-		if len(dupes) == 2 {
-			firstAlbum := dupes[0]
-			as._del(firstAlbum)
-			as._add(firstAlbum)
-			// undo add of firstAlbum to dupes
-			as.dupes[album.Title] = dupes
-		}
-		album.Title = addID(album.Title, album.ID)
-	}
-
-	// Store the new album
-	as.byID[album.ID] = album
-	as.byTitle[album.Title] = album
-
-	// Store the partial paths
-	dir, leaf := album.Title, ""
-	for dir != "" {
-		i := strings.LastIndex(dir, "/")
-		if i >= 0 {
-			dir, leaf = dir[:i], dir[i+1:]
-		} else {
-			dir, leaf = "", dir
-		}
-		dirs := as.path[dir]
-		found := false
-		for _, dir := range dirs {
-			if dir == leaf {
-				found = true
-			}
-		}
-		if !found {
-			as.path[dir] = append(as.path[dir], leaf)
-		}
-	}
-}
-
-// del an album
-func (as *albums) del(album *api.Album) {
-	as.mu.Lock()
-	as._del(album)
-	as.mu.Unlock()
-}
-
-// _del an album - call with lock held
-func (as *albums) _del(album *api.Album) {
-	// We leave in dupes so it doesn't cause albums to get renamed
-
-	// Remove from byID and byTitle
-	delete(as.byID, album.ID)
-	delete(as.byTitle, album.Title)
-
-	// Remove from paths
-	dir, leaf := album.Title, ""
-	for dir != "" {
-		// Can't delete if this dir exists anywhere in the path structure
-		if _, found := as.path[dir]; found {
-			break
-		}
-		i := strings.LastIndex(dir, "/")
-		if i >= 0 {
-			dir, leaf = dir[:i], dir[i+1:]
-		} else {
-			dir, leaf = "", dir
-		}
-		dirs := as.path[dir]
-		for i, dir := range dirs {
-			if dir == leaf {
-				dirs = append(dirs[:i], dirs[i+1:]...)
-				break
-			}
-		}
-		if len(dirs) == 0 {
-			delete(as.path, dir)
-		} else {
-			as.path[dir] = dirs
-		}
-	}
-}
-
-// get an album by title
-func (as *albums) get(title string) (album *api.Album, ok bool) {
-	as.mu.Lock()
-	defer as.mu.Unlock()
-	album, ok = as.byTitle[title]
-	return album, ok
-}
-
-// getDirs gets directories below an album path
-func (as *albums) getDirs(albumPath string) (dirs []string, ok bool) {
-	as.mu.Lock()
-	defer as.mu.Unlock()
-	dirs, ok = as.path[albumPath]
-	return dirs, ok
-}
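The addID helper referenced above is defined elsewhere in the backend, but its behaviour can be reconstructed from the expected map keys in the tests below: duplicate album titles get " {ID}" appended and an empty title becomes just "{ID}". A sketch consistent with those expectations (this is an inference, not the original source):

package main

import "fmt"

// addID disambiguates an album title with its ID.
func addID(name, id string) string {
	if name == "" {
		return "{" + id + "}"
	}
	return name + " {" + id + "}"
}

func main() {
	fmt.Println(addID("two", "2")) // two {2}
	fmt.Println(addID("", "0"))    // {0}
}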
backend/googlephotos/albums_test.go (whole file removed in this comparison):
@@ -1,311 +0,0 @@
-package googlephotos
-
-import (
-	"testing"
-
-	"github.com/rclone/rclone/backend/googlephotos/api"
-	"github.com/stretchr/testify/assert"
-)
-
-func TestNewAlbums(t *testing.T) {
-	albums := newAlbums()
-	assert.NotNil(t, albums.dupes)
-	assert.NotNil(t, albums.byID)
-	assert.NotNil(t, albums.byTitle)
-	assert.NotNil(t, albums.path)
-}
-
-func TestAlbumsAdd(t *testing.T) {
-	albums := newAlbums()
-
-	assert.Equal(t, map[string][]*api.Album{}, albums.dupes)
-	assert.Equal(t, map[string]*api.Album{}, albums.byID)
-	assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
-	assert.Equal(t, map[string][]string{}, albums.path)
-
-	a1 := &api.Album{
-		Title: "one",
-		ID:    "1",
-	}
-	albums.add(a1)
-
-	assert.Equal(t, map[string][]*api.Album{
-		"one": []*api.Album{a1},
-	}, albums.dupes)
-	assert.Equal(t, map[string]*api.Album{
-		"1": a1,
-	}, albums.byID)
-	assert.Equal(t, map[string]*api.Album{
-		"one": a1,
-	}, albums.byTitle)
-	assert.Equal(t, map[string][]string{
-		"": []string{"one"},
-	}, albums.path)
-
-	a2 := &api.Album{
-		Title: "two",
-		ID:    "2",
-	}
-	albums.add(a2)
-
-	assert.Equal(t, map[string][]*api.Album{
-		"one": []*api.Album{a1},
-		"two": []*api.Album{a2},
-	}, albums.dupes)
-	assert.Equal(t, map[string]*api.Album{
-		"1": a1,
-		"2": a2,
-	}, albums.byID)
-	assert.Equal(t, map[string]*api.Album{
-		"one": a1,
-		"two": a2,
-	}, albums.byTitle)
-	assert.Equal(t, map[string][]string{
-		"": []string{"one", "two"},
-	}, albums.path)
-
-	// Add a duplicate
-	a2a := &api.Album{
-		Title: "two",
-		ID:    "2a",
-	}
-	albums.add(a2a)
-
-	assert.Equal(t, map[string][]*api.Album{
-		"one": []*api.Album{a1},
-		"two": []*api.Album{a2, a2a},
-	}, albums.dupes)
-	assert.Equal(t, map[string]*api.Album{
-		"1":  a1,
-		"2":  a2,
-		"2a": a2a,
-	}, albums.byID)
-	assert.Equal(t, map[string]*api.Album{
-		"one":      a1,
-		"two {2}":  a2,
-		"two {2a}": a2a,
-	}, albums.byTitle)
-	assert.Equal(t, map[string][]string{
-		"": []string{"one", "two {2}", "two {2a}"},
-	}, albums.path)
-
-	// Add a sub directory
-	a1sub := &api.Album{
-		Title: "one/sub",
-		ID:    "1sub",
-	}
-	albums.add(a1sub)
-
-	assert.Equal(t, map[string][]*api.Album{
-		"one":     []*api.Album{a1},
-		"two":     []*api.Album{a2, a2a},
-		"one/sub": []*api.Album{a1sub},
-	}, albums.dupes)
-	assert.Equal(t, map[string]*api.Album{
-		"1":    a1,
-		"2":    a2,
-		"2a":   a2a,
-		"1sub": a1sub,
-	}, albums.byID)
-	assert.Equal(t, map[string]*api.Album{
-		"one":      a1,
-		"one/sub":  a1sub,
-		"two {2}":  a2,
-		"two {2a}": a2a,
-	}, albums.byTitle)
-	assert.Equal(t, map[string][]string{
-		"":    []string{"one", "two {2}", "two {2a}"},
-		"one": []string{"sub"},
-	}, albums.path)
-
-	// Add a weird path
-	a0 := &api.Album{
-		Title: "/../././..////.",
-		ID:    "0",
-	}
-	albums.add(a0)
-
-	assert.Equal(t, map[string][]*api.Album{
-		"{0}":     []*api.Album{a0},
-		"one":     []*api.Album{a1},
-		"two":     []*api.Album{a2, a2a},
-		"one/sub": []*api.Album{a1sub},
-	}, albums.dupes)
-	assert.Equal(t, map[string]*api.Album{
-		"0":    a0,
-		"1":    a1,
-		"2":    a2,
-		"2a":   a2a,
-		"1sub": a1sub,
-	}, albums.byID)
-	assert.Equal(t, map[string]*api.Album{
-		"{0}":      a0,
-		"one":      a1,
-		"one/sub":  a1sub,
-		"two {2}":  a2,
-		"two {2a}": a2a,
-	}, albums.byTitle)
-	assert.Equal(t, map[string][]string{
-		"":    []string{"one", "two {2}", "two {2a}", "{0}"},
-		"one": []string{"sub"},
-	}, albums.path)
-}
-
-func TestAlbumsDel(t *testing.T) {
-	albums := newAlbums()
-
-	a1 := &api.Album{
-		Title: "one",
-		ID:    "1",
-	}
-	albums.add(a1)
-
-	a2 := &api.Album{
-		Title: "two",
-		ID:    "2",
-	}
-	albums.add(a2)
-
-	// Add a duplicate
-	a2a := &api.Album{
-		Title: "two",
-		ID:    "2a",
-	}
-	albums.add(a2a)
-
-	// Add a sub directory
-	a1sub := &api.Album{
-		Title: "one/sub",
-		ID:    "1sub",
-	}
-	albums.add(a1sub)
-
-	assert.Equal(t, map[string][]*api.Album{
-		"one":     []*api.Album{a1},
-		"two":     []*api.Album{a2, a2a},
-		"one/sub": []*api.Album{a1sub},
-	}, albums.dupes)
-	assert.Equal(t, map[string]*api.Album{
-		"1":    a1,
-		"2":    a2,
-		"2a":   a2a,
-		"1sub": a1sub,
-	}, albums.byID)
-	assert.Equal(t, map[string]*api.Album{
-		"one":      a1,
-		"one/sub":  a1sub,
-		"two {2}":  a2,
-		"two {2a}": a2a,
-	}, albums.byTitle)
-	assert.Equal(t, map[string][]string{
-		"":    []string{"one", "two {2}", "two {2a}"},
-		"one": []string{"sub"},
-	}, albums.path)
-
-	albums.del(a1)
-
-	assert.Equal(t, map[string][]*api.Album{
-		"one":     []*api.Album{a1},
-		"two":     []*api.Album{a2, a2a},
-		"one/sub": []*api.Album{a1sub},
-	}, albums.dupes)
-	assert.Equal(t, map[string]*api.Album{
-		"2":    a2,
-		"2a":   a2a,
-		"1sub": a1sub,
-	}, albums.byID)
-	assert.Equal(t, map[string]*api.Album{
-		"one/sub":  a1sub,
-		"two {2}":  a2,
-		"two {2a}": a2a,
-	}, albums.byTitle)
-	assert.Equal(t, map[string][]string{
-		"":    []string{"one", "two {2}", "two {2a}"},
-		"one": []string{"sub"},
-	}, albums.path)
-
-	albums.del(a2)
-
-	assert.Equal(t, map[string][]*api.Album{
-		"one":     []*api.Album{a1},
-		"two":     []*api.Album{a2, a2a},
-		"one/sub": []*api.Album{a1sub},
-	}, albums.dupes)
-	assert.Equal(t, map[string]*api.Album{
-		"2a":   a2a,
-		"1sub": a1sub,
-	}, albums.byID)
-	assert.Equal(t, map[string]*api.Album{
-		"one/sub":  a1sub,
-		"two {2a}": a2a,
-	}, albums.byTitle)
-	assert.Equal(t, map[string][]string{
-		"":    []string{"one", "two {2a}"},
-		"one": []string{"sub"},
-	}, albums.path)
-
-	albums.del(a2a)
-
-	assert.Equal(t, map[string][]*api.Album{
-		"one":     []*api.Album{a1},
-		"two":     []*api.Album{a2, a2a},
-		"one/sub": []*api.Album{a1sub},
-	}, albums.dupes)
-	assert.Equal(t, map[string]*api.Album{
-		"1sub": a1sub,
-	}, albums.byID)
-	assert.Equal(t, map[string]*api.Album{
-		"one/sub": a1sub,
-	}, albums.byTitle)
-	assert.Equal(t, map[string][]string{
-		"":    []string{"one"},
-		"one": []string{"sub"},
-	}, albums.path)
-
-	albums.del(a1sub)
-
-	assert.Equal(t, map[string][]*api.Album{
-		"one":     []*api.Album{a1},
-		"two":     []*api.Album{a2, a2a},
-		"one/sub": []*api.Album{a1sub},
-	}, albums.dupes)
-	assert.Equal(t, map[string]*api.Album{}, albums.byID)
-	assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
-	assert.Equal(t, map[string][]string{}, albums.path)
-}
-
-func TestAlbumsGet(t *testing.T) {
-	albums := newAlbums()
-
-	a1 := &api.Album{
-		Title: "one",
-		ID:    "1",
-	}
-	albums.add(a1)
-
-	album, ok := albums.get("one")
-	assert.Equal(t, true, ok)
-	assert.Equal(t, a1, album)
-
-	album, ok = albums.get("notfound")
-	assert.Equal(t, false, ok)
-	assert.Nil(t, album)
-}
-
-func TestAlbumsGetDirs(t *testing.T) {
-	albums := newAlbums()
-
-	a1 := &api.Album{
-		Title: "one",
-		ID:    "1",
-	}
-	albums.add(a1)
-
-	dirs, ok := albums.getDirs("")
-	assert.Equal(t, true, ok)
-	assert.Equal(t, []string{"one"}, dirs)
-
-	dirs, ok = albums.getDirs("notfound")
-	assert.Equal(t, false, ok)
-	assert.Nil(t, dirs)
-}
backend/googlephotos/api/types.go (whole file removed in this comparison):
@@ -1,190 +0,0 @@
-package api
-
-import (
-	"fmt"
-	"time"
-)
-
-// ErrorDetails in the internals of the Error type
-type ErrorDetails struct {
-	Code    int    `json:"code"`
-	Message string `json:"message"`
-	Status  string `json:"status"`
-}
-
-// Error is returned on errors
-type Error struct {
-	Details ErrorDetails `json:"error"`
-}
-
-// Error statisfies error interface
-func (e *Error) Error() string {
-	return fmt.Sprintf("%s (%d %s)", e.Details.Message, e.Details.Code, e.Details.Status)
-}
-
-// Album of photos
-type Album struct {
-	ID                    string `json:"id,omitempty"`
-	Title                 string `json:"title"`
-	ProductURL            string `json:"productUrl,omitempty"`
-	MediaItemsCount       string `json:"mediaItemsCount,omitempty"`
-	CoverPhotoBaseURL     string `json:"coverPhotoBaseUrl,omitempty"`
-	CoverPhotoMediaItemID string `json:"coverPhotoMediaItemId,omitempty"`
-	IsWriteable           bool   `json:"isWriteable,omitempty"`
-}
-
-// ListAlbums is returned from albums.list and sharedAlbums.list
-type ListAlbums struct {
-	Albums        []Album `json:"albums"`
-	SharedAlbums  []Album `json:"sharedAlbums"`
-	NextPageToken string  `json:"nextPageToken"`
-}
-
-// CreateAlbum creates an Album
-type CreateAlbum struct {
-	Album *Album `json:"album"`
-}
-
-// MediaItem is a photo or video
-type MediaItem struct {
-	ID            string `json:"id"`
-	ProductURL    string `json:"productUrl"`
-	BaseURL       string `json:"baseUrl"`
-	MimeType      string `json:"mimeType"`
-	MediaMetadata struct {
-		CreationTime time.Time `json:"creationTime"`
-		Width        string    `json:"width"`
-		Height       string    `json:"height"`
-		Photo        struct {
-		} `json:"photo"`
-	} `json:"mediaMetadata"`
-	Filename string `json:"filename"`
-}
-
-// MediaItems is returned from mediaitems.list, mediaitems.search
-type MediaItems struct {
-	MediaItems    []MediaItem `json:"mediaItems"`
-	NextPageToken string      `json:"nextPageToken"`
-}
-
-//Content categories
-// NONE Default content category. This category is ignored when any other category is used in the filter.
-// LANDSCAPES Media items containing landscapes.
-// RECEIPTS Media items containing receipts.
-// CITYSCAPES Media items containing cityscapes.
-// LANDMARKS Media items containing landmarks.
-// SELFIES Media items that are selfies.
-// PEOPLE Media items containing people.
-// PETS Media items containing pets.
-// WEDDINGS Media items from weddings.
-// BIRTHDAYS Media items from birthdays.
-// DOCUMENTS Media items containing documents.
-// TRAVEL Media items taken during travel.
-// ANIMALS Media items containing animals.
-// FOOD Media items containing food.
-// SPORT Media items from sporting events.
-// NIGHT Media items taken at night.
-// PERFORMANCES Media items from performances.
-// WHITEBOARDS Media items containing whiteboards.
-// SCREENSHOTS Media items that are screenshots.
-// UTILITY Media items that are considered to be utility. These include, but aren't limited to documents, screenshots, whiteboards etc.
-// ARTS Media items containing art.
-// CRAFTS Media items containing crafts.
-// FASHION Media items related to fashion.
-// HOUSES Media items containing houses.
-// GARDENS Media items containing gardens.
-// FLOWERS Media items containing flowers.
-// HOLIDAYS Media items taken of holidays.
-
-// MediaTypes
-// ALL_MEDIA Treated as if no filters are applied. All media types are included.
-// VIDEO All media items that are considered videos. This also includes movies the user has created using the Google Photos app.
-// PHOTO All media items that are considered photos. This includes .bmp, .gif, .ico, .jpg (and other spellings), .tiff, .webp and special photo types such as iOS live photos, Android motion photos, panoramas, photospheres.
-
-// Features
-// NONE Treated as if no filters are applied. All features are included.
-// FAVORITES Media items that the user has marked as favorites in the Google Photos app.
-
-// Date is used as part of SearchFilter
-type Date struct {
-	Year  int `json:"year,omitempty"`
-	Month int `json:"month,omitempty"`
-	Day   int `json:"day,omitempty"`
-}
-
-// DateFilter is uses to add date ranges to media item queries
-type DateFilter struct {
-	Dates  []Date `json:"dates,omitempty"`
-	Ranges []struct {
-		StartDate Date `json:"startDate,omitempty"`
-		EndDate   Date `json:"endDate,omitempty"`
-	} `json:"ranges,omitempty"`
-}
-
-// ContentFilter is uses to add content categories to media item queries
-type ContentFilter struct {
-	IncludedContentCategories []string `json:"includedContentCategories,omitempty"`
-	ExcludedContentCategories []string `json:"excludedContentCategories,omitempty"`
-}
-
-// MediaTypeFilter is uses to add media types to media item queries
-type MediaTypeFilter struct {
-	MediaTypes []string `json:"mediaTypes,omitempty"`
-}
-
-// FeatureFilter is uses to add features to media item queries
-type FeatureFilter struct {
-	IncludedFeatures []string `json:"includedFeatures,omitempty"`
-}
-
-// Filters combines all the filter types for media item queries
-type Filters struct {
-	DateFilter               *DateFilter      `json:"dateFilter,omitempty"`
-	ContentFilter            *ContentFilter   `json:"contentFilter,omitempty"`
-	MediaTypeFilter          *MediaTypeFilter `json:"mediaTypeFilter,omitempty"`
-	FeatureFilter            *FeatureFilter   `json:"featureFilter,omitempty"`
-	IncludeArchivedMedia     *bool            `json:"includeArchivedMedia,omitempty"`
-	ExcludeNonAppCreatedData *bool            `json:"excludeNonAppCreatedData,omitempty"`
-}
-
-// SearchFilter is uses with mediaItems.search
-type SearchFilter struct {
-	AlbumID   string   `json:"albumId,omitempty"`
-	PageSize  int      `json:"pageSize"`
-	PageToken string   `json:"pageToken,omitempty"`
-	Filters   *Filters `json:"filters,omitempty"`
-}
-
-// SimpleMediaItem is part of NewMediaItem
-type SimpleMediaItem struct {
-	UploadToken string `json:"uploadToken"`
-}
-
-// NewMediaItem is a single media item for upload
-type NewMediaItem struct {
-	Description     string          `json:"description"`
-	SimpleMediaItem SimpleMediaItem `json:"simpleMediaItem"`
-}
-
-// BatchCreateRequest creates media items from upload tokens
-type BatchCreateRequest struct {
-	AlbumID       string         `json:"albumId,omitempty"`
-	NewMediaItems []NewMediaItem `json:"newMediaItems"`
-}
-
-// BatchCreateResponse is returned from BatchCreateRequest
-type BatchCreateResponse struct {
-	NewMediaItemResults []struct {
-		UploadToken string `json:"uploadToken"`
-		Status      struct {
-			Message string `json:"message"`
-			Code    int    `json:"code"`
-		} `json:"status"`
-		MediaItem MediaItem `json:"mediaItem"`
-	} `json:"newMediaItemResults"`
-}
-
-// BatchRemoveItems is for removing items from an album
-type BatchRemoveItems struct {
-	MediaItemIds []string `json:"mediaItemIds"`
-}
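The structs above are plain JSON-tagged request/response bodies, so building a mediaItems.search request is ordinary struct marshalling. A self-contained example using copies of the relevant types from this file:

package main

import (
	"encoding/json"
	"fmt"
)

type MediaTypeFilter struct {
	MediaTypes []string `json:"mediaTypes,omitempty"`
}

type Filters struct {
	MediaTypeFilter *MediaTypeFilter `json:"mediaTypeFilter,omitempty"`
}

type SearchFilter struct {
	AlbumID   string   `json:"albumId,omitempty"`
	PageSize  int      `json:"pageSize"`
	PageToken string   `json:"pageToken,omitempty"`
	Filters   *Filters `json:"filters,omitempty"`
}

func main() {
	// Search for photos only, 100 per page; empty fields are omitted.
	body, _ := json.MarshalIndent(SearchFilter{
		PageSize: 100,
		Filters: &Filters{
			MediaTypeFilter: &MediaTypeFilter{MediaTypes: []string{"PHOTO"}},
		},
	}, "", "  ")
	fmt.Println(string(body))
}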
File diff suppressed because it is too large
@@ -1,307 +0,0 @@
package googlephotos

import (
	"context"
	"fmt"
	"io/ioutil"
	"net/http"
	"path"
	"testing"
	"time"

	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/lib/random"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

const (
	// We have two different files here as Google Photos will uniq
	// them otherwise which confuses the tests as the filename is
	// unexpected.
	fileNameAlbum  = "rclone-test-image1.jpg"
	fileNameUpload = "rclone-test-image2.jpg"
)

// Wrapper to override the remote for an object
type overrideRemoteObject struct {
	fs.Object
	remote string
}

// Remote returns the overridden remote name
func (o *overrideRemoteObject) Remote() string {
	return o.remote
}

func TestIntegration(t *testing.T) {
	ctx := context.Background()
	fstest.Initialise()

	// Create Fs
	if *fstest.RemoteName == "" {
		*fstest.RemoteName = "TestGooglePhotos:"
	}
	f, err := fs.NewFs(*fstest.RemoteName)
	if err == fs.ErrorNotFoundInConfigFile {
		t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
	}
	require.NoError(t, err)

	// Create local Fs pointing at testfiles
	localFs, err := fs.NewFs("testfiles")
	require.NoError(t, err)

	t.Run("CreateAlbum", func(t *testing.T) {
		albumName := "album/rclone-test-" + random.String(24)
		err = f.Mkdir(ctx, albumName)
		require.NoError(t, err)
		remote := albumName + "/" + fileNameAlbum

		t.Run("PutFile", func(t *testing.T) {
			srcObj, err := localFs.NewObject(ctx, fileNameAlbum)
			require.NoError(t, err)
			in, err := srcObj.Open(ctx)
			require.NoError(t, err)
			dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
			require.NoError(t, err)
			assert.Equal(t, remote, dstObj.Remote())
			_ = in.Close()
			remoteWithID := addFileID(remote, dstObj.(*Object).id)

			t.Run("ObjectFs", func(t *testing.T) {
				assert.Equal(t, f, dstObj.Fs())
			})

			t.Run("ObjectString", func(t *testing.T) {
				assert.Equal(t, remote, dstObj.String())
				assert.Equal(t, "<nil>", (*Object)(nil).String())
			})

			t.Run("ObjectHash", func(t *testing.T) {
				h, err := dstObj.Hash(ctx, hash.MD5)
				assert.Equal(t, "", h)
				assert.Equal(t, hash.ErrUnsupported, err)
			})

			t.Run("ObjectSize", func(t *testing.T) {
				assert.Equal(t, int64(-1), dstObj.Size())
				f.(*Fs).opt.ReadSize = true
				defer func() {
					f.(*Fs).opt.ReadSize = false
				}()
				size := dstObj.Size()
				assert.True(t, size > 1000, fmt.Sprintf("Size too small %d", size))
			})

			t.Run("ObjectSetModTime", func(t *testing.T) {
				err := dstObj.SetModTime(ctx, time.Now())
				assert.Equal(t, fs.ErrorCantSetModTime, err)
			})

			t.Run("ObjectStorable", func(t *testing.T) {
				assert.True(t, dstObj.Storable())
			})

			t.Run("ObjectOpen", func(t *testing.T) {
				in, err := dstObj.Open(ctx)
				require.NoError(t, err)
				buf, err := ioutil.ReadAll(in)
				require.NoError(t, err)
				require.NoError(t, in.Close())
				assert.True(t, len(buf) > 1000)
				contentType := http.DetectContentType(buf[:512])
				assert.Equal(t, "image/jpeg", contentType)
			})

			t.Run("CheckFileInAlbum", func(t *testing.T) {
				entries, err := f.List(ctx, albumName)
				require.NoError(t, err)
				assert.Equal(t, 1, len(entries))
				assert.Equal(t, remote, entries[0].Remote())
				assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
			})

			// Check it is there in the date/month/year hierarchy
			// 2013-07-13 is the creation date of the folder
			checkPresent := func(t *testing.T, objPath string) {
				entries, err := f.List(ctx, objPath)
				require.NoError(t, err)
				found := false
				for _, entry := range entries {
					leaf := path.Base(entry.Remote())
					if leaf == fileNameAlbum || leaf == remoteWithID {
						found = true
					}
				}
				assert.True(t, found, fmt.Sprintf("didn't find %q in %q", fileNameAlbum, objPath))
			}

			t.Run("CheckInByYear", func(t *testing.T) {
				checkPresent(t, "media/by-year/2013")
			})

			t.Run("CheckInByMonth", func(t *testing.T) {
				checkPresent(t, "media/by-month/2013/2013-07")
			})

			t.Run("CheckInByDay", func(t *testing.T) {
				checkPresent(t, "media/by-day/2013/2013-07-26")
			})

			t.Run("NewObject", func(t *testing.T) {
				o, err := f.NewObject(ctx, remote)
				require.NoError(t, err)
				require.Equal(t, remote, o.Remote())
			})

			t.Run("NewObjectWithID", func(t *testing.T) {
				o, err := f.NewObject(ctx, remoteWithID)
				require.NoError(t, err)
				require.Equal(t, remoteWithID, o.Remote())
			})

			t.Run("NewFsIsFile", func(t *testing.T) {
				fNew, err := fs.NewFs(*fstest.RemoteName + remote)
				assert.Equal(t, fs.ErrorIsFile, err)
				leaf := path.Base(remote)
				o, err := fNew.NewObject(ctx, leaf)
				require.NoError(t, err)
				require.Equal(t, leaf, o.Remote())
			})

			t.Run("RemoveFileFromAlbum", func(t *testing.T) {
				err = dstObj.Remove(ctx)
				require.NoError(t, err)

				time.Sleep(time.Second)

				// Check album empty
				entries, err := f.List(ctx, albumName)
				require.NoError(t, err)
				assert.Equal(t, 0, len(entries))
			})
		})

		// remove the album
		err = f.Rmdir(ctx, albumName)
		require.Error(t, err) // FIXME doesn't work yet
	})

	t.Run("UploadMkdir", func(t *testing.T) {
		assert.NoError(t, f.Mkdir(ctx, "upload/dir"))
		assert.NoError(t, f.Mkdir(ctx, "upload/dir/subdir"))

		t.Run("List", func(t *testing.T) {
			entries, err := f.List(ctx, "upload")
			require.NoError(t, err)
			assert.Equal(t, 1, len(entries))
			assert.Equal(t, "upload/dir", entries[0].Remote())

			entries, err = f.List(ctx, "upload/dir")
			require.NoError(t, err)
			assert.Equal(t, 1, len(entries))
			assert.Equal(t, "upload/dir/subdir", entries[0].Remote())
		})

		t.Run("Rmdir", func(t *testing.T) {
			assert.NoError(t, f.Rmdir(ctx, "upload/dir/subdir"))
			assert.NoError(t, f.Rmdir(ctx, "upload/dir"))
		})

		t.Run("ListEmpty", func(t *testing.T) {
			entries, err := f.List(ctx, "upload")
			require.NoError(t, err)
			assert.Equal(t, 0, len(entries))

			_, err = f.List(ctx, "upload/dir")
			assert.Equal(t, fs.ErrorDirNotFound, err)
		})
	})

	t.Run("Upload", func(t *testing.T) {
		uploadDir := "upload/dir/subdir"
		remote := path.Join(uploadDir, fileNameUpload)

		srcObj, err := localFs.NewObject(ctx, fileNameUpload)
		require.NoError(t, err)
		in, err := srcObj.Open(ctx)
		require.NoError(t, err)
		dstObj, err := f.Put(ctx, in, &overrideRemoteObject{srcObj, remote})
		require.NoError(t, err)
		assert.Equal(t, remote, dstObj.Remote())
		_ = in.Close()
		remoteWithID := addFileID(remote, dstObj.(*Object).id)

		t.Run("List", func(t *testing.T) {
			entries, err := f.List(ctx, uploadDir)
			require.NoError(t, err)
			require.Equal(t, 1, len(entries))
			assert.Equal(t, remote, entries[0].Remote())
			assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
		})

		t.Run("NewObject", func(t *testing.T) {
			o, err := f.NewObject(ctx, remote)
			require.NoError(t, err)
			require.Equal(t, remote, o.Remote())
		})

		t.Run("NewObjectWithID", func(t *testing.T) {
			o, err := f.NewObject(ctx, remoteWithID)
			require.NoError(t, err)
			require.Equal(t, remoteWithID, o.Remote())
		})
	})

	t.Run("Name", func(t *testing.T) {
		assert.Equal(t, (*fstest.RemoteName)[:len(*fstest.RemoteName)-1], f.Name())
	})

	t.Run("Root", func(t *testing.T) {
		assert.Equal(t, "", f.Root())
	})

	t.Run("String", func(t *testing.T) {
		assert.Equal(t, `Google Photos path ""`, f.String())
	})

	t.Run("Features", func(t *testing.T) {
		features := f.Features()
		assert.False(t, features.CaseInsensitive)
		assert.True(t, features.ReadMimeType)
	})

	t.Run("Precision", func(t *testing.T) {
		assert.Equal(t, fs.ModTimeNotSupported, f.Precision())
	})

	t.Run("Hashes", func(t *testing.T) {
		assert.Equal(t, hash.Set(hash.None), f.Hashes())
	})
}

func TestAddID(t *testing.T) {
	assert.Equal(t, "potato {123}", addID("potato", "123"))
	assert.Equal(t, "{123}", addID("", "123"))
}

func TestFileAddID(t *testing.T) {
	assert.Equal(t, "potato {123}.txt", addFileID("potato.txt", "123"))
	assert.Equal(t, "potato {123}", addFileID("potato", "123"))
	assert.Equal(t, "{123}", addFileID("", "123"))
}

func TestFindID(t *testing.T) {
	assert.Equal(t, "", findID("potato"))
	ID := "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
	assert.Equal(t, ID, findID("potato {"+ID+"}.txt"))
	ID = ID[1:]
	assert.Equal(t, "", findID("potato {"+ID+"}.txt"))
}
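The ID helpers exercised above (addID, addFileID, findID) live in the backend proper, which this diff does not show. A minimal reconstruction consistent with these tests, for illustration only — the character class and minimum ID length below are assumptions inferred from the test cases, not the backend's actual definitions:

package main

import (
	"fmt"
	"path"
	"regexp"
	"strings"
)

// addID appends " {ID}" to a name, matching TestAddID above.
func addID(name string, ID string) string {
	idStr := "{" + ID + "}"
	if name == "" {
		return idStr
	}
	return name + " " + idStr
}

// addFileID inserts the ID before the extension, matching TestFileAddID.
func addFileID(name string, ID string) string {
	ext := path.Ext(name)
	base := strings.TrimSuffix(name, ext)
	return addID(base, ID) + ext
}

// findID pulls a long ID out of a name, or returns "".
// The minimum length 55 is inferred from TestFindID, where the full
// test ID matches but the same ID one character shorter does not;
// the real backend may use a different rule.
var idRe = regexp.MustCompile(`\{([A-Za-z0-9_-]{55,})\}`)

func findID(name string) string {
	m := idRe.FindStringSubmatch(name)
	if m == nil {
		return ""
	}
	return m[1]
}

func main() {
	fmt.Println(addFileID("potato.txt", "123")) // potato {123}.txt
	fmt.Println(findID("potato"))               // ""
}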
@@ -1,335 +0,0 @@
// Store the parsing of file patterns

package googlephotos

import (
	"context"
	"fmt"
	"path"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/googlephotos/api"
	"github.com/rclone/rclone/fs"
)

// lister describes the subset of the interfaces on Fs needed for the
// file pattern parsing
type lister interface {
	listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error)
	listAlbums(ctx context.Context, shared bool) (all *albums, err error)
	listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
	dirTime() time.Time
}

// dirPattern describes a single directory pattern
type dirPattern struct {
	re        string         // match for the path
	match     *regexp.Regexp // compiled match
	canUpload bool           // true if can upload here
	canMkdir  bool           // true if can make a directory here
	isFile    bool           // true if this is a file
	isUpload  bool           // true if this is the upload directory
	// function to turn a match into DirEntries
	toEntries func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error)
}

// dirPatterns is a slice of all the directory patterns
type dirPatterns []dirPattern

// patterns describes the layout of the google photos backend file system.
//
// NB no trailing / on paths
var patterns = dirPatterns{
	{
		re: `^$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return fs.DirEntries{
				fs.NewDir(prefix+"media", f.dirTime()),
				fs.NewDir(prefix+"album", f.dirTime()),
				fs.NewDir(prefix+"shared-album", f.dirTime()),
				fs.NewDir(prefix+"upload", f.dirTime()),
			}, nil
		},
	},
	{
		re: `^upload(?:/(.*))?$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return f.listUploads(ctx, match[0])
		},
		canUpload: true,
		canMkdir:  true,
		isUpload:  true,
	},
	{
		re:        `^upload/(.*)$`,
		isFile:    true,
		canUpload: true,
		isUpload:  true,
	},
	{
		re: `^media$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return fs.DirEntries{
				fs.NewDir(prefix+"all", f.dirTime()),
				fs.NewDir(prefix+"by-year", f.dirTime()),
				fs.NewDir(prefix+"by-month", f.dirTime()),
				fs.NewDir(prefix+"by-day", f.dirTime()),
			}, nil
		},
	},
	{
		re: `^media/all$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return f.listDir(ctx, prefix, api.SearchFilter{})
		},
	},
	{
		re:     `^media/all/([^/]+)$`,
		isFile: true,
	},
	{
		re:        `^media/by-year$`,
		toEntries: years,
	},
	{
		re: `^media/by-year/(\d{4})$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			filter, err := yearMonthDayFilter(ctx, f, match)
			if err != nil {
				return nil, err
			}
			return f.listDir(ctx, prefix, filter)
		},
	},
	{
		re:     `^media/by-year/(\d{4})/([^/]+)$`,
		isFile: true,
	},
	{
		re:        `^media/by-month$`,
		toEntries: years,
	},
	{
		re:        `^media/by-month/(\d{4})$`,
		toEntries: months,
	},
	{
		re: `^media/by-month/\d{4}/(\d{4})-(\d{2})$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			filter, err := yearMonthDayFilter(ctx, f, match)
			if err != nil {
				return nil, err
			}
			return f.listDir(ctx, prefix, filter)
		},
	},
	{
		re:     `^media/by-month/\d{4}/(\d{4})-(\d{2})/([^/]+)$`,
		isFile: true,
	},
	{
		re:        `^media/by-day$`,
		toEntries: years,
	},
	{
		re:        `^media/by-day/(\d{4})$`,
		toEntries: days,
	},
	{
		re: `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			filter, err := yearMonthDayFilter(ctx, f, match)
			if err != nil {
				return nil, err
			}
			return f.listDir(ctx, prefix, filter)
		},
	},
	{
		re:     `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})/([^/]+)$`,
		isFile: true,
	},
	{
		re: `^album$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, false, prefix, "")
		},
	},
	{
		re:       `^album/(.+)$`,
		canMkdir: true,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, false, prefix, match[1])
		},
	},
	{
		re:        `^album/(.+?)/([^/]+)$`,
		canUpload: true,
		isFile:    true,
	},
	{
		re: `^shared-album$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, true, prefix, "")
		},
	},
	{
		re: `^shared-album/(.+)$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, true, prefix, match[1])
		},
	},
	{
		re:     `^shared-album/(.+?)/([^/]+)$`,
		isFile: true,
	},
}.mustCompile()

// mustCompile compiles the regexps in the dirPatterns
func (ds dirPatterns) mustCompile() dirPatterns {
	for i := range ds {
		pattern := &ds[i]
		pattern.match = regexp.MustCompile(pattern.re)
	}
	return ds
}

// match finds the path passed in in the matching structure and
// returns the parameters and a pointer to the match, or nil.
func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []string, prefix string, pattern *dirPattern) {
	itemPath = strings.Trim(itemPath, "/")
	absPath := path.Join(root, itemPath)
	prefix = strings.Trim(absPath[len(root):], "/")
	if prefix != "" {
		prefix += "/"
	}
	for i := range ds {
		pattern = &ds[i]
		if pattern.isFile != isFile {
			continue
		}
		match = pattern.match.FindStringSubmatch(absPath)
		if match != nil {
			return
		}
	}
	return nil, "", nil
}
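A concrete call, taken directly from the table-driven tests further down, shows how root and itemPath combine before matching:

// For example (see TestPatternMatch below for the full table):
//
//	match, prefix, pattern := patterns.match("media", "all", false)
//	// match   == []string{"media/all"}
//	// prefix  == "all/"
//	// pattern == &patterns[4] (the `^media/all$` entry)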
// Return the years from 2000 to today
// FIXME make configurable?
func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
	currentYear := f.dirTime().Year()
	for year := 2000; year <= currentYear; year++ {
		entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), f.dirTime()))
	}
	return entries, nil
}

// Return the months in a given year
func months(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
	year := match[1]
	for month := 1; month <= 12; month++ {
		entries = append(entries, fs.NewDir(fmt.Sprintf("%s%s-%02d", prefix, year, month), f.dirTime()))
	}
	return entries, nil
}

// Return the days in a given year
func days(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
	year := match[1]
	current, err := time.Parse("2006", year)
	if err != nil {
		return nil, errors.Errorf("bad year %q", match[1])
	}
	currentYear := current.Year()
	for current.Year() == currentYear {
		entries = append(entries, fs.NewDir(prefix+current.Format("2006-01-02"), f.dirTime()))
		current = current.AddDate(0, 0, 1)
	}
	return entries, nil
}

// This creates a search filter on year/month/day as provided
func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
	year, err := strconv.Atoi(match[1])
	if err != nil || year < 1000 || year > 3000 {
		return sf, errors.Errorf("bad year %q", match[1])
	}
	sf = api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Year: year,
					},
				},
			},
		},
	}
	if len(match) >= 3 {
		month, err := strconv.Atoi(match[2])
		if err != nil || month < 1 || month > 12 {
			return sf, errors.Errorf("bad month %q", match[2])
		}
		sf.Filters.DateFilter.Dates[0].Month = month
	}
	if len(match) >= 4 {
		day, err := strconv.Atoi(match[3])
		if err != nil || day < 1 || day > 31 {
			return sf, errors.Errorf("bad day %q", match[3])
		}
		sf.Filters.DateFilter.Dates[0].Day = day
	}
	return sf, nil
}
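To make the filter concrete: for a by-day path such as media/by-day/2000/2000-01-02, the match slice carries "2000", "01", "02", and the filter built above holds Year 2000, Month 1, Day 2 (TestPatternYearMonthDayFilter later in this diff pins this down). The api struct definitions live in the suppressed file above, so the JSON field names below are an assumption, modelled on the Library API search body:

// Hypothetical wire form of the SearchFilter for 2000-01-02 --
// treat the field names as an assumption, not the api package's
// verified tags:
// {
//   "filters": {
//     "dateFilter": {
//       "dates": [{"year": 2000, "month": 1, "day": 2}]
//     }
//   }
// }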
// Turns an albumPath into entries
//
// These can either be synthetic directory entries if the album path
// is a prefix of another album, or actual files, or a combination of
// the two.
func albumsToEntries(ctx context.Context, f lister, shared bool, prefix string, albumPath string) (entries fs.DirEntries, err error) {
	albums, err := f.listAlbums(ctx, shared)
	if err != nil {
		return nil, err
	}
	// Put in the directories
	dirs, foundAlbumPath := albums.getDirs(albumPath)
	if foundAlbumPath {
		for _, dir := range dirs {
			d := fs.NewDir(prefix+dir, f.dirTime())
			dirPath := path.Join(albumPath, dir)
			// if this dir is an album add more special stuff
			album, ok := albums.get(dirPath)
			if ok {
				count, err := strconv.ParseInt(album.MediaItemsCount, 10, 64)
				if err != nil {
					fs.Debugf(f, "Error reading media count: %v", err)
				}
				d.SetID(album.ID).SetItems(count)
			}
			entries = append(entries, d)
		}
	}
	// if this is an album then return a filter to list it
	album, foundAlbum := albums.get(albumPath)
	if foundAlbum {
		filter := api.SearchFilter{AlbumID: album.ID}
		newEntries, err := f.listDir(ctx, prefix, filter)
		if err != nil {
			return nil, err
		}
		entries = append(entries, newEntries...)
	}
	if !foundAlbumPath && !foundAlbum && albumPath != "" {
		return nil, fs.ErrorDirNotFound
	}
	return entries, nil
}
@@ -1,495 +0,0 @@
package googlephotos

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/rclone/rclone/backend/googlephotos/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/dirtree"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/mockobject"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// time for directories
var startTime = fstest.Time("2019-06-24T15:53:05.999999999Z")

// mock Fs for testing patterns
type testLister struct {
	t        *testing.T
	albums   *albums
	names    []string
	uploaded dirtree.DirTree
}

// newTestLister makes a mock for testing
func newTestLister(t *testing.T) *testLister {
	return &testLister{
		t:        t,
		albums:   newAlbums(),
		uploaded: dirtree.New(),
	}
}

// mock listDir for testing
func (f *testLister) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {
	for _, name := range f.names {
		entries = append(entries, mockobject.New(prefix+name))
	}
	return entries, nil
}

// mock listAlbums for testing
func (f *testLister) listAlbums(ctx context.Context, shared bool) (all *albums, err error) {
	return f.albums, nil
}

// mock listUploads for testing
func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	entries, _ = f.uploaded[dir]
	return entries, nil
}

// mock dirTime for testing
func (f *testLister) dirTime() time.Time {
	return startTime
}

func TestPatternMatch(t *testing.T) {
	for testNumber, test := range []struct {
		// input
		root     string
		itemPath string
		isFile   bool
		// expected output
		wantMatch   []string
		wantPrefix  string
		wantPattern *dirPattern
	}{
		{
			root:        "",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{""},
			wantPrefix:  "",
			wantPattern: &patterns[0],
		},
		{
			root:        "",
			itemPath:    "",
			isFile:      true,
			wantMatch:   nil,
			wantPrefix:  "",
			wantPattern: nil,
		},
		{
			root:        "upload",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{"upload", ""},
			wantPrefix:  "",
			wantPattern: &patterns[1],
		},
		{
			root:        "upload/dir",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{"upload/dir", "dir"},
			wantPrefix:  "",
			wantPattern: &patterns[1],
		},
		{
			root:        "upload/file.jpg",
			itemPath:    "",
			isFile:      true,
			wantMatch:   []string{"upload/file.jpg", "file.jpg"},
			wantPrefix:  "",
			wantPattern: &patterns[2],
		},
		{
			root:        "media",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{"media"},
			wantPrefix:  "",
			wantPattern: &patterns[3],
		},
		{
			root:        "",
			itemPath:    "media",
			isFile:      false,
			wantMatch:   []string{"media"},
			wantPrefix:  "media/",
			wantPattern: &patterns[3],
		},
		{
			root:        "media/all",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{"media/all"},
			wantPrefix:  "",
			wantPattern: &patterns[4],
		},
		{
			root:        "media",
			itemPath:    "all",
			isFile:      false,
			wantMatch:   []string{"media/all"},
			wantPrefix:  "all/",
			wantPattern: &patterns[4],
		},
		{
			root:        "media/all",
			itemPath:    "file.jpg",
			isFile:      true,
			wantMatch:   []string{"media/all/file.jpg", "file.jpg"},
			wantPrefix:  "file.jpg/",
			wantPattern: &patterns[5],
		},
	} {
		t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q,isFile=%v", testNumber, test.root, test.itemPath, test.isFile), func(t *testing.T) {
			gotMatch, gotPrefix, gotPattern := patterns.match(test.root, test.itemPath, test.isFile)
			assert.Equal(t, test.wantMatch, gotMatch)
			assert.Equal(t, test.wantPrefix, gotPrefix)
			assert.Equal(t, test.wantPattern, gotPattern)
		})
	}
}

func TestPatternMatchToEntries(t *testing.T) {
	ctx := context.Background()
	f := newTestLister(t)
	f.names = []string{"file.jpg"}
	f.albums.add(&api.Album{
		ID:    "1",
		Title: "sub/one",
	})
	f.albums.add(&api.Album{
		ID:    "2",
		Title: "sub",
	})
	f.uploaded.AddEntry(mockobject.New("upload/file1.jpg"))
	f.uploaded.AddEntry(mockobject.New("upload/dir/file2.jpg"))

	for testNumber, test := range []struct {
		// input
		root     string
		itemPath string
		// expected output
		wantMatch  []string
		wantPrefix string
		remotes    []string
	}{
		{
			root:       "",
			itemPath:   "",
			wantMatch:  []string{""},
			wantPrefix: "",
			remotes:    []string{"media/", "album/", "shared-album/", "upload/"},
		},
		{
			root:       "upload",
			itemPath:   "",
			wantMatch:  []string{"upload", ""},
			wantPrefix: "",
			remotes:    []string{"upload/file1.jpg", "upload/dir/"},
		},
		{
			root:       "upload",
			itemPath:   "dir",
			wantMatch:  []string{"upload/dir", "dir"},
			wantPrefix: "dir/",
			remotes:    []string{"upload/dir/file2.jpg"},
		},
		{
			root:       "media",
			itemPath:   "",
			wantMatch:  []string{"media"},
			wantPrefix: "",
			remotes:    []string{"all/", "by-year/", "by-month/", "by-day/"},
		},
		{
			root:       "media/all",
			itemPath:   "",
			wantMatch:  []string{"media/all"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "media",
			itemPath:   "all",
			wantMatch:  []string{"media/all"},
			wantPrefix: "all/",
			remotes:    []string{"all/file.jpg"},
		},
		{
			root:       "media/by-year",
			itemPath:   "",
			wantMatch:  []string{"media/by-year"},
			wantPrefix: "",
			remotes:    []string{"2000/", "2001/", "2002/", "2003/"},
		},
		{
			root:       "media/by-year/2000",
			itemPath:   "",
			wantMatch:  []string{"media/by-year/2000", "2000"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "media/by-month",
			itemPath:   "",
			wantMatch:  []string{"media/by-month"},
			wantPrefix: "",
			remotes:    []string{"2000/", "2001/", "2002/", "2003/"},
		},
		{
			root:       "media/by-month/2001",
			itemPath:   "",
			wantMatch:  []string{"media/by-month/2001", "2001"},
			wantPrefix: "",
			remotes:    []string{"2001-01/", "2001-02/", "2001-03/", "2001-04/"},
		},
		{
			root:       "media/by-month/2001/2001-01",
			itemPath:   "",
			wantMatch:  []string{"media/by-month/2001/2001-01", "2001", "01"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "media/by-day",
			itemPath:   "",
			wantMatch:  []string{"media/by-day"},
			wantPrefix: "",
			remotes:    []string{"2000/", "2001/", "2002/", "2003/"},
		},
		{
			root:       "media/by-day/2001",
			itemPath:   "",
			wantMatch:  []string{"media/by-day/2001", "2001"},
			wantPrefix: "",
			remotes:    []string{"2001-01-01/", "2001-01-02/", "2001-01-03/", "2001-01-04/"},
		},
		{
			root:       "media/by-day/2001/2001-01-02",
			itemPath:   "",
			wantMatch:  []string{"media/by-day/2001/2001-01-02", "2001", "01", "02"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "album",
			itemPath:   "",
			wantMatch:  []string{"album"},
			wantPrefix: "",
			remotes:    []string{"sub/"},
		},
		{
			root:       "album/sub",
			itemPath:   "",
			wantMatch:  []string{"album/sub", "sub"},
			wantPrefix: "",
			remotes:    []string{"one/", "file.jpg"},
		},
		{
			root:       "album/sub/one",
			itemPath:   "",
			wantMatch:  []string{"album/sub/one", "sub/one"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "shared-album",
			itemPath:   "",
			wantMatch:  []string{"shared-album"},
			wantPrefix: "",
			remotes:    []string{"sub/"},
		},
		{
			root:       "shared-album/sub",
			itemPath:   "",
			wantMatch:  []string{"shared-album/sub", "sub"},
			wantPrefix: "",
			remotes:    []string{"one/", "file.jpg"},
		},
		{
			root:       "shared-album/sub/one",
			itemPath:   "",
			wantMatch:  []string{"shared-album/sub/one", "sub/one"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
	} {
		t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q", testNumber, test.root, test.itemPath), func(t *testing.T) {
			match, prefix, pattern := patterns.match(test.root, test.itemPath, false)
			assert.Equal(t, test.wantMatch, match)
			assert.Equal(t, test.wantPrefix, prefix)
			assert.NotNil(t, pattern)
			assert.NotNil(t, pattern.toEntries)

			entries, err := pattern.toEntries(ctx, f, prefix, match)
			assert.NoError(t, err)
			var remotes = []string{}
			for _, entry := range entries {
				remote := entry.Remote()
				if _, isDir := entry.(fs.Directory); isDir {
					remote += "/"
				}
				remotes = append(remotes, remote)
				if len(remotes) >= 4 {
					break // only test first 4 entries
				}
			}
			assert.Equal(t, test.remotes, remotes)
		})
	}
}

func TestPatternYears(t *testing.T) {
	f := newTestLister(t)
	entries, err := years(context.Background(), f, "potato/", nil)
	require.NoError(t, err)

	year := 2000
	for _, entry := range entries {
		assert.Equal(t, "potato/"+fmt.Sprint(year), entry.Remote())
		year++
	}
}

func TestPatternMonths(t *testing.T) {
	f := newTestLister(t)
	entries, err := months(context.Background(), f, "potato/", []string{"", "2020"})
	require.NoError(t, err)

	assert.Equal(t, 12, len(entries))
	for i, entry := range entries {
		assert.Equal(t, fmt.Sprintf("potato/2020-%02d", i+1), entry.Remote())
	}
}

func TestPatternDays(t *testing.T) {
	f := newTestLister(t)
	entries, err := days(context.Background(), f, "potato/", []string{"", "2020"})
	require.NoError(t, err)

	assert.Equal(t, 366, len(entries))
	assert.Equal(t, "potato/2020-01-01", entries[0].Remote())
	assert.Equal(t, "potato/2020-12-31", entries[len(entries)-1].Remote())
}

func TestPatternYearMonthDayFilter(t *testing.T) {
	ctx := context.Background()
	f := newTestLister(t)

	// Years
	sf, err := yearMonthDayFilter(ctx, f, []string{"", "2000"})
	require.NoError(t, err)
	assert.Equal(t, api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Year: 2000,
					},
				},
			},
		},
	}, sf)

	_, err = yearMonthDayFilter(ctx, f, []string{"", "potato"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "999"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "4000"})
	require.Error(t, err)

	// Months
	sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01"})
	require.NoError(t, err)
	assert.Equal(t, api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Month: 1,
						Year:  2000,
					},
				},
			},
		},
	}, sf)

	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "potato"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "0"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "13"})
	require.Error(t, err)

	// Days
	sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "02"})
	require.NoError(t, err)
	assert.Equal(t, api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Day:   2,
						Month: 1,
						Year:  2000,
					},
				},
			},
		},
	}, sf)

	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "potato"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "0"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "32"})
	require.Error(t, err)
}

func TestPatternAlbumsToEntries(t *testing.T) {
	f := newTestLister(t)
	ctx := context.Background()

	_, err := albumsToEntries(ctx, f, false, "potato/", "sub")
	assert.Equal(t, fs.ErrorDirNotFound, err)

	f.albums.add(&api.Album{
		ID:    "1",
		Title: "sub/one",
	})

	entries, err := albumsToEntries(ctx, f, false, "potato/", "sub")
	assert.NoError(t, err)
	assert.Equal(t, 1, len(entries))
	assert.Equal(t, "potato/one", entries[0].Remote())
	_, ok := entries[0].(fs.Directory)
	assert.Equal(t, true, ok)

	f.albums.add(&api.Album{
		ID:    "1",
		Title: "sub",
	})
	f.names = []string{"file.jpg"}

	entries, err = albumsToEntries(ctx, f, false, "potato/", "sub")
	assert.NoError(t, err)
	assert.Equal(t, 2, len(entries))
	assert.Equal(t, "potato/one", entries[0].Remote())
	_, ok = entries[0].(fs.Directory)
	assert.Equal(t, true, ok)
	assert.Equal(t, "potato/file.jpg", entries[1].Remote())
	_, ok = entries[1].(fs.Object)
	assert.Equal(t, true, ok)
}
Binary file not shown (before: 16 KiB).
Binary file not shown (before: 16 KiB).
@@ -5,24 +5,21 @@
 package http
 
 import (
-	"context"
 	"io"
-	"mime"
 	"net/http"
 	"net/url"
 	"path"
 	"strconv"
 	"strings"
-	"sync"
 	"time"
 
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
+	"github.com/ncw/rclone/fs/fshttp"
+	"github.com/ncw/rclone/fs/hash"
+	"github.com/ncw/rclone/lib/rest"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/fshttp"
-	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/lib/rest"
 	"golang.org/x/net/html"
 )
 
@@ -43,61 +40,7 @@ func init() {
 		Examples: []fs.OptionExample{{
 			Value: "https://example.com",
 			Help:  "Connect to example.com",
-		}, {
-			Value: "https://user:pass@example.com",
-			Help:  "Connect to example.com using a username and password",
 		}},
-	}, {
-		Name: "headers",
-		Help: `Set HTTP headers for all transactions
-
-Use this to set additional HTTP headers for all transactions
-
-The input format is comma separated list of key,value pairs. Standard
-[CSV encoding](https://godoc.org/encoding/csv) may be used.
-
-For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
-
-You can set multiple headers, eg '"Cookie","name=value","Authorization","xxx"'.
-`,
-		Default:  fs.CommaSepList{},
-		Advanced: true,
-	}, {
-		Name: "no_slash",
-		Help: `Set this if the site doesn't end directories with /
-
-Use this if your target website does not use / on the end of
-directories.
-
-A / on the end of a path is how rclone normally tells the difference
-between files and directories. If this flag is set, then rclone will
-treat all files with Content-Type: text/html as directories and read
-URLs from them rather than downloading them.
-
-Note that this may cause rclone to confuse genuine HTML files with
-directories.`,
-		Default:  false,
-		Advanced: true,
-	}, {
-		Name: "no_head",
-		Help: `Don't use HEAD requests to find file sizes in dir listing
-
-If your site is being very slow to load then you can try this option.
-Normally rclone does a HEAD request for each potential file in a
-directory listing to:
-
-- find its size
-- check it really exists
-- check to see if it is a directory
-
-If you set this option, rclone will not do the HEAD request. This will mean
-
-- directory listings are much quicker
-- rclone won't have the times or sizes of any files
-- some files that don't exist may be in the listing
-`,
-		Default:  false,
-		Advanced: true,
 	}},
 }
 fs.Register(fsi)
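The headers help text removed above describes a comma-separated key,value list with optional CSV quoting. A standalone sketch of that format follows; rclone itself parses the option via fs.CommaSepList, so the csv-based parsing here is an illustrative stand-in, not the backend's actual code path:

package main

import (
	"encoding/csv"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// The documented format: pairs of key,value, CSV-encoded if needed.
	spec := `"Cookie","name=value","Authorization","xxx"`

	fields, err := csv.NewReader(strings.NewReader(spec)).Read()
	if err != nil || len(fields)%2 != 0 {
		panic("headers must come in key,value pairs")
	}

	req, _ := http.NewRequest("GET", "https://example.com", nil)
	for i := 0; i < len(fields); i += 2 {
		req.Header.Add(fields[i], fields[i+1])
	}
	fmt.Println(req.Header) // map[Authorization:[xxx] Cookie:[name=value]]
}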
@@ -105,10 +48,7 @@ If you set this option, rclone will not do the HEAD request. This will mean
 
 // Options defines the configuration for this backend
 type Options struct {
 	Endpoint string          `config:"url"`
-	NoSlash  bool            `config:"no_slash"`
-	NoHead   bool            `config:"no_head"`
-	Headers  fs.CommaSepList `config:"headers"`
 }
 
 // Fs stores the interface to the remote HTTP files
@@ -146,7 +86,6 @@ func statusError(res *http.Response, err error) error {
 // NewFs creates a new Fs object from the name and root. It connects to
 // the host specified in the config file.
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-	ctx := context.TODO()
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -154,10 +93,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, err
 	}
 
-	if len(opt.Headers)%2 != 0 {
-		return nil, errors.New("odd number of headers supplied")
-	}
-
 	if !strings.HasSuffix(opt.Endpoint, "/") {
 		opt.Endpoint += "/"
 	}
@@ -183,15 +118,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 			return http.ErrUseLastResponse
 		}
 		// check to see if points to a file
-		req, err := http.NewRequest("HEAD", u.String(), nil)
+		res, err := noRedir.Head(u.String())
+		err = statusError(res, err)
 		if err == nil {
-			req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
-			addHeaders(req, opt)
-			res, err := noRedir.Do(req)
-			err = statusError(res, err)
-			if err == nil {
-				isFile = true
-			}
+			isFile = true
 		}
 	}
 
@@ -256,14 +186,14 @@ func (f *Fs) Precision() time.Duration {
 }
 
 // NewObject creates a new remote http file object
-func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+func (f *Fs) NewObject(remote string) (fs.Object, error) {
 	o := &Object{
 		fs:     f,
 		remote: remote,
 	}
-	err := o.stat(ctx)
+	err := o.stat()
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrap(err, "Stat failed")
 	}
 	return o, nil
 }
@@ -318,7 +248,7 @@ func parseName(base *url.URL, name string) (string, error) {
 	}
 	// calculate the name relative to the base
 	name = u.Path[len(base.Path):]
-	// mustn't be empty
+	// musn't be empty
 	if name == "" {
 		return "", errNameIsEmpty
 	}
@@ -337,20 +267,14 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
 	if err != nil {
 		return nil, err
 	}
-	var (
-		walk func(*html.Node)
-		seen = make(map[string]struct{})
-	)
+	var walk func(*html.Node)
 	walk = func(n *html.Node) {
 		if n.Type == html.ElementNode && n.Data == "a" {
 			for _, a := range n.Attr {
 				if a.Key == "href" {
 					name, err := parseName(base, a.Val)
 					if err == nil {
-						if _, found := seen[name]; !found {
-							names = append(names, name)
-							seen[name] = struct{}{}
-						}
+						names = append(names, name)
 					}
 					break
 				}
@@ -364,22 +288,8 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
 	return names, nil
 }
 
-// Adds the configured headers to the request if any
-func addHeaders(req *http.Request, opt *Options) {
-	for i := 0; i < len(opt.Headers); i += 2 {
-		key := opt.Headers[i]
-		value := opt.Headers[i+1]
-		req.Header.Add(key, value)
-	}
-}
-
-// Adds the configured headers to the request if any
-func (f *Fs) addHeaders(req *http.Request) {
-	addHeaders(req, &f.opt)
-}
-
 // Read the directory passed in
-func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error) {
+func (f *Fs) readDir(dir string) (names []string, err error) {
 	URL := f.url(dir)
 	u, err := url.Parse(URL)
 	if err != nil {
@@ -388,24 +298,15 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
 	if !strings.HasSuffix(URL, "/") {
 		return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
 	}
-	// Do the request
-	req, err := http.NewRequest("GET", URL, nil)
-	if err != nil {
-		return nil, errors.Wrap(err, "readDir failed")
-	}
-	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
-	f.addHeaders(req)
-	res, err := f.httpClient.Do(req)
-	if err == nil {
-		defer fs.CheckClose(res.Body, &err)
-		if res.StatusCode == http.StatusNotFound {
-			return nil, fs.ErrorDirNotFound
-		}
+	res, err := f.httpClient.Get(URL)
+	if err == nil && res.StatusCode == http.StatusNotFound {
+		return nil, fs.ErrorDirNotFound
 	}
 	err = statusError(res, err)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to readDir")
 	}
+	defer fs.CheckClose(res.Body, &err)
 
 	contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
 	switch contentType {
@@ -429,57 +330,33 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 	if !strings.HasSuffix(dir, "/") && dir != "" {
 		dir += "/"
 	}
-	names, err := f.readDir(ctx, dir)
+	names, err := f.readDir(dir)
 	if err != nil {
 		return nil, errors.Wrapf(err, "error listing %q", dir)
 	}
-	var (
-		entriesMu sync.Mutex // to protect entries
-		wg        sync.WaitGroup
-		in        = make(chan string, fs.Config.Checkers)
-	)
-	add := func(entry fs.DirEntry) {
-		entriesMu.Lock()
-		entries = append(entries, entry)
-		entriesMu.Unlock()
-	}
-	for i := 0; i < fs.Config.Checkers; i++ {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-			for remote := range in {
-				file := &Object{
-					fs:     f,
-					remote: remote,
-				}
-				switch err := file.stat(ctx); err {
-				case nil:
-					add(file)
-				case fs.ErrorNotAFile:
-					// ...found a directory not a file
-					add(fs.NewDir(remote, timeUnset))
-				default:
-					fs.Debugf(remote, "skipping because of error: %v", err)
-				}
-			}
-		}()
-	}
 	for _, name := range names {
 		isDir := name[len(name)-1] == '/'
 		name = strings.TrimRight(name, "/")
 		remote := path.Join(dir, name)
 		if isDir {
-			add(fs.NewDir(remote, timeUnset))
+			dir := fs.NewDir(remote, timeUnset)
+			entries = append(entries, dir)
 		} else {
-			in <- remote
+			file := &Object{
+				fs:     f,
+				remote: remote,
+			}
+			if err = file.stat(); err != nil {
+				fs.Debugf(remote, "skipping because of error: %v", err)
+				continue
+			}
+			entries = append(entries, file)
 		}
 	}
-	close(in)
-	wg.Wait()
 	return entries, nil
 }
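The List implementation on the removed side stats candidate files concurrently: a fixed pool of fs.Config.Checkers goroutines drains a channel of remotes, and a mutex guards the shared entries slice, while the other side stats each file serially. The same worker-pool pattern in isolation, with a constant standing in for fs.Config.Checkers:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const checkers = 8 // stand-in for fs.Config.Checkers

	var (
		mu      sync.Mutex // protects results
		results []string
		wg      sync.WaitGroup
		in      = make(chan string, checkers)
	)
	// Start a fixed pool of workers draining the channel.
	for i := 0; i < checkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for remote := range in {
				// A per-file stat() would happen here; collect under the lock.
				mu.Lock()
				results = append(results, remote)
				mu.Unlock()
			}
		}()
	}
	for _, name := range []string{"a.jpg", "b.jpg", "c.jpg"} {
		in <- name
	}
	close(in)
	wg.Wait()
	fmt.Println(len(results), "entries")
}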
@@ -488,12 +365,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // May create the object even if it returns an error - if so
 // will return the object and the error, otherwise will return
 // nil and the error
-func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	return nil, errorReadOnly
 }
 
 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	return nil, errorReadOnly
 }
 
@@ -516,7 +393,7 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
-func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
+func (o *Object) Hash(r hash.Type) (string, error) {
 	return "", hash.ErrUnsupported
 }
 
@@ -526,7 +403,7 @@ func (o *Object) Size() int64 {
 }
 
 // ModTime returns the modification time of the remote http file
-func (o *Object) ModTime(ctx context.Context) time.Time {
+func (o *Object) ModTime() time.Time {
 	return o.modTime
 }
 
@@ -536,24 +413,9 @@ func (o *Object) url() string {
 }
 
 // stat updates the info field in the Object
-func (o *Object) stat(ctx context.Context) error {
-	if o.fs.opt.NoHead {
-		o.size = -1
-		o.modTime = timeUnset
-		o.contentType = fs.MimeType(ctx, o)
-		return nil
-	}
+func (o *Object) stat() error {
 	url := o.url()
-	req, err := http.NewRequest("HEAD", url, nil)
-	if err != nil {
-		return errors.Wrap(err, "stat failed")
-	}
-	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
-	o.fs.addHeaders(req)
-	res, err := o.fs.httpClient.Do(req)
-	if err == nil && res.StatusCode == http.StatusNotFound {
-		return fs.ErrorObjectNotFound
-	}
+	res, err := o.fs.httpClient.Head(url)
 	err = statusError(res, err)
 	if err != nil {
 		return errors.Wrap(err, "failed to stat")
@@ -565,23 +427,13 @@ func (o *Object) stat(ctx context.Context) error {
 	o.size = parseInt64(res.Header.Get("Content-Length"), -1)
 	o.modTime = t
 	o.contentType = res.Header.Get("Content-Type")
-	// If NoSlash is set then check ContentType to see if it is a directory
-	if o.fs.opt.NoSlash {
-		mediaType, _, err := mime.ParseMediaType(o.contentType)
-		if err != nil {
-			return errors.Wrapf(err, "failed to parse Content-Type: %q", o.contentType)
-		}
-		if mediaType == "text/html" {
-			return fs.ErrorNotAFile
-		}
-	}
 	return nil
 }
 
 // SetModTime sets the modification and access time to the specified time
 //
 // it also updates the info field
-func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
+func (o *Object) SetModTime(modTime time.Time) error {
 	return errorReadOnly
 }
 
@@ -591,19 +443,17 @@ func (o *Object) Storable() bool {
 }
 
 // Open a remote http file object for reading. Seek is supported
-func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	url := o.url()
 	req, err := http.NewRequest("GET", url, nil)
 	if err != nil {
 		return nil, errors.Wrap(err, "Open failed")
 	}
-	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 
 	// Add optional headers
 	for k, v := range fs.OpenOptionHeaders(options) {
 		req.Header.Add(k, v)
 	}
-	o.fs.addHeaders(req)
 
 	// Do the request
 	res, err := o.fs.httpClient.Do(req)
@@ -620,27 +470,27 @@ func (f *Fs) Hashes() hash.Set {
 }
 
 // Mkdir makes the root directory of the Fs object
-func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+func (f *Fs) Mkdir(dir string) error {
 	return errorReadOnly
 }
 
 // Remove a remote http file object
-func (o *Object) Remove(ctx context.Context) error {
+func (o *Object) Remove() error {
 	return errorReadOnly
 }
 
 // Rmdir removes the root directory of the Fs object
-func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+func (f *Fs) Rmdir(dir string) error {
 	return errorReadOnly
 }
 
 // Update in to the object with the modTime given of the given size
-func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 	return errorReadOnly
 }
 
 // MimeType of an Object if known, "" otherwise
-func (o *Object) MimeType(ctx context.Context) string {
+func (o *Object) MimeType() string {
 	return o.contentType
 }
 
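Every hunk in this file is the same mechanical change: the newer tree threads a context.Context through the backend methods and attaches it to outgoing requests with req.WithContext. (Note this comparison runs from a newer tree down to v1.43.1, so the removed lines here are in fact the newer code.) A minimal, self-contained sketch of that pattern, using plain net/http — statHead and the URL are illustrative stand-ins, not rclone's actual code:

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// statHead issues a HEAD request bound to ctx, so cancelling the context
// aborts the in-flight request.
func statHead(ctx context.Context, client *http.Client, statURL string) (*http.Response, error) {
	req, err := http.NewRequest("HEAD", statURL, nil)
	if err != nil {
		return nil, err
	}
	req = req.WithContext(ctx) // on go1.13+ http.NewRequestWithContext does both steps
	return client.Do(req)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	res, err := statHead(ctx, http.DefaultClient, "https://example.com/")
	if err != nil {
		fmt.Println("stat failed:", err)
		return
	}
	defer res.Body.Close()
	fmt.Println(res.Status, res.Header.Get("Content-Type"))
}

The hunks below switch to a second file, evidently this backend's test suite (package http plus the testing imports).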
@@ -1,7 +1,8 @@
+// +build go1.8
+
 package http
 
 import (
-	"context"
 	"fmt"
 	"io/ioutil"
 	"net/http"
@@ -10,15 +11,14 @@ import (
 	"os"
 	"path/filepath"
 	"sort"
-	"strings"
 	"testing"
 	"time"
 
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fstest"
-	"github.com/rclone/rclone/lib/rest"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fstest"
+	"github.com/ncw/rclone/lib/rest"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -27,7 +27,6 @@ var (
 	remoteName = "TestHTTP"
 	testPath   = "test"
 	filesPath  = filepath.Join(testPath, "files")
-	headers    = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
 )
 
 // prepareServer the test server and return a function to tidy it up afterwards
@@ -35,16 +34,8 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
 	// file server for test/files
 	fileServer := http.FileServer(http.Dir(filesPath))
 
-	// test the headers are there then pass on to fileServer
-	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
-		assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
-		assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
-		fileServer.ServeHTTP(w, r)
-	})
-
 	// Make the test server
-	ts := httptest.NewServer(handler)
+	ts := httptest.NewServer(fileServer)
 
 	// Configure the remote
 	config.LoadConfig()
@@ -55,9 +46,8 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
 	// config.FileSet(remoteName, "url", ts.URL)
 
 	m := configmap.Simple{
 		"type": "http",
 		"url":  ts.URL,
-		"headers": strings.Join(headers, ","),
 	}
 
 	// return a function to tidy up
@@ -75,8 +65,8 @@ func prepare(t *testing.T) (fs.Fs, func()) {
 	return f, tidy
 }
 
-func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
-	entries, err := f.List(context.Background(), "")
+func testListRoot(t *testing.T, f fs.Fs) {
+	entries, err := f.List("")
 	require.NoError(t, err)
 
 	sort.Sort(entries)
@@ -103,36 +93,22 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
 
 	e = entries[3]
 	assert.Equal(t, "two.html", e.Remote())
-	if noSlash {
-		assert.Equal(t, int64(-1), e.Size())
-		_, ok = e.(fs.Directory)
-		assert.True(t, ok)
-	} else {
-		assert.Equal(t, int64(41), e.Size())
-		_, ok = e.(*Object)
-		assert.True(t, ok)
-	}
+	assert.Equal(t, int64(7), e.Size())
+	_, ok = e.(*Object)
+	assert.True(t, ok)
 }
 
 func TestListRoot(t *testing.T) {
 	f, tidy := prepare(t)
 	defer tidy()
-	testListRoot(t, f, false)
-}
-
-func TestListRootNoSlash(t *testing.T) {
-	f, tidy := prepare(t)
-	f.(*Fs).opt.NoSlash = true
-	defer tidy()
-
-	testListRoot(t, f, true)
+	testListRoot(t, f)
 }
 
 func TestListSubDir(t *testing.T) {
 	f, tidy := prepare(t)
 	defer tidy()
 
-	entries, err := f.List(context.Background(), "three")
+	entries, err := f.List("three")
 	require.NoError(t, err)
 
 	sort.Sort(entries)
@@ -150,7 +126,7 @@ func TestNewObject(t *testing.T) {
 	f, tidy := prepare(t)
 	defer tidy()
 
-	o, err := f.NewObject(context.Background(), "four/under four.txt")
+	o, err := f.NewObject("four/under four.txt")
 	require.NoError(t, err)
 
 	assert.Equal(t, "four/under four.txt", o.Remote())
@@ -160,7 +136,7 @@ func TestNewObject(t *testing.T) {
 
 	// Test the time is correct on the object
 
-	tObj := o.ModTime(context.Background())
+	tObj := o.ModTime()
 
 	fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
 	require.NoError(t, err)
@@ -168,22 +144,17 @@ func TestNewObject(t *testing.T) {
 
 	dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
 	assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
-
-	// check object not found
-	o, err = f.NewObject(context.Background(), "not found.txt")
-	assert.Nil(t, o)
-	assert.Equal(t, fs.ErrorObjectNotFound, err)
 }
 
 func TestOpen(t *testing.T) {
 	f, tidy := prepare(t)
 	defer tidy()
 
-	o, err := f.NewObject(context.Background(), "four/under four.txt")
+	o, err := f.NewObject("four/under four.txt")
 	require.NoError(t, err)
 
 	// Test normal read
-	fd, err := o.Open(context.Background())
+	fd, err := o.Open()
 	require.NoError(t, err)
 	data, err := ioutil.ReadAll(fd)
 	require.NoError(t, err)
@@ -191,7 +162,7 @@ func TestOpen(t *testing.T) {
 	assert.Equal(t, "beetroot\n", string(data))
 
 	// Test with range request
-	fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
+	fd, err = o.Open(&fs.RangeOption{Start: 1, End: 5})
 	require.NoError(t, err)
 	data, err = ioutil.ReadAll(fd)
 	require.NoError(t, err)
@@ -203,12 +174,12 @@ func TestMimeType(t *testing.T) {
 	f, tidy := prepare(t)
 	defer tidy()
 
-	o, err := f.NewObject(context.Background(), "four/under four.txt")
+	o, err := f.NewObject("four/under four.txt")
 	require.NoError(t, err)
 
 	do, ok := o.(fs.MimeTyper)
 	require.True(t, ok)
-	assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
+	assert.Equal(t, "text/plain; charset=utf-8", do.MimeType())
 }
 
 func TestIsAFileRoot(t *testing.T) {
@@ -218,7 +189,7 @@ func TestIsAFileRoot(t *testing.T) {
 	f, err := NewFs(remoteName, "one%.txt", m)
 	assert.Equal(t, err, fs.ErrorIsFile)
 
-	testListRoot(t, f, false)
+	testListRoot(t, f)
 }
 
 func TestIsAFileSubDir(t *testing.T) {
@@ -228,7 +199,7 @@ func TestIsAFileSubDir(t *testing.T) {
 	f, err := NewFs(remoteName, "three/underthree.txt", m)
 	assert.Equal(t, err, fs.ErrorIsFile)
 
-	entries, err := f.List(context.Background(), "")
+	entries, err := f.List("")
 	require.NoError(t, err)
 
 	sort.Sort(entries)
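The deleted prepareServer lines above wrapped the file server in a handler that asserted custom request headers before delegating, apparently to exercise the backend's headers option. A standalone sketch of that wrapping pattern using only the standard library — the header name/value and the served directory are arbitrary:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	fileServer := http.FileServer(http.Dir("."))

	// Check an expected header on every request, then delegate to fileServer.
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Header.Get("X-Potato") != "sausage" {
			http.Error(w, "missing expected header", http.StatusBadRequest)
			return
		}
		fileServer.ServeHTTP(w, r)
	})

	ts := httptest.NewServer(handler)
	defer ts.Close()

	req, err := http.NewRequest("GET", ts.URL, nil)
	if err != nil {
		fmt.Println(err)
		return
	}
	req.Header.Set("X-Potato", "sausage")
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer res.Body.Close()
	fmt.Println(res.Status) // 200 OK
}

Next come two HTML test fixtures: a one-line file used by the listing tests, and the Apache-style index page from issue 1573.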
@@ -1 +1 @@
-<a href="two.html/file.txt">file.txt</a>
+potato
@@ -24,7 +24,7 @@
 <tr><td valign="top"><img src="/icons/unknown.gif" alt="[   ]"></td><td><a href="timer-test">timer-test</a></td><td align="right">09-May-2017 17:05  </td><td align="right">1.5M</td><td> </td></tr>
 <tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="words-to-regexp.pl">words-to-regexp.pl</a></td><td align="right">01-Mar-2005 20:43  </td><td align="right">6.0K</td><td> </td></tr>
 <tr><th colspan="5"><hr></th></tr>
-<!-- some extras from https://github.com/rclone/rclone/issues/1573 -->
+<!-- some extras from https://github.com/ncw/rclone/issues/1573 -->
 <tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20100%25%20better.mp3">Now 100% better.mp3</a></td><td align="right">2017-08-01 11:41  </td><td align="right">  0 </td><td> </td></tr>
 <tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20better.mp3">Now better.mp3</a></td><td align="right">2017-08-01 11:41  </td><td align="right">  0 </td><td> </td></tr>
@@ -1,12 +1,11 @@
 package hubic
 
 import (
-	"context"
 	"net/http"
 	"time"
 
+	"github.com/ncw/rclone/fs"
 	"github.com/ncw/swift"
-	"github.com/rclone/rclone/fs"
 )
 
 // auth is an authenticator for swift
@@ -27,7 +26,7 @@ func newAuth(f *Fs) *auth {
 func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
 	const retries = 10
 	for try := 1; try <= retries; try++ {
-		err = a.f.getCredentials(context.TODO())
+		err = a.f.getCredentials()
 		if err == nil {
 			break
 		}
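In the hunk above the newer code passes context.TODO() into getCredentials, because the swift authenticator interface has a fixed Request signature that cannot grow a ctx parameter. A minimal illustration of that bridging trick, with a stand-in interface:

package main

import (
	"context"
	"fmt"
)

// Authenticator mimics a third-party interface whose signature is fixed.
type Authenticator interface {
	Request() error
}

type auth struct{}

// getCredentials is context-aware, like the rest of the refactored code.
func (a *auth) getCredentials(ctx context.Context) error {
	// the real code would issue an HTTP request bound to ctx here
	return nil
}

// Request satisfies the fixed interface by bridging with context.TODO(),
// which flags the call site as not yet properly plumbed.
func (a *auth) Request() error {
	return a.getCredentials(context.TODO())
}

func main() {
	var authenticator Authenticator = &auth{}
	fmt.Println(authenticator.Request())
}

The hunks that follow are from the main hubic backend file.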
@@ -7,25 +7,22 @@ package hubic
 // to be revisted after some actual experience.
 
 import (
-	"context"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"log"
 	"net/http"
-	"strings"
 	"time"
 
+	"github.com/ncw/rclone/backend/swift"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
+	"github.com/ncw/rclone/fs/config/obscure"
+	"github.com/ncw/rclone/fs/fshttp"
+	"github.com/ncw/rclone/lib/oauthutil"
 	swiftLib "github.com/ncw/swift"
 	"github.com/pkg/errors"
-	"github.com/rclone/rclone/backend/swift"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/config"
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/config/obscure"
-	"github.com/rclone/rclone/fs/fshttp"
-	"github.com/rclone/rclone/lib/oauthutil"
 	"golang.org/x/oauth2"
 )
 
@@ -116,21 +113,18 @@ func (f *Fs) String() string {
 // getCredentials reads the OpenStack Credentials using the Hubic API
 //
 // The credentials are read into the Fs
-func (f *Fs) getCredentials(ctx context.Context) (err error) {
+func (f *Fs) getCredentials() (err error) {
 	req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil)
 	if err != nil {
 		return err
 	}
-	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 	resp, err := f.client.Do(req)
 	if err != nil {
 		return err
 	}
 	defer fs.CheckClose(resp.Body, &err)
 	if resp.StatusCode < 200 || resp.StatusCode > 299 {
-		body, _ := ioutil.ReadAll(resp.Body)
-		bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
-		return errors.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
+		return errors.Errorf("failed to get credentials: %s", resp.Status)
 	}
 	decoder := json.NewDecoder(resp.Body)
 	var result credentials
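The removed lines in getCredentials above (again, the newer code) enrich the failure error by reading the response body and flattening its newlines, rather than reporting only the HTTP status. A self-contained sketch of that error path against a throwaway test server:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"strings"
)

// checkStatus folds a non-2xx response's body into the returned error.
func checkStatus(resp *http.Response) error {
	if resp.StatusCode >= 200 && resp.StatusCode <= 299 {
		return nil
	}
	body, _ := ioutil.ReadAll(resp.Body)
	bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
	return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
}

func main() {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "quota exceeded\nfor this account", http.StatusForbidden)
	}))
	defer ts.Close()

	resp, err := http.Get(ts.URL)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(checkStatus(resp))
	// failed to get credentials: 403 Forbidden: quota exceeded for this account
}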
@@ -4,16 +4,14 @@ package hubic_test
 import (
 	"testing"
 
-	"github.com/rclone/rclone/backend/hubic"
-	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/ncw/rclone/backend/hubic"
+	"github.com/ncw/rclone/fstest/fstests"
 )
 
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestHubic:",
 		NilObject:  (*hubic.Object)(nil),
-		SkipFsCheckWrap:     true,
-		SkipObjectCheckWrap: true,
 	})
 }
@@ -9,10 +9,7 @@ import (
 )
 
 const (
-	// default time format for almost all request and responses
 	timeFormat = "2006-01-02-T15:04:05Z0700"
-	// the API server seems to use a different format
-	apiTimeFormat = "2006-01-02T15:04:05Z07:00"
 )
 
 // Time represents time values in the Jottacloud API. It uses a custom RFC3339 like format.
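This file holds the Jottacloud API types. The const hunk above shows the two Go reference-time layouts involved: the XML format with its unusual "-T" separator, and the RFC3339-style layout used by the newer JSON API. A quick demonstration with time.Parse — the layouts are copied from the diff, the sample timestamp is made up:

package main

import (
	"fmt"
	"time"
)

const (
	timeFormat    = "2006-01-02-T15:04:05Z0700" // XML API layout
	apiTimeFormat = "2006-01-02T15:04:05Z07:00" // JSON API layout (the removed const)
)

func main() {
	// Parse a timestamp in the "-T" XML format...
	t, err := time.Parse(timeFormat, "2018-09-10-T12:34:56+0000")
	if err != nil {
		fmt.Println(err)
		return
	}
	// ...and re-render it in the RFC3339-style API format.
	fmt.Println(t.Format(apiTimeFormat)) // 2018-09-10T12:34:56Z
}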
@@ -43,85 +40,6 @@ func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
 // Return Time string in Jottacloud format
 func (t Time) String() string { return time.Time(t).Format(timeFormat) }
 
-// APIString returns Time string in Jottacloud API format
-func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
-
-// TokenJSON is the struct representing the HTTP response from OAuth2
-// providers returning a token in JSON form.
-type TokenJSON struct {
-	AccessToken  string `json:"access_token"`
-	TokenType    string `json:"token_type"`
-	RefreshToken string `json:"refresh_token"`
-	ExpiresIn    int32  `json:"expires_in"` // at least PayPal returns string, while most return number
-}
-
-// JSON structures returned by new API
-
-// AllocateFileRequest to prepare an upload to Jottacloud
-type AllocateFileRequest struct {
-	Bytes    int64  `json:"bytes"`
-	Created  string `json:"created"`
-	Md5      string `json:"md5"`
-	Modified string `json:"modified"`
-	Path     string `json:"path"`
-}
-
-// AllocateFileResponse for upload requests
-type AllocateFileResponse struct {
-	Name      string `json:"name"`
-	Path      string `json:"path"`
-	State     string `json:"state"`
-	UploadID  string `json:"upload_id"`
-	UploadURL string `json:"upload_url"`
-	Bytes     int64  `json:"bytes"`
-	ResumePos int64  `json:"resume_pos"`
-}
-
-// UploadResponse after an upload
-type UploadResponse struct {
-	Name      string      `json:"name"`
-	Path      string      `json:"path"`
-	Kind      string      `json:"kind"`
-	ContentID string      `json:"content_id"`
-	Bytes     int64       `json:"bytes"`
-	Md5       string      `json:"md5"`
-	Created   int64       `json:"created"`
-	Modified  int64       `json:"modified"`
-	Deleted   interface{} `json:"deleted"`
-	Mime      string      `json:"mime"`
-}
-
-// DeviceRegistrationResponse is the response to registering a device
-type DeviceRegistrationResponse struct {
-	ClientID     string `json:"client_id"`
-	ClientSecret string `json:"client_secret"`
-}
-
-// CustomerInfo provides general information about the account. Required for finding the correct internal username.
-type CustomerInfo struct {
-	Username          string      `json:"username"`
-	Email             string      `json:"email"`
-	Name              string      `json:"name"`
-	CountryCode       string      `json:"country_code"`
-	LanguageCode      string      `json:"language_code"`
-	CustomerGroupCode string      `json:"customer_group_code"`
-	BrandCode         string      `json:"brand_code"`
-	AccountType       string      `json:"account_type"`
-	SubscriptionType  string      `json:"subscription_type"`
-	Usage             int64       `json:"usage"`
-	Qouta             int64       `json:"quota"`
-	BusinessUsage     int64       `json:"business_usage"`
-	BusinessQouta     int64       `json:"business_quota"`
-	WriteLocked       bool        `json:"write_locked"`
-	ReadLocked        bool        `json:"read_locked"`
-	LockedCause       interface{} `json:"locked_cause"`
-	WebHash           string      `json:"web_hash"`
-	AndroidHash       string      `json:"android_hash"`
-	IOSHash           string      `json:"ios_hash"`
-}
-
-// XML structures returned by the old API
-
 // Flag is a hacky type for checking if an attribute is present
 type Flag bool
 
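The structs deleted above back the newer JSON upload API; they are plain wire types with no behaviour. As a sketch, marshalling an AllocateFileRequest — field names are copied from the diff, the values are made up:

package main

import (
	"encoding/json"
	"fmt"
)

// AllocateFileRequest mirrors the deleted struct above.
type AllocateFileRequest struct {
	Bytes    int64  `json:"bytes"`
	Created  string `json:"created"`
	Md5      string `json:"md5"`
	Modified string `json:"modified"`
	Path     string `json:"path"`
}

func main() {
	req := AllocateFileRequest{
		Bytes:    1024,
		Created:  "2018-09-10T12:34:56Z",
		Md5:      "d41d8cd98f00b204e9800998ecf8427e",
		Modified: "2018-09-10T12:34:56Z",
		Path:     "/archive/file.txt",
	}
	out, err := json.MarshalIndent(req, "", "  ")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(out))
}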
@@ -169,8 +87,8 @@ GET http://www.jottacloud.com/JFS/<account>
 </user>
 */
 
-// DriveInfo represents a Jottacloud account
-type DriveInfo struct {
+// AccountInfo represents a Jottacloud account
+type AccountInfo struct {
 	Username    string `xml:"username"`
 	AccountType string `xml:"account-type"`
 	Locked      bool   `xml:"locked"`
@@ -315,17 +233,16 @@ GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/.../<file>
 
 // JottaFile represents a Jottacloud file
 type JottaFile struct {
 	XMLName         xml.Name
 	Name            string `xml:"name,attr"`
 	Deleted         Flag   `xml:"deleted,attr"`
-	PublicSharePath string `xml:"publicSharePath"`
-	State           string `xml:"currentRevision>state"`
-	CreatedAt       Time   `xml:"currentRevision>created"`
-	ModifiedAt      Time   `xml:"currentRevision>modified"`
-	Updated         Time   `xml:"currentRevision>updated"`
-	Size            int64  `xml:"currentRevision>size"`
-	MimeType        string `xml:"currentRevision>mime"`
-	MD5             string `xml:"currentRevision>md5"`
+	State           string `xml:"currentRevision>state"`
+	CreatedAt       Time   `xml:"currentRevision>created"`
+	ModifiedAt      Time   `xml:"currentRevision>modified"`
+	Updated         Time   `xml:"currentRevision>updated"`
+	Size            int64  `xml:"currentRevision>size"`
+	MimeType        string `xml:"currentRevision>mime"`
+	MD5             string `xml:"currentRevision>md5"`
 }
 
 // Error is a custom Error for wrapping Jottacloud error responses
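JottaFile above relies on encoding/xml's nested-path tags (parent>child) to pull fields out of the currentRevision element without declaring an intermediate struct. A self-contained check of how those tags behave — the XML snippet here is invented:

package main

import (
	"encoding/xml"
	"fmt"
)

// file uses the same parent>child tag style as JottaFile above.
type file struct {
	Name string `xml:"name,attr"`
	MD5  string `xml:"currentRevision>md5"`
	Size int64  `xml:"currentRevision>size"`
}

func main() {
	data := []byte(`<file name="a.txt"><currentRevision><md5>abc</md5><size>7</size></currentRevision></file>`)
	var f file
	if err := xml.Unmarshal(data, &f); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%+v\n", f) // {Name:a.txt MD5:abc Size:7}
}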
Some files were not shown because too many files have changed in this diff.