Mirror of https://github.com/rclone/rclone.git (synced 2026-01-22 12:23:15 +00:00)

Compare commits: azure-pipe...fix-mega-b (323 commits)

The commit table on the original page carried only bare SHA1 links; the author,
date and message columns did not survive the mirror. The first SHA listed is
48fa6f5700 and the last is e502be475a.
appveyor.yml (deleted; filename inferred from content, header lost in the mirror)
@@ -1,49 +0,0 @@
-version: "{build}"
-
-os: Windows Server 2012 R2
-
-clone_folder: c:\gopath\src\github.com\rclone\rclone
-
-cache:
-  - '%LocalAppData%\go-build'
-
-environment:
-  GOPATH: C:\gopath
-  CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
-  ORIGPATH: '%PATH%'
-  NOCCPATH: C:\MinGW\bin;%GOPATH%\bin;%PATH%
-  PATHCC64: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%NOCCPATH%
-  PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
-  PATH: '%PATHCC64%'
-  RCLONE_CONFIG_PASS:
-    secure: sq9CPBbwaeKJv+yd24U44neORYPQVy6jsjnQptC+5yk=
-
-install:
-  - choco install winfsp -y
-  - choco install zip -y
-  - copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe
-
-build_script:
-  - echo %PATH%
-  - echo %GOPATH%
-  - go version
-  - go env
-  - go install
-  - go build
-  - make log_since_last_release > %TEMP%\git-log.txt
-  - make version > %TEMP%\version
-  - set /p RCLONE_VERSION=<%TEMP%\version
-  - set PATH=%PATHCC32%
-  - go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/386" -cgo -tags cmount %RCLONE_VERSION%
-  - set PATH=%PATHCC64%
-  - go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/amd64" -cgo -no-clean -tags cmount %RCLONE_VERSION%
-
-test_script:
-  - make GOTAGS=cmount quicktest
-
-artifacts:
-  - path: rclone.exe
-  - path: build/*-v*.zip
-
-deploy_script:
-  - IF "%APPVEYOR_REPO_NAME%" == "rclone/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
.circleci/config.yml (deleted; filename inferred from content, header lost in the mirror)
@@ -1,50 +0,0 @@
----
-version: 2
-
-jobs:
-
-  build:
-    machine: true
-
-    working_directory: ~/.go_workspace/src/github.com/rclone/rclone
-
-    steps:
-      - checkout
-
-      - run:
-          name: Cross-compile rclone
-          command: |
-            docker pull rclone/xgo-cgofuse
-            go get -v github.com/karalabe/xgo
-            xgo \
-                --image=rclone/xgo-cgofuse \
-                --targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
-                -tags cmount \
-                .
-            xgo \
-                --targets=android/*,ios/* \
-                .
-
-      - run:
-          name: Prepare artifacts
-          command: |
-            mkdir -p /tmp/rclone.dist
-            cp -R rclone-* /tmp/rclone.dist
-            mkdir build
-            cp -R rclone-* build/
-
-      - run:
-          name: Build rclone
-          command: |
-            go version
-            go build
-
-      - run:
-          name: Upload artifacts
-          command: |
-            if [[ $CIRCLE_PULL_REQUEST != "" ]]; then
-              make circleci_upload
-            fi
-
-      - store_artifacts:
-          path: /tmp/rclone.dist
.github/workflows/build.yml (new file, 250 lines, vendored)
@@ -0,0 +1,250 @@
+---
+# Github Actions build for rclone
+# -*- compile-command: "yamllint -f parsable build.yml" -*-
+
+name: build
+
+# Trigger the workflow on push or pull request
+on:
+  push:
+    branches:
+      - '*'
+    tags:
+      - '*'
+  pull_request:
+
+jobs:
+  build:
+    timeout-minutes: 60
+    strategy:
+      fail-fast: false
+      matrix:
+        job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'modules_race', 'go1.10', 'go1.11', 'go1.12']
+
+        include:
+          - job_name: linux
+            os: ubuntu-latest
+            go: '1.13.x'
+            modules: 'off'
+            gotags: cmount
+            build_flags: '-include "^linux/"'
+            check: true
+            quicktest: true
+            deploy: true
+
+          - job_name: mac
+            os: macOS-latest
+            go: '1.13.x'
+            modules: 'off'
+            gotags: ''  # cmount doesn't work on osx travis for some reason
+            build_flags: '-include "^darwin/amd64" -cgo'
+            quicktest: true
+            racequicktest: true
+            deploy: true
+
+          - job_name: windows_amd64
+            os: windows-latest
+            go: '1.13.x'
+            modules: 'off'
+            gotags: cmount
+            build_flags: '-include "^windows/amd64" -cgo'
+            quicktest: true
+            racequicktest: true
+            deploy: true
+
+          - job_name: windows_386
+            os: windows-latest
+            go: '1.13.x'
+            modules: 'off'
+            gotags: cmount
+            goarch: '386'
+            cgo: '1'
+            build_flags: '-include "^windows/386" -cgo'
+            quicktest: true
+            deploy: true
+
+          - job_name: other_os
+            os: ubuntu-latest
+            go: '1.13.x'
+            modules: 'off'
+            build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
+            compile_all: true
+            deploy: true
+
+          - job_name: modules_race
+            os: ubuntu-latest
+            go: '1.13.x'
+            modules: 'on'
+            quicktest: true
+            racequicktest: true
+
+          - job_name: go1.10
+            os: ubuntu-latest
+            go: '1.10.x'
+            modules: 'off'
+            quicktest: true
+
+          - job_name: go1.11
+            os: ubuntu-latest
+            go: '1.11.x'
+            modules: 'off'
+            quicktest: true
+
+          - job_name: go1.12
+            os: ubuntu-latest
+            go: '1.12.x'
+            modules: 'off'
+            quicktest: true
+
+    name: ${{ matrix.job_name }}
+
+    runs-on: ${{ matrix.os }}
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@master
+        with:
+          path: ./src/github.com/${{ github.repository }}
+
+      - name: Install Go
+        uses: actions/setup-go@v1
+        with:
+          go-version: ${{ matrix.go }}
+
+      - name: Set environment variables
+        shell: bash
+        run: |
+          echo '::set-env name=GOPATH::${{ runner.workspace }}'
+          echo '::add-path::${{ runner.workspace }}/bin'
+          echo '::set-env name=GO111MODULE::${{ matrix.modules }}'
+          echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
+          echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
+          if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
+          if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
+
+      - name: Install Libraries on Linux
+        shell: bash
+        run: |
+          sudo modprobe fuse
+          sudo chmod 666 /dev/fuse
+          sudo chown root:$USER /etc/fuse.conf
+          sudo apt-get install fuse libfuse-dev rpm pkg-config
+        if: matrix.os == 'ubuntu-latest'
+
+      - name: Install Libraries on macOS
+        shell: bash
+        run: |
+          brew update
+          brew cask install osxfuse
+        if: matrix.os == 'macOS-latest'
+
+      - name: Install Libraries on Windows
+        shell: powershell
+        run: |
+          $ProgressPreference = 'SilentlyContinue'
+          choco install -y winfsp zip
+          Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
+          if ($env:GOARCH -eq "386") {
+            choco install -y mingw --forcex86 --force
+            Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
+          }
+          # Copy mingw32-make.exe to make.exe so the same command line
+          # can be used on Windows as on macOS and Linux
+          $path = (get-command mingw32-make.exe).Path
+          Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
+        if: matrix.os == 'windows-latest'
+
+      - name: Print Go version and environment
+        shell: bash
+        run: |
+          printf "Using go at: $(which go)\n"
+          printf "Go version: $(go version)\n"
+          printf "\n\nGo environment:\n\n"
+          go env
+          printf "\n\nRclone environment:\n\n"
+          make vars
+          printf "\n\nSystem environment:\n\n"
+          env
+
+      - name: Run tests
+        shell: bash
+        run: |
+          make
+          make quicktest
+        if: matrix.quicktest
+
+      - name: Race test
+        shell: bash
+        run: |
+          make racequicktest
+        if: matrix.racequicktest
+
+      - name: Code quality test
+        shell: bash
+        run: |
+          make build_dep
+          make check
+        if: matrix.check
+
+      - name: Compile all architectures test
+        shell: bash
+        run: |
+          make
+          make compile_all
+        if: matrix.compile_all
+
+      - name: Deploy built binaries
+        shell: bash
+        run: |
+          if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep ; fi
+          make travis_beta
+        env:
+          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
+        # working-directory: '$(modulePath)'
+        if: matrix.deploy && github.head_ref == ''
+
+  xgo:
+    timeout-minutes: 60
+    name: "xgo cross compile"
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@master
+        with:
+          path: ./src/github.com/${{ github.repository }}
+
+      - name: Set environment variables
+        shell: bash
+        run: |
+          echo '::set-env name=GOPATH::${{ runner.workspace }}'
+          echo '::add-path::${{ runner.workspace }}/bin'
+
+      - name: Cross-compile rclone
+        run: |
+          docker pull billziss/xgo-cgofuse
+          go get -v github.com/karalabe/xgo
+          xgo \
+              -image=billziss/xgo-cgofuse \
+              -targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
+              -tags cmount \
+              -dest build \
+              .
+          xgo \
+              -image=billziss/xgo-cgofuse \
+              -targets=android/*,ios/* \
+              -dest build \
+              .
+
+      - name: Build rclone
+        run: |
+          docker pull golang
+          docker run --rm -v "$PWD":/usr/src/rclone -w /usr/src/rclone golang go build -mod=vendor -v
+
+      - name: Upload artifacts
+        run: |
+          make circleci_upload
+        env:
+          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
+        if: github.head_ref == ''
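One caveat about the workflow above: the `::set-env` and `::add-path` commands it relies on were the standard mechanism when this file was written, but GitHub Actions has since deprecated them. A minimal sketch of the modern equivalent, assuming a current runner:

```sh
# Replacement for '::set-env' / '::add-path': append to the files the
# runner exposes via $GITHUB_ENV and $GITHUB_PATH instead.
echo "GOPATH=${{ runner.workspace }}" >> "$GITHUB_ENV"
echo "${{ runner.workspace }}/bin" >> "$GITHUB_PATH"
```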
.travis.yml (deleted, 128 lines)
@@ -1,128 +0,0 @@
----
-language: go
-sudo: required
-dist: xenial
-os:
-  - linux
-go_import_path: github.com/rclone/rclone
-before_install:
-  - git fetch --unshallow --tags
-  - |
-    if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
-      sudo modprobe fuse
-      sudo chmod 666 /dev/fuse
-      sudo chown root:$USER /etc/fuse.conf
-    fi
-    if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
-      brew update
-      brew tap caskroom/cask
-      brew cask install osxfuse
-    fi
-    if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then
-      choco install -y winfsp zip make
-      cd ../..  # fix crlf in git checkout
-      mv $TRAVIS_REPO_SLUG _old
-      git config --global core.autocrlf false
-      git clone _old $TRAVIS_REPO_SLUG
-      cd $TRAVIS_REPO_SLUG
-    fi
-install:
-  - make vars
-env:
-  global:
-    - GOTAGS=cmount
-    - GOMAXPROCS=8  # workaround for cmd/mount tests locking up - see #3154
-    - GO111MODULE=off
-    - GITHUB_USER=ncw
-    - secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
-    - secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
-addons:
-  apt:
-    packages:
-      - fuse
-      - libfuse-dev
-      - rpm
-      - pkg-config
-cache:
-  directories:
-    - $HOME/.cache/go-build
-matrix:
-  allow_failures:
-    - go: tip
-  include:
-    - go: 1.9.x
-      script:
-        - make quicktest
-    - go: 1.10.x
-      script:
-        - make quicktest
-    - go: 1.11.x
-      script:
-        - make quicktest
-    - go: 1.12.x
-      name: Linux
-      env:
-        - GOTAGS=cmount
-        - BUILD_FLAGS='-include "^linux/"'
-        - DEPLOY=true
-      script:
-        - make build_dep
-        - make check
-        - make quicktest
-    - go: 1.12.x
-      name: Go Modules / Race
-      env:
-        - GO111MODULE=on
-        - GOPROXY=https://proxy.golang.org
-      script:
-        - make quicktest
-        - make racequicktest
-    - go: 1.12.x
-      name: Other OS
-      env:
-        - DEPLOY=true
-        - BUILD_FLAGS='-exclude "^(windows|darwin|linux)/"'
-      script:
-        - make
-        - make compile_all
-    - go: 1.12.x
-      name: macOS
-      os: osx
-      env:
-        - GOTAGS=  # cmount doesn't work on osx travis for some reason
-        - BUILD_FLAGS='-include "^darwin/" -cgo'
-        - DEPLOY=true
-      cache:
-        directories:
-          - $HOME/Library/Caches/go-build
-      script:
-        - make
-        - make quicktest
-        - make racequicktest
-    # - os: windows
-    #   name: Windows
-    #   go: 1.12.x
-    #   env:
-    #     - GOTAGS=cmount
-    #     - CPATH='C:\Program Files (x86)\WinFsp\inc\fuse'
-    #     - BUILD_FLAGS='-include "^windows/amd64" -cgo'  # 386 doesn't build yet
-    #   #filter_secrets: false  # works around a problem with secrets under windows
-    #   cache:
-    #     directories:
-    #       - ${LocalAppData}/go-build
-    #   script:
-    #     - make
-    #     - make quicktest
-    #     - make racequicktest
-    - go: tip
-      script:
-        - make quicktest
-
-deploy:
-  provider: script
-  script: make travis_beta
-  skip_cleanup: true
-  on:
-    repo: rclone/rclone
-    all_branches: true
-    condition: $TRAVIS_PULL_REQUEST == false && $DEPLOY == true
CONTRIBUTING.md (filename inferred from content, header lost in the mirror)
@@ -118,7 +118,7 @@ but they can be run against any of the remotes.
     cd fs/sync
     go test -v -remote TestDrive:
     go test -v -remote TestDrive: -subdir
     go test -v -remote TestDrive: -fast-list
 
     cd fs/operations
     go test -v -remote TestDrive:
@@ -341,6 +341,12 @@ Getting going
 * Add your remote to the imports in `backend/all/all.go`
 * HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
 * Try to implement as many optional methods as possible as it makes the remote more usable.
+* Use fs/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
+  * `go install -tags noencode`
+  * `rclone purge -v TestRemote:rclone-info`
+  * `rclone info -vv --write-json remote.json TestRemote:rclone-info`
+  * `go run cmd/info/internal/build_csv/main.go -o remote.csv remote.json`
+  * open `remote.csv` in a spreadsheet and examine
 
 Unit tests
 
@@ -362,19 +368,59 @@ Or if you want to run the integration tests manually:
 * `go test -v -remote TestRemote:`
 * `cd fs/sync`
 * `go test -v -remote TestRemote:`
 * If you are making a bucket based remote, then check with this also
 * `go test -v -remote TestRemote: -subdir`
-* And if your remote defines `ListR` this also
+* If your remote defines `ListR` check with this also
 * `go test -v -remote TestRemote: -fast-list`
 
 See the [testing](#testing) section for more information on integration tests.
 
-Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.
+Add your fs to the docs - you'll need to pick an icon for it from
+[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
+alphabetical order of full name of remote (eg `drive` is ordered as
+`Google Drive`) but with the local file system last.
 
 * `README.md` - main GitHub page
 * `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
+  * make sure this has the `autogenerated options` comments in (see your reference backend docs)
+  * update them with `make backenddocs` - revert any changes in other backends
 * `docs/content/overview.md` - overview docs
 * `docs/content/docs.md` - list of remotes in config section
 * `docs/content/about.md` - front page of rclone.org
 * `docs/layouts/chrome/navbar.html` - add it to the website navigation
 * `bin/make_manual.py` - add the page to the `docs` constant
 
 Once you've written the docs, run `make serve` and check they look OK
 in the web browser and the links (internal and external) all work.
 
+## Writing a plugin ##
+
+New features (backends, commands) can also be added "out-of-tree", through Go plugins.
+Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
+This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
+
+Usage
+
+- Naming
+  - Plugin names must have the pattern `librcloneplugin_KIND_NAME.so`.
+  - `KIND` should be one of `backend`, `command` or `bundle`.
+  - Example: A plugin with backend support for PiFS would be called
+    `librcloneplugin_backend_pifs.so`.
+- Loading
+  - Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
+  - Supported on rclone v1.50 or greater.
+  - All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
+  - If this variable doesn't exist, plugin support is disabled.
+  - Plugins must be compiled against the exact version of rclone to work.
+    (The rclone used during building the plugin must be the same as the source of rclone)
+
+Building
+
+To turn your existing additions into a Go plugin, move them to an external repository
+and change the top-level package name to `main`.
+
+Check `rclone --version` and make sure that the plugin's rclone dependency and host Go version match.
+
+Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
+
+[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
+
+[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)
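To make the plugin instructions above concrete, here is a minimal, hedged sketch of a backend plugin. It assumes the self-registration pattern rclone backends use (`fs.Register` in an `init` function, visible in the backend diffs later on this page) and the `NewFs` signature shown there; `pifs` and its description are placeholder names, and the linked gist remains the authoritative example.

```go
// Sketch of librcloneplugin_backend_pifs.so; build with
//   go build -buildmode=plugin -o librcloneplugin_backend_pifs.so .
// "pifs" is a hypothetical backend name.
package main

import (
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

// init self-registers the backend when rclone loads the plugin,
// exactly as in-tree backends register themselves on import.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "pifs",
		Description: "Example out-of-tree backend",
		NewFs:       NewFs,
	})
}

// NewFs matches the constructor signature used by the backends in this diff.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	return nil, errors.New("pifs: not implemented")
}

// main is never called under -buildmode=plugin; it is kept so the
// package also compiles as an ordinary main package.
func main() {}
```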
Dockerfile (new file, 22 lines)
@@ -0,0 +1,22 @@
+FROM golang AS builder
+
+COPY . /go/src/github.com/rclone/rclone/
+WORKDIR /go/src/github.com/rclone/rclone/
+
+RUN make quicktest
+RUN \
+    CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
+    make
+RUN ./rclone version
+
+# Begin final image
+FROM alpine:latest
+
+RUN apk --no-cache add ca-certificates fuse
+
+COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/
+
+ENTRYPOINT [ "rclone" ]
+
+WORKDIR /data
+ENV XDG_CONFIG_HOME=/config
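For orientation, a typical way to exercise this image. The tag, mount paths and remote name below are illustrative only; the grounding is that the image sets `XDG_CONFIG_HOME=/config`, `WORKDIR /data` and an `rclone` entrypoint, so config and data are supplied as volumes.

```sh
# Build the image from the repository root (runs the quicktest stage too).
docker build -t rclone-local .

# Mount a config directory (rclone reads $XDG_CONFIG_HOME/rclone/rclone.conf)
# and a data directory matching the image's WORKDIR. "myremote:" is a
# placeholder for a remote defined in that config file.
docker run --rm \
    -v "$PWD/config":/config \
    -v "$PWD/data":/data \
    rclone-local lsd myremote:
```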
MANUAL.html (generated, 3124 lines changed): file diff suppressed because it is too large
MANUAL.txt (generated, 3426 lines changed): file diff suppressed because it is too large
Makefile (64 lines changed)
@@ -1,18 +1,29 @@
 SHELL = bash
-BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(shell git rev-parse --abbrev-ref HEAD))
+# Branch we are working on
+BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD))
+# Tag of the current commit, if any. If this is not "" then we are building a release
+RELEASE_TAG := $(shell git tag -l --points-at HEAD)
+# Version of last release (may not be on this branch)
+VERSION := $(shell cat VERSION)
+# Last tag on this branch
 LAST_TAG := $(shell git describe --tags --abbrev=0)
-ifeq ($(BRANCH),$(LAST_TAG))
+# If we are working on a release, override branch to master
+ifdef RELEASE_TAG
 BRANCH := master
 endif
 TAG_BRANCH := -$(BRANCH)
 BRANCH_PATH := branch/
+# If building HEAD or master then unset TAG_BRANCH and BRANCH_PATH
 ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
 TAG_BRANCH :=
 BRANCH_PATH :=
 endif
-TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
-NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
-ifneq ($(TAG),$(LAST_TAG))
+# Make version suffix -DDD-gCCCCCCCC (D=commits since last release, C=Commit) or blank
+VERSION_SUFFIX := $(shell git describe --abbrev=8 --tags | perl -lpe 's/^v\d+\.\d+\.\d+//; s/^-(\d+)/"-".sprintf("%03d",$$1)/e;')
+# TAG is current version + number of commits since last release + branch
+TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)
+NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
+ifndef RELEASE_TAG
 TAG := $(TAG)-beta
 endif
 GO_VERSION := $(shell go version)
@@ -30,19 +41,22 @@ BUILDTAGS=-tags "$(GOTAGS)"
 LINTTAGS=--build-tags "$(GOTAGS)"
 endif
 
-.PHONY: rclone vars version
+.PHONY: rclone test_all vars version
 
 rclone:
	touch fs/version.go
-	go install -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
-	cp -av `go env GOPATH`/bin/rclone .
+	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
+	mkdir -p `go env GOPATH`/bin/
+	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/
+
+test_all:
+	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
 
 vars:
	@echo SHELL="'$(SHELL)'"
	@echo BRANCH="'$(BRANCH)'"
	@echo TAG="'$(TAG)'"
	@echo LAST_TAG="'$(LAST_TAG)'"
-	@echo NEW_TAG="'$(NEW_TAG)'"
+	@echo VERSION="'$(VERSION)'"
+	@echo NEXT_VERSION="'$(NEXT_VERSION)'"
	@echo GO_VERSION="'$(GO_VERSION)'"
	@echo BETA_URL="'$(BETA_URL)'"
 
@@ -50,8 +64,7 @@ version:
	@echo '$(TAG)'
 
 # Full suite of integration tests
-test: rclone
-	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
+test: rclone test_all
	-test_all 2>&1 | tee test_all.log
	@echo "Written logs in test_all.log"
 
@@ -74,8 +87,8 @@ build_dep:
 
 # Get the release dependencies
 release_dep:
-	go get -u github.com/goreleaser/nfpm/...
-	go get -u github.com/aktau/github-release
+	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
+	go run bin/get-github-release.go -extract github-release aktau/github-release 'linux-amd64-github-release.tar.bz2'
 
 # Update dependencies
 update:
@@ -190,24 +203,25 @@ serve: website
	cd docs && hugo server -v -w
 
 tag: doc
-	@echo "Old tag is $(LAST_TAG)"
-	@echo "New tag is $(NEW_TAG)"
-	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
-	echo -n "$(NEW_TAG)" > docs/layouts/partials/version.html
-	git tag -s -m "Version $(NEW_TAG)" $(NEW_TAG)
-	bin/make_changelog.py $(LAST_TAG) $(NEW_TAG) > docs/content/changelog.md.new
+	@echo "Old tag is $(VERSION)"
+	@echo "New tag is $(NEXT_VERSION)"
+	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)\"\n" | gofmt > fs/version.go
+	echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
+	echo "$(NEXT_VERSION)" > VERSION
+	git tag -s -m "Version $(NEXT_VERSION)" $(NEXT_VERSION)
+	bin/make_changelog.py $(LAST_TAG) $(NEXT_VERSION) > docs/content/changelog.md.new
	mv docs/content/changelog.md.new docs/content/changelog.md
	@echo "Edit the new changelog in docs/content/changelog.md"
	@echo "Then commit all the changes"
-	@echo git commit -m \"Version $(NEW_TAG)\" -a -v
+	@echo git commit -m \"Version $(NEXT_VERSION)\" -a -v
	@echo "And finally run make retag before make cross etc"
 
 retag:
-	git tag -f -s -m "Version $(LAST_TAG)" $(LAST_TAG)
+	git tag -f -s -m "Version $(VERSION)" $(VERSION)
 
 startdev:
-	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(LAST_TAG)-DEV\"\n" | gofmt > fs/version.go
-	git commit -m "Start $(LAST_TAG)-DEV development" fs/version.go
+	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(VERSION)-DEV\"\n" | gofmt > fs/version.go
+	git commit -m "Start $(VERSION)-DEV development" fs/version.go
 
 winzip:
	zip -9 rclone-$(TAG).zip rclone.exe
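To trace the new version logic above with hypothetical values (the hash and counts below are made up for illustration): `VERSION` now comes from the `VERSION` file, and `git describe` supplies a zero-padded commit count plus short hash.

```sh
# Hypothetical values, following the Makefile rules above.
cat VERSION                      # v1.49.0
git describe --abbrev=8 --tags   # v1.49.0-103-g48fa6f57

# VERSION_SUFFIX strips the leading version and zero-pads the commit
# count, giving: -103-g48fa6f57
# On a branch named fix-mega-b with no release tag on HEAD:
#   TAG = $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)-beta
#       = v1.49.0-103-g48fa6f57-fix-mega-b-beta
```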
README.md (13 lines changed)
@@ -1,4 +1,4 @@
-[](https://rclone.org/)
+[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
 
 [Website](https://rclone.org) |
 [Documentation](https://rclone.org/docs/) |
@@ -6,7 +6,7 @@
 [Contributing](CONTRIBUTING.md) |
 [Changelog](https://rclone.org/changelog/) |
 [Installation](https://rclone.org/install/) |
-[Forum](https://forum.rclone.org/) |
+[Forum](https://forum.rclone.org/)
 
 [](https://travis-ci.org/rclone/rclone)
 [](https://ci.appveyor.com/project/rclone/rclone)
@@ -14,6 +14,7 @@
 [](https://circleci.com/gh/rclone/rclone/tree/master)
 [](https://goreportcard.com/report/github.com/rclone/rclone)
 [](https://godoc.org/github.com/rclone/rclone)
+[](https://hub.docker.com/r/rclone/rclone)
 
 # Rclone
 
@@ -21,13 +22,14 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 
 ## Storage providers
 
-* 1Fichier [:page_facing_up:](https://rclone.org/ficher/)
+* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
 * Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
 * Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
 * Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
 * Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
 * Box [:page_facing_up:](https://rclone.org/box/)
 * Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
+* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
 * DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
 * Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
 * Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
@@ -40,6 +42,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
 * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
 * Koofr [:page_facing_up:](https://rclone.org/koofr/)
+* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
 * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
 * Mega [:page_facing_up:](https://rclone.org/mega/)
 * Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
@@ -52,7 +55,8 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 * Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
 * ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
 * pCloud [:page_facing_up:](https://rclone.org/pcloud/)
-* put.io [:page_facing_up:](https://rclone.org/webdav/#put-io)
+* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
+* put.io [:page_facing_up:](https://rclone.org/putio/)
 * QingStor [:page_facing_up:](https://rclone.org/qingstor/)
 * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
 * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
@@ -73,6 +77,7 @@ Please see [the full list of all storage providers and their features](https://r
 * [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
 * [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
 * Can sync to and from network, e.g. two different cloud accounts
+* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
 * Optional encryption ([Crypt](https://rclone.org/crypt/))
 * Optional cache ([Cache](https://rclone.org/cache/))
 * Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
RELEASE.md (78 lines changed)
@@ -1,8 +1,14 @@
-Extra required software for making a release
+# Release
+
+This file describes how to make the various kinds of releases
+
+## Extra required software for making a release
 
 * [github-release](https://github.com/aktau/github-release) for uploading packages
 * pandoc for making the html and man pages
 
-Making a release
+## Making a release
 
 * git status - make sure everything is checked in
 * Check travis & appveyor builds are green
 * make check
@@ -26,8 +32,8 @@ Making a release
 * # announce with forum post, twitter post, G+ post
 
 Early in the next release cycle update the vendored dependencies
 
+* Review any pinned packages in go.mod and remove if possible
-* GO111MODULE=on go get -u github.com/spf13/cobra@master
 * make update
 * git status
 * git add new files
@@ -48,24 +54,56 @@ Can be fixed with
 * GO111MODULE=on go mod vendor
 
 
-Making a point release. If rclone needs a point release due to some
-horrendous bug, then
-* git branch v1.XX v1.XX-fixes
+## Making a point release
+
+If rclone needs a point release due to some horrendous bug:
+
+First make the release branch. If this is a second point release then
+this will be done already.
+
+* BASE_TAG=v1.XX # eg v1.49
+* NEW_TAG=${BASE_TAG}.Y # eg v1.49.1
+* echo $BASE_TAG $NEW_TAG # v1.49 v1.49.1
+* git branch ${BASE_TAG} ${BASE_TAG}-fixes
+
+Now
 
 * git co ${BASE_TAG}-fixes
 * git cherry-pick any fixes
 * Test (see above)
-* make NEW_TAG=v1.XX.1 tag
+* make NEXT_VERSION=${NEW_TAG} tag
 * edit docs/content/changelog.md
-* make TAG=v1.43.1 doc
-* git commit -a -v -m "Version v1.XX.1"
-* git tag -d -v1.XX.1
-* git tag -s -m "Version v1.XX.1" v1.XX.1
-* git push --tags -u origin v1.XX-fixes
-* make BRANCH_PATH= TAG=v1.43.1 fetch_binaries
-* make TAG=v1.43.1 tarball
-* make TAG=v1.43.1 sign_upload
-* make TAG=v1.43.1 check_sign
-* make TAG=v1.43.1 upload
-* make TAG=v1.43.1 upload_website
-* make TAG=v1.43.1 upload_github
-* NB this overwrites the current beta so after the release, rebuild the last travis build
+* make TAG=${NEW_TAG} doc
+* git commit -a -v -m "Version ${NEW_TAG}"
+* git tag -d ${NEW_TAG}
+* git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
+* git push --tags -u origin ${BASE_TAG}-fixes
+* Wait for builds to complete
+* make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
+* make TAG=${NEW_TAG} tarball
+* make TAG=${NEW_TAG} sign_upload
+* make TAG=${NEW_TAG} check_sign
+* make TAG=${NEW_TAG} upload
+* make TAG=${NEW_TAG} upload_website
+* make TAG=${NEW_TAG} upload_github
+* NB this overwrites the current beta so we need to do this
+* git co master
+* make LAST_TAG=${NEW_TAG} startdev
+* # cherry pick the changes to the changelog and VERSION
+* git checkout ${BASE_TAG}-fixes VERSION docs/content/changelog.md
+* git commit --amend
+* git push
+* Announce!
+
+## Making a manual build of docker
+
+The rclone docker image should autobuild on docker hub. If it doesn't
+or needs to be updated then rebuild like this.
+
+```
+docker build -t rclone/rclone:1.49.1 -t rclone/rclone:1.49 -t rclone/rclone:1 -t rclone/rclone:latest .
+docker push rclone/rclone:1.49.1
+docker push rclone/rclone:1.49
+docker push rclone/rclone:1
+docker push rclone/rclone:latest
+```
azure-pipelines.yml (deleted; filename inferred from its own yamllint comment, header lost in the mirror)
@@ -1,231 +0,0 @@
----
-# Azure pipelines build for rclone
-# Parts stolen shamelessly from all round the Internet, especially Caddy
-# -*- compile-command: "yamllint -f parsable azure-pipelines.yml" -*-
-
-trigger:
-  branches:
-    include:
-      - '*'
-  tags:
-    include:
-      - '*'
-
-variables:
-  GOROOT: $(gorootDir)/go
-  GOPATH: $(system.defaultWorkingDirectory)/gopath
-  GOCACHE: $(system.defaultWorkingDirectory)/gocache
-  GOBIN: $(GOPATH)/bin
-  modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)'
-  GO111MODULE: 'off'
-  GOTAGS: cmount
-  GO_LATEST: false
-  CPATH: ''
-  GO_INSTALL_ARCH: amd64
-
-strategy:
-  matrix:
-    linux:
-      imageName: ubuntu-16.04
-      gorootDir: /usr/local
-      GO_VERSION: latest
-      GOTAGS: cmount
-      BUILD_FLAGS: '-include "^linux/"'
-      MAKE_CHECK: true
-      MAKE_QUICKTEST: true
-      DEPLOY: true
-    mac:
-      imageName: macos-10.13
-      gorootDir: /usr/local
-      GO_VERSION: latest
-      GOTAGS: ""  # cmount doesn't work on osx travis for some reason
-      BUILD_FLAGS: '-include "^darwin/" -cgo'
-      MAKE_QUICKTEST: true
-      MAKE_RACEQUICKTEST: true
-      DEPLOY: true
-    windows_amd64:
-      imageName: windows-2019
-      gorootDir: C:\
-      GO_VERSION: latest
-      BUILD_FLAGS: '-include "^windows/amd64" -cgo'
-      MAKE_QUICKTEST: true
-      DEPLOY: true
-    windows_386:
-      imageName: windows-2019
-      gorootDir: C:\
-      GO_VERSION: latest
-      GO_INSTALL_ARCH: 386
-      BUILD_FLAGS: '-include "^windows/386" -cgo'
-      MAKE_QUICKTEST: true
-      DEPLOY: true
-    other_os:
-      imageName: ubuntu-16.04
-      gorootDir: /usr/local
-      GO_VERSION: latest
-      BUILD_FLAGS: '-exclude "^(windows|darwin|linux)/"'
-      MAKE_COMPILE_ALL: true
-      DEPLOY: true
-    modules_race:
-      imageName: ubuntu-16.04
-      gorootDir: /usr/local
-      GO_VERSION: latest
-      GO111MODULE: on
-      GOPROXY: https://proxy.golang.org
-      MAKE_QUICKTEST: true
-      MAKE_RACEQUICKTEST: true
-    go1.9:
-      imageName: ubuntu-16.04
-      gorootDir: /usr/local
-      GOCACHE: ''  # build caching only came in go1.10
-      GO_VERSION: go1.9.7
-      MAKE_QUICKTEST: true
-    go1.10:
-      imageName: ubuntu-16.04
-      gorootDir: /usr/local
-      GO_VERSION: go1.10.8
-      MAKE_QUICKTEST: true
-    go1.11:
-      imageName: ubuntu-16.04
-      gorootDir: /usr/local
-      GO_VERSION: go1.11.12
-      MAKE_QUICKTEST: true
-
-pool:
-  vmImage: $(imageName)
-
-steps:
-  - bash: |
-      latestGo=$(curl "https://golang.org/VERSION?m=text")
-      echo "##vso[task.setvariable variable=GO_VERSION]$latestGo"
-      echo "##vso[task.setvariable variable=GO_LATEST]true"
-      echo "Latest Go version: $latestGo"
-    condition: eq( variables['GO_VERSION'], 'latest' )
-    displayName: "Get latest Go version"
-
-  - bash: |
-      sudo rm -f $(which go)
-      echo '##vso[task.prependpath]$(GOBIN)'
-      echo '##vso[task.prependpath]$(GOROOT)/bin'
-      mkdir -p '$(modulePath)'
-      shopt -s extglob
-      shopt -s dotglob
-      mv !(gopath) '$(modulePath)'
-    displayName: Remove old Go, set GOBIN/GOROOT, and move project into GOPATH
-
-  - task: CacheBeta@0
-    continueOnError: true
-    inputs:
-      key: go-build-cache | "$(Agent.JobName)"
-      path: $(GOCACHE)
-    displayName: Cache go build
-    condition: ne( variables['GOCACHE'], '' )
-
-  # Install Libraries (varies by platform)
-
-  - bash: |
-      sudo modprobe fuse
-      sudo chmod 666 /dev/fuse
-      sudo chown root:$USER /etc/fuse.conf
-      sudo apt-get install fuse libfuse-dev rpm pkg-config
-    condition: eq( variables['Agent.OS'], 'Linux' )
-    displayName: Install Libraries on Linux
-
-  - bash: |
-      brew update
-      brew tap caskroom/cask
-      brew cask install osxfuse
-    condition: eq( variables['Agent.OS'], 'Darwin' )
-    displayName: Install Libraries on macOS
-
-  - powershell: |
-      $ProgressPreference = 'SilentlyContinue'
-      choco install -y winfsp zip
-      Write-Host "##vso[task.setvariable variable=CPATH]C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
-      if ($env:GO_INSTALL_ARCH -eq "386") {
-        choco install -y mingw --forcex86 --force
-        Write-Host "##vso[task.prependpath]C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
-      }
-      # Copy mingw32-make.exe to make.exe so the same command line
-      # can be used on Windows as on macOS and Linux
-      $path = (get-command mingw32-make.exe).Path
-      Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
-    condition: eq( variables['Agent.OS'], 'Windows_NT' )
-    displayName: Install Libraries on Windows
-
-
-  # Install Go (this varies by platform)
-
-  - bash: |
-      wget "https://dl.google.com/go/$(GO_VERSION).linux-$(GO_INSTALL_ARCH).tar.gz"
-      sudo mkdir $(gorootDir)
-      sudo chown ${USER}:${USER} $(gorootDir)
-      tar -C $(gorootDir) -xzf "$(GO_VERSION).linux-$(GO_INSTALL_ARCH).tar.gz"
-    condition: eq( variables['Agent.OS'], 'Linux' )
-    displayName: Install Go on Linux
-
-  - bash: |
-      wget "https://dl.google.com/go/$(GO_VERSION).darwin-$(GO_INSTALL_ARCH).tar.gz"
-      sudo tar -C $(gorootDir) -xzf "$(GO_VERSION).darwin-$(GO_INSTALL_ARCH).tar.gz"
-    condition: eq( variables['Agent.OS'], 'Darwin' )
-    displayName: Install Go on macOS
-
-  - powershell: |
-      $ProgressPreference = 'SilentlyContinue'
-      Write-Host "Downloading Go $(GO_VERSION) for $(GO_INSTALL_ARCH)"
-      (New-Object System.Net.WebClient).DownloadFile("https://dl.google.com/go/$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip", "$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip")
-      Write-Host "Extracting Go"
-      Expand-Archive "$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip" -DestinationPath "$(gorootDir)"
-    condition: eq( variables['Agent.OS'], 'Windows_NT' )
-    displayName: Install Go on Windows
-
-  # Display environment for debugging
-
-  - bash: |
-      printf "Using go at: $(which go)\n"
-      printf "Go version: $(go version)\n"
-      printf "\n\nGo environment:\n\n"
-      go env
-      printf "\n\nRclone environment:\n\n"
-      make vars
-      printf "\n\nSystem environment:\n\n"
-      env
-    workingDirectory: '$(modulePath)'
-    displayName: Print Go version and environment
-
-  # Run Tests
-
-  - bash: |
-      make
-      make quicktest
-    workingDirectory: '$(modulePath)'
-    displayName: Run tests
-    condition: eq( variables['MAKE_QUICKTEST'], 'true' )
-
-  - bash: |
-      make racequicktest
-    workingDirectory: '$(modulePath)'
-    displayName: Race test
-    condition: eq( variables['MAKE_RACEQUICKTEST'], 'true' )
-
-  - bash: |
-      make build_dep
-      make check
-    workingDirectory: '$(modulePath)'
-    displayName: Code quality test
-    condition: eq( variables['MAKE_CHECK'], 'true' )
-
-  - bash: |
-      make
-      make compile_all
-    workingDirectory: '$(modulePath)'
-    displayName: Compile all architectures test
-    condition: eq( variables['MAKE_COMPILE_ALL'], 'true' )
-
-  - bash: |
-      make travis_beta
-    env:
-      RCLONE_CONFIG_PASS: $(RCLONE_CONFIG_PASS)
-      BETA_SUBDIR: 'azure_pipelines'  # FIXME remove when removing travis/appveyor
-    workingDirectory: '$(modulePath)'
-    displayName: Deploy built binaries
-    condition: and( eq( variables['DEPLOY'], 'true' ), ne( variables['Build.Reason'], 'PullRequest' ) )
backend/all/all.go (filename inferred from content, header lost in the mirror)
@@ -8,6 +8,7 @@ import (
	_ "github.com/rclone/rclone/backend/b2"
	_ "github.com/rclone/rclone/backend/box"
	_ "github.com/rclone/rclone/backend/cache"
+	_ "github.com/rclone/rclone/backend/chunker"
	_ "github.com/rclone/rclone/backend/crypt"
	_ "github.com/rclone/rclone/backend/drive"
	_ "github.com/rclone/rclone/backend/dropbox"
@@ -20,13 +21,17 @@ import (
	_ "github.com/rclone/rclone/backend/jottacloud"
	_ "github.com/rclone/rclone/backend/koofr"
	_ "github.com/rclone/rclone/backend/local"
+	_ "github.com/rclone/rclone/backend/mailru"
	_ "github.com/rclone/rclone/backend/mega"
	_ "github.com/rclone/rclone/backend/onedrive"
	_ "github.com/rclone/rclone/backend/opendrive"
	_ "github.com/rclone/rclone/backend/pcloud"
+	_ "github.com/rclone/rclone/backend/premiumizeme"
+	_ "github.com/rclone/rclone/backend/putio"
	_ "github.com/rclone/rclone/backend/qingstor"
	_ "github.com/rclone/rclone/backend/s3"
	_ "github.com/rclone/rclone/backend/sftp"
+	_ "github.com/rclone/rclone/backend/sharefile"
	_ "github.com/rclone/rclone/backend/swift"
	_ "github.com/rclone/rclone/backend/union"
	_ "github.com/rclone/rclone/backend/webdav"
backend/amazonclouddrive/amazonclouddrive.go (filename inferred from content, header lost in the mirror)
@@ -28,6 +28,7 @@ import (
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
+	"github.com/rclone/rclone/fs/encodings"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
@@ -38,6 +39,7 @@ import (
 )
 
 const (
+	enc = encodings.AmazonCloudDrive
	folderKind      = "FOLDER"
	fileKind        = "FILE"
	statusAvailable = "AVAILABLE"
@@ -384,7 +386,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
	var resp *http.Response
	var subFolder *acd.Folder
	err = f.pacer.Call(func() (bool, error) {
-		subFolder, resp, err = folder.GetFolder(leaf)
+		subFolder, resp, err = folder.GetFolder(enc.FromStandardName(leaf))
		return f.shouldRetry(resp, err)
	})
	if err != nil {
@@ -411,7 +413,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
	var resp *http.Response
	var info *acd.Folder
	err = f.pacer.Call(func() (bool, error) {
-		info, resp, err = folder.CreateFolder(leaf)
+		info, resp, err = folder.CreateFolder(enc.FromStandardName(leaf))
		return f.shouldRetry(resp, err)
	})
	if err != nil {
@@ -479,6 +481,7 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
			if !hasValidParent {
				continue
			}
+			*node.Name = enc.ToStandardName(*node.Name)
			// Store the nodes up in case we have to retry the listing
			out = append(out, node)
		}
@@ -668,7 +671,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
	err = f.pacer.CallNoRetry(func() (bool, error) {
		start := time.Now()
		f.tokenRenewer.Start()
-		info, resp, err = folder.Put(in, leaf)
+		info, resp, err = folder.Put(in, enc.FromStandardName(leaf))
		f.tokenRenewer.Stop()
		var ok bool
		ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
@@ -1038,7 +1041,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
	var resp *http.Response
	var info *acd.File
	err = o.fs.pacer.Call(func() (bool, error) {
-		info, resp, err = folder.GetFile(leaf)
+		info, resp, err = folder.GetFile(enc.FromStandardName(leaf))
		return o.fs.shouldRetry(resp, err)
	})
	if err != nil {
@@ -1158,7 +1161,7 @@ func (f *Fs) restoreNode(info *acd.Node) (newInfo *acd.Node, err error) {
 func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) {
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
-		newInfo, resp, err = info.Rename(newName)
+		newInfo, resp, err = info.Rename(enc.FromStandardName(newName))
		return f.shouldRetry(resp, err)
	})
	return newInfo, err
@@ -1354,10 +1357,11 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoin
		if len(node.Parents) > 0 {
			if path, ok := f.dirCache.GetInv(node.Parents[0]); ok {
				// and append the drive file name to compute the full file name
+				name := enc.ToStandardName(*node.Name)
				if len(path) > 0 {
-					path = path + "/" + *node.Name
+					path = path + "/" + name
				} else {
-					path = *node.Name
+					path = name
				}
				// this will now clear the actual file too
				pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
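Every hunk above follows one pattern: encode names with `enc.FromStandardName` before they go to the provider's API, and decode with `enc.ToStandardName` when listings come back. A hedged sketch of that round trip, assuming the `encodings` API exactly as used in the diff; the input name is illustrative:

```go
// Illustrative only: round-tripping a name through the encoder,
// using the same constant the diff introduces.
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/encodings"
)

const enc = encodings.AmazonCloudDrive

func main() {
	// "/" cannot appear in a leaf name on the remote, so FromStandardName
	// escapes it (typically to a fullwidth equivalent); ToStandardName
	// reverses the mapping when listings are decoded.
	name := "backup/2019"
	wire := enc.FromStandardName(name)
	back := enc.ToStandardName(wire)
	fmt.Printf("wire=%q round-trip ok=%v\n", wire, back == name)
}
```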
@@ -16,7 +16,6 @@ import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -29,10 +28,12 @@ import (
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/encodings"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/lib/bucket"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
)
|
||||
|
||||
@@ -60,6 +61,8 @@ const (
|
||||
	emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
)

const enc = encodings.AzureBlob

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
@@ -143,19 +146,20 @@ type Options struct {

// Fs represents a remote azure server
type Fs struct {
	name string // name of this remote
	root string // the path we are working on if any
	opt Options // parsed config options
	features *fs.Features // optional features
	client *http.Client // http client we are using
	svcURL *azblob.ServiceURL // reference to serviceURL
	cntURL *azblob.ContainerURL // reference to containerURL
	container string // the container we are working on
	containerOKMu sync.Mutex // mutex to protect container OK
	containerOK bool // true if we have created the container
	containerDeleted bool // true if we have deleted the container
	pacer *fs.Pacer // To pace and retry the API calls
	uploadToken *pacer.TokenDispenser // control concurrency
	name string // name of this remote
	root string // the path we are working on if any
	opt Options // parsed config options
	features *fs.Features // optional features
	client *http.Client // http client we are using
	svcURL *azblob.ServiceURL // reference to serviceURL
	cntURLcacheMu sync.Mutex // mutex to protect cntURLcache
	cntURLcache map[string]*azblob.ContainerURL // reference to containerURL per container
	rootContainer string // container part of root (if any)
	rootDirectory string // directory part of root (if any)
	isLimited bool // if limited to one container
	cache *bucket.Cache // cache for container creation status
	pacer *fs.Pacer // To pace and retry the API calls
	uploadToken *pacer.TokenDispenser // control concurrency
}

// Object describes a azure object
@@ -179,18 +183,18 @@ func (f *Fs) Name() string {

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	if f.root == "" {
		return f.container
	}
	return f.container + "/" + f.root
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	if f.root == "" {
		return fmt.Sprintf("Azure container %s", f.container)
	if f.rootContainer == "" {
		return fmt.Sprintf("Azure root")
	}
	return fmt.Sprintf("Azure container %s path %s", f.container, f.root)
	if f.rootDirectory == "" {
		return fmt.Sprintf("Azure container %s", f.rootContainer)
	}
	return fmt.Sprintf("Azure container %s path %s", f.rootContainer, f.rootDirectory)
}

// Features returns the optional features of this Fs
@@ -198,21 +202,24 @@ func (f *Fs) Features() *fs.Features {
	return f.features
}

// Pattern to match a azure path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)

// parseParse parses a azure 'url'
func parsePath(path string) (container, directory string, err error) {
	parts := matcher.FindStringSubmatch(path)
	if parts == nil {
		err = errors.Errorf("couldn't find container in azure path %q", path)
	} else {
		container, directory = parts[1], parts[2]
		directory = strings.Trim(directory, "/")
	}
// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
}

// split returns container and containerPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) {
	containerName, containerPath = bucket.Split(path.Join(f.root, rootRelativePath))
	return enc.FromStandardName(containerName), enc.FromStandardPath(containerPath)
}

// split returns container and containerPath from the object
func (o *Object) split() (container, containerPath string) {
	return o.fs.split(o.remote)
}
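
The regexp-based parsePath above is replaced by a plain trim plus an on-demand split of "container/path" strings. A minimal sketch of that split, assuming only the standard library (the real code delegates to rclone's lib/bucket.Split and then applies the backend encoding; splitPath is a hypothetical name, not an rclone API):

package main

import (
	"fmt"
	"strings"
)

// splitPath cuts an absolute remote path at the first slash:
// "container/a/b" -> ("container", "a/b"), "container" -> ("container", "").
func splitPath(absPath string) (container, containerPath string) {
	slash := strings.IndexRune(absPath, '/')
	if slash < 0 {
		return absPath, ""
	}
	return absPath[:slash], absPath[slash+1:]
}

func main() {
	fmt.Println(splitPath("mycontainer/dir/file.txt")) // mycontainer dir/file.txt
}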

// validateAccessTier checks if azureblob supports user supplied tier
func validateAccessTier(tier string) bool {
	switch tier {
@@ -317,6 +324,12 @@ func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline
	return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log})
}

// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
	f.root = parsePath(root)
	f.rootContainer, f.rootDirectory = bucket.Split(f.root)
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	ctx := context.Background()
@@ -338,10 +351,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	if opt.ListChunkSize > maxListChunkSize {
		return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
	}
	container, directory, err := parsePath(root)
	if err != nil {
		return nil, err
	}
	if opt.Endpoint == "" {
		opt.Endpoint = storageDefaultBaseURL
	}
@@ -356,24 +365,25 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	f := &Fs{
		name: name,
		opt: *opt,
		container: container,
		root: directory,
		pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
		client: fshttp.NewClient(fs.Config),
		cache: bucket.NewCache(),
		cntURLcache: make(map[string]*azblob.ContainerURL, 1),
	}
	f.setRoot(root)
	f.features = (&fs.Features{
		ReadMimeType: true,
		WriteMimeType: true,
		BucketBased: true,
		SetTier: true,
		GetTier: true,
		ReadMimeType: true,
		WriteMimeType: true,
		BucketBased: true,
		BucketBasedRootOK: true,
		SetTier: true,
		GetTier: true,
	}).Fill(f)

	var (
		u *url.URL
		serviceURL azblob.ServiceURL
		containerURL azblob.ContainerURL
		u *url.URL
		serviceURL azblob.ServiceURL
	)
	switch {
	case opt.UseEmulator:
@@ -387,7 +397,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		}
		pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
		serviceURL = azblob.NewServiceURL(*u, pipeline)
		containerURL = serviceURL.NewContainerURL(container)
	case opt.Account != "" && opt.Key != "":
		credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
		if err != nil {
@@ -400,7 +409,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		}
		pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
		serviceURL = azblob.NewServiceURL(*u, pipeline)
		containerURL = serviceURL.NewContainerURL(container)
	case opt.SASURL != "":
		u, err = url.Parse(opt.SASURL)
		if err != nil {
@@ -411,38 +419,30 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		// Check if we have container level SAS or account level sas
		parts := azblob.NewBlobURLParts(*u)
		if parts.ContainerName != "" {
			if container != "" && parts.ContainerName != container {
			if f.rootContainer != "" && parts.ContainerName != f.rootContainer {
				return nil, errors.New("Container name in SAS URL and container provided in command do not match")
			}

			f.container = parts.ContainerName
			containerURL = azblob.NewContainerURL(*u, pipeline)
			containerURL := azblob.NewContainerURL(*u, pipeline)
			f.cntURLcache[parts.ContainerName] = &containerURL
			f.isLimited = true
		} else {
			serviceURL = azblob.NewServiceURL(*u, pipeline)
			containerURL = serviceURL.NewContainerURL(container)
		}
	default:
		return nil, errors.New("Need account+key or connectionString or sasURL")
	}
	f.svcURL = &serviceURL
	f.cntURL = &containerURL

	if f.root != "" {
		f.root += "/"
	if f.rootContainer != "" && f.rootDirectory != "" {
		// Check to see if the (container,directory) is actually an existing file
		oldRoot := f.root
		remote := path.Base(directory)
		f.root = path.Dir(directory)
		if f.root == "." {
			f.root = ""
		} else {
			f.root += "/"
		}
		_, err := f.NewObject(ctx, remote)
		newRoot, leaf := path.Split(oldRoot)
		f.setRoot(newRoot)
		_, err := f.NewObject(ctx, leaf)
		if err != nil {
			if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
				// File doesn't exist or is a directory so return old f
				f.root = oldRoot
				f.setRoot(oldRoot)
				return f, nil
			}
			return nil, err
@@ -453,6 +453,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	return f, nil
}

// return the container URL for the container passed in
func (f *Fs) cntURL(container string) (containerURL *azblob.ContainerURL) {
	f.cntURLcacheMu.Lock()
	defer f.cntURLcacheMu.Unlock()
	var ok bool
	if containerURL, ok = f.cntURLcache[container]; !ok {
		cntURL := f.svcURL.NewContainerURL(container)
		containerURL = &cntURL
		f.cntURLcache[container] = containerURL
	}
	return containerURL
}
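
cntURL is a mutex-guarded get-or-create cache, which is what lets one Fs serve every container under an account instead of a single fixed containerURL. The same shape in isolation, as a runnable sketch (lazyCache and newValue are illustrative names, not rclone APIs):

package main

import (
	"fmt"
	"sync"
)

// lazyCache builds a value the first time a key is asked for and
// returns the cached copy afterwards, like Fs.cntURL above.
type lazyCache struct {
	mu       sync.Mutex
	entries  map[string]string
	newValue func(key string) string
}

func (c *lazyCache) get(key string) string {
	c.mu.Lock()
	defer c.mu.Unlock()
	v, ok := c.entries[key]
	if !ok {
		v = c.newValue(key)
		c.entries[key] = v
	}
	return v
}

func main() {
	c := &lazyCache{
		entries:  map[string]string{},
		newValue: func(k string) string { return "https://account.blob.core.windows.net/" + k },
	}
	fmt.Println(c.get("mycontainer"))
}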

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
@@ -482,8 +496,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
}

// getBlobReference creates an empty blob reference with no metadata
func (f *Fs) getBlobReference(remote string) azblob.BlobURL {
	return f.cntURL.NewBlobURL(f.root + remote)
func (f *Fs) getBlobReference(container, containerPath string) azblob.BlobURL {
	return f.cntURL(container).NewBlobURL(containerPath)
}

// updateMetadataWithModTime adds the modTime passed in to o.meta.
@@ -519,16 +533,18 @@ type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
// the container and root supplied
//
// dir is the starting directory, "" for root
func (f *Fs) list(ctx context.Context, dir string, recurse bool, maxResults uint, fn listFn) error {
	f.containerOKMu.Lock()
	deleted := f.containerDeleted
	f.containerOKMu.Unlock()
	if deleted {
//
// The remote has prefix removed from it and if addContainer is set then
// it adds the container to the start.
func (f *Fs) list(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, maxResults uint, fn listFn) error {
	if f.cache.IsDeleted(container) {
		return fs.ErrorDirNotFound
	}
	root := f.root
	if dir != "" {
		root += dir + "/"
	if prefix != "" {
		prefix += "/"
	}
	if directory != "" {
		directory += "/"
	}
	delimiter := ""
	if !recurse {
@@ -543,15 +559,14 @@ func (f *Fs) list(ctx context.Context, dir string, recurse bool, maxResults uint
			UncommittedBlobs: false,
			Deleted: false,
		},
		Prefix: root,
		Prefix: directory,
		MaxResults: int32(maxResults),
	}
	directoryMarkers := map[string]struct{}{}
	for marker := (azblob.Marker{}); marker.NotDone(); {
		var response *azblob.ListBlobsHierarchySegmentResponse
		err := f.pacer.Call(func() (bool, error) {
			var err error
			response, err = f.cntURL.ListBlobsHierarchySegment(ctx, marker, delimiter, options)
			response, err = f.cntURL(container).ListBlobsHierarchySegment(ctx, marker, delimiter, options)
			return f.shouldRetry(err)
		})

@@ -564,33 +579,24 @@ func (f *Fs) list(ctx context.Context, dir string, recurse bool, maxResults uint
		}
		// Advance marker to next
		marker = response.NextMarker

		for i := range response.Segment.BlobItems {
			file := &response.Segment.BlobItems[i]
			// Finish if file name no longer has prefix
			// if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
			// return nil
			// }
			if !strings.HasPrefix(file.Name, f.root) {
				fs.Debugf(f, "Odd name received %q", file.Name)
			remote := enc.ToStandardPath(file.Name)
			if !strings.HasPrefix(remote, prefix) {
				fs.Debugf(f, "Odd name received %q", remote)
				continue
			}
			remote := file.Name[len(f.root):]
			remote = remote[len(prefix):]
			if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) {
				if strings.HasSuffix(remote, "/") {
					remote = remote[:len(remote)-1]
				}
				err = fn(remote, file, true)
				if err != nil {
					return err
				}
				// Keep track of directory markers. If recursing then
				// there will be no Prefixes so no need to keep track
				if !recurse {
					directoryMarkers[remote] = struct{}{}
				}
				continue // skip directory marker
			}
			if addContainer {
				remote = path.Join(container, remote)
			}
			// Send object
			err = fn(remote, file, false)
			if err != nil {
@@ -600,14 +606,14 @@ func (f *Fs) list(ctx context.Context, dir string, recurse bool, maxResults uint
		// Send the subdirectories
		for _, remote := range response.Segment.BlobPrefixes {
			remote := strings.TrimRight(remote.Name, "/")
			if !strings.HasPrefix(remote, f.root) {
			remote = enc.ToStandardPath(remote)
			if !strings.HasPrefix(remote, prefix) {
				fs.Debugf(f, "Odd directory name received %q", remote)
				continue
			}
			remote = remote[len(f.root):]
			// Don't send if already sent as a directory marker
			if _, found := directoryMarkers[remote]; found {
				continue
			remote = remote[len(prefix):]
			if addContainer {
				remote = path.Join(container, remote)
			}
			// Send object
			err = fn(remote, nil, true)
@@ -632,19 +638,9 @@ func (f *Fs) itemToDirEntry(remote string, object *azblob.BlobItem, isDirectory
	return o, nil
}

// mark the container as being OK
func (f *Fs) markContainerOK() {
	if f.container != "" {
		f.containerOKMu.Lock()
		f.containerOK = true
		f.containerDeleted = false
		f.containerOKMu.Unlock()
	}
}

// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	err = f.list(ctx, dir, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
	err = f.list(ctx, container, directory, prefix, addContainer, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
		entry, err := f.itemToDirEntry(remote, object, isDirectory)
		if err != nil {
			return err
@@ -658,17 +654,24 @@ func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, er
		return nil, err
	}
	// container must be present if listing succeeded
	f.markContainerOK()
	f.cache.MarkOK(container)
	return entries, nil
}

// listContainers returns all the containers to out
func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
	if dir != "" {
		return nil, fs.ErrorListBucketRequired
func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) {
	if f.isLimited {
		f.cntURLcacheMu.Lock()
		for container := range f.cntURLcache {
			d := fs.NewDir(container, time.Time{})
			entries = append(entries, d)
		}
		f.cntURLcacheMu.Unlock()
		return entries, nil
	}
	err = f.listContainersToFn(func(container *azblob.ContainerItem) error {
		d := fs.NewDir(container.Name, container.Properties.LastModified)
		d := fs.NewDir(enc.ToStandardName(container.Name), container.Properties.LastModified)
		f.cache.MarkOK(container.Name)
		entries = append(entries, d)
		return nil
	})
@@ -688,10 +691,14 @@ func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	if f.container == "" {
		return f.listContainers(dir)
	container, directory := f.split(dir)
	if container == "" {
		if directory != "" {
			return nil, fs.ErrorListBucketRequired
		}
		return f.listContainers(ctx)
	}
	return f.listDir(ctx, dir)
	return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
}

// ListR lists the objects and directories of the Fs starting
@@ -711,22 +718,43 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	if f.container == "" {
		return fs.ErrorListBucketRequired
	}
	container, directory := f.split(dir)
	list := walk.NewListRHelper(callback)
	err = f.list(ctx, dir, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
		entry, err := f.itemToDirEntry(remote, object, isDirectory)
	listR := func(container, directory, prefix string, addContainer bool) error {
		return f.list(ctx, container, directory, prefix, addContainer, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
			entry, err := f.itemToDirEntry(remote, object, isDirectory)
			if err != nil {
				return err
			}
			return list.Add(entry)
		})
	}
	if container == "" {
		entries, err := f.listContainers(ctx)
		if err != nil {
			return err
		}
		return list.Add(entry)
	})
	if err != nil {
		return err
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
			container := entry.Remote()
			err = listR(container, "", f.rootDirectory, true)
			if err != nil {
				return err
			}
			// container must be present if listing succeeded
			f.cache.MarkOK(container)
		}
	} else {
		err = listR(container, directory, f.rootDirectory, f.rootContainer == "")
		if err != nil {
			return err
		}
		// container must be present if listing succeeded
		f.cache.MarkOK(container)
	}
	// container must be present if listing succeeded
	f.markContainerOK()
	return list.Flush()
}

@@ -776,86 +804,43 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
	return fs, fs.Update(ctx, in, src, options...)
}

// Check if the container exists
//
// NB this can return incorrect results if called immediately after container deletion
func (f *Fs) dirExists() (bool, error) {
	options := azblob.ListBlobsSegmentOptions{
		Details: azblob.BlobListingDetails{
			Copy: false,
			Metadata: false,
			Snapshots: false,
			UncommittedBlobs: false,
			Deleted: false,
		},
		MaxResults: 1,
	}
	err := f.pacer.Call(func() (bool, error) {
		ctx := context.Background()
		_, err := f.cntURL.ListBlobsHierarchySegment(ctx, azblob.Marker{}, "", options)
		return f.shouldRetry(err)
	})
	if err == nil {
		return true, nil
	}
	// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
	if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
		return false, nil
	}
	return false, err
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	f.containerOKMu.Lock()
	defer f.containerOKMu.Unlock()
	if f.containerOK {
		return nil
	}
	if !f.containerDeleted {
		exists, err := f.dirExists()
		if err == nil {
			f.containerOK = exists
		}
		if err != nil || exists {
			return err
		}
	}

	// now try to create the container
	err := f.pacer.Call(func() (bool, error) {
		ctx := context.Background()
		_, err := f.cntURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
		if err != nil {
			if storageErr, ok := err.(azblob.StorageError); ok {
				switch storageErr.ServiceCode() {
				case azblob.ServiceCodeContainerAlreadyExists:
					f.containerOK = true
					return false, nil
				case azblob.ServiceCodeContainerBeingDeleted:
					// From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container
					// When a container is deleted, a container with the same name cannot be created
					// for at least 30 seconds; the container may not be available for more than 30
					// seconds if the service is still processing the request.
					time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds
					f.containerDeleted = true
					return true, err
				}
			}
		}
		return f.shouldRetry(err)
	})
	if err == nil {
		f.containerOK = true
		f.containerDeleted = false
	}
	return errors.Wrap(err, "failed to make container")
	container, _ := f.split(dir)
	return f.makeContainer(ctx, container)
}

// isEmpty checks to see if a given directory is empty and returns an error if not
func (f *Fs) isEmpty(ctx context.Context, dir string) (err error) {
// makeContainer creates the container if it doesn't exist
func (f *Fs) makeContainer(ctx context.Context, container string) error {
	return f.cache.Create(container, func() error {
		// now try to create the container
		return f.pacer.Call(func() (bool, error) {
			_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
			if err != nil {
				if storageErr, ok := err.(azblob.StorageError); ok {
					switch storageErr.ServiceCode() {
					case azblob.ServiceCodeContainerAlreadyExists:
						return false, nil
					case azblob.ServiceCodeContainerBeingDeleted:
						// From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container
						// When a container is deleted, a container with the same name cannot be created
						// for at least 30 seconds; the container may not be available for more than 30
						// seconds if the service is still processing the request.
						time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds
						f.cache.MarkDeleted(container)
						return true, err
					}
				}
			}
			return f.shouldRetry(err)
		})
	}, nil)
}
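
makeContainer wires container creation through the shared bucket.Cache and the pacer. The pacer contract is the part worth seeing in isolation: the inner function reports (retry, err), and returning true after the six second sleep is what turns ServiceCodeContainerBeingDeleted into roughly a minute of polling. A minimal sketch of that contract, where call is a hypothetical stand-in for f.pacer.Call (the real pacer also rate-limits and backs off):

package main

import (
	"errors"
	"fmt"
	"time"
)

// call runs fn until it reports retry == false or the attempts run out.
func call(fn func() (retry bool, err error), maxTries int) error {
	var err error
	for i := 0; i < maxTries; i++ {
		var retry bool
		retry, err = fn()
		if !retry {
			return err
		}
	}
	return err
}

func main() {
	attempts := 0
	err := call(func() (bool, error) {
		attempts++
		if attempts < 3 {
			time.Sleep(10 * time.Millisecond) // stands in for the 6s wait above
			return true, errors.New("container being deleted")
		}
		return false, nil // container created
	}, 10)
	fmt.Println(attempts, err) // 3 <nil>
}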

// isEmpty checks to see if a given (container, directory) is empty and returns an error if not
func (f *Fs) isEmpty(ctx context.Context, container, directory string) (err error) {
	empty := true
	err = f.list(ctx, dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
	err = f.list(ctx, container, directory, f.rootDirectory, f.rootContainer == "", true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
		empty = false
		return nil
	})
@@ -870,47 +855,42 @@ func (f *Fs) isEmpty(ctx context.Context, dir string) (err error) {

// deleteContainer deletes the container. It can delete a full
// container so use isEmpty if you don't want that.
func (f *Fs) deleteContainer() error {
	f.containerOKMu.Lock()
	defer f.containerOKMu.Unlock()
	options := azblob.ContainerAccessConditions{}
	ctx := context.Background()
	err := f.pacer.Call(func() (bool, error) {
		_, err := f.cntURL.GetProperties(ctx, azblob.LeaseAccessConditions{})
		if err == nil {
			_, err = f.cntURL.Delete(ctx, options)
		}
func (f *Fs) deleteContainer(ctx context.Context, container string) error {
	return f.cache.Remove(container, func() error {
		options := azblob.ContainerAccessConditions{}
		return f.pacer.Call(func() (bool, error) {
			_, err := f.cntURL(container).GetProperties(ctx, azblob.LeaseAccessConditions{})
			if err == nil {
				_, err = f.cntURL(container).Delete(ctx, options)
			}

		if err != nil {
			// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
			if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
				return false, fs.ErrorDirNotFound
			if err != nil {
				// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
				if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
					return false, fs.ErrorDirNotFound
				}

				return f.shouldRetry(err)
			}

			return f.shouldRetry(err)
		}

		return f.shouldRetry(err)
		})
	})
	if err == nil {
		f.containerOK = false
		f.containerDeleted = true
	}
	return errors.Wrap(err, "failed to delete container")
}

// Rmdir deletes the container if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	err := f.isEmpty(ctx, dir)
	container, directory := f.split(dir)
	if container == "" || directory != "" {
		return nil
	}
	err := f.isEmpty(ctx, container, directory)
	if err != nil {
		return err
	}
	if f.root != "" || dir != "" {
		return nil
	}
	return f.deleteContainer()
	return f.deleteContainer(ctx, container)
}

// Precision of the remote
@@ -926,11 +906,12 @@ func (f *Fs) Hashes() hash.Set {
// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context) error {
	dir := "" // forward compat!
	if f.root != "" || dir != "" {
		// Delegate to caller if not root container
	container, directory := f.split(dir)
	if container == "" || directory != "" {
		// Delegate to caller if not root of a container
		return fs.ErrorCantPurge
	}
	return f.deleteContainer()
	return f.deleteContainer(ctx, container)
}

// Copy src to this remote using server side copy operations.
@@ -943,7 +924,8 @@ func (f *Fs) Purge(ctx context.Context) error {
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	err := f.Mkdir(ctx, "")
	dstContainer, dstPath := f.split(remote)
	err := f.makeContainer(ctx, dstContainer)
	if err != nil {
		return nil, err
	}
@@ -952,7 +934,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	dstBlobURL := f.getBlobReference(remote)
	dstBlobURL := f.getBlobReference(dstContainer, dstPath)
	srcBlobURL := srcObj.getBlobReference()

	source, err := url.Parse(srcBlobURL.String())
@@ -1085,7 +1067,8 @@ func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {

// getBlobReference creates an empty blob reference with no metadata
func (o *Object) getBlobReference() azblob.BlobURL {
	return o.fs.getBlobReference(o.remote)
	container, directory := o.split()
	return o.fs.getBlobReference(container, directory)
}

// clearMetaData clears enough metadata so readMetaData will re-read it
@@ -1137,7 +1120,7 @@ func (o *Object) parseTimeString(timeString string) (err error) {
		fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
		return err
	}
	o.modTime = time.Unix(unixMilliseconds/1E3, (unixMilliseconds%1E3)*1E6).UTC()
	o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
	return nil
}

@@ -1185,7 +1168,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	if o.AccessTier() == azblob.AccessTierArchive {
		return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
	}

	fs.FixRangeOption(options, o.size)
	for _, option := range options {
		switch x := option.(type) {
		case *fs.RangeOption:
@@ -1391,7 +1374,8 @@ outer:
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	err = o.fs.Mkdir(ctx, "")
	container, _ := o.split()
	err = o.fs.makeContainer(ctx, container)
	if err != nil {
		return err
	}
@@ -1529,4 +1513,6 @@ var (
	_ fs.ListRer = &Fs{}
	_ fs.Object = &Object{}
	_ fs.MimeTyper = &Object{}
	_ fs.GetTierer = &Object{}
	_ fs.SetTierer = &Object{}
)

@@ -50,7 +50,7 @@ type Timestamp time.Time
// MarshalJSON turns a Timestamp into JSON (in UTC)
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
	timestamp := (*time.Time)(t).UTC().UnixNano()
	return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
	return []byte(strconv.FormatInt(timestamp/1e6, 10)), nil
}

// UnmarshalJSON turns JSON into a Timestamp
@@ -59,7 +59,7 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
	if err != nil {
		return err
	}
	*t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
	*t = Timestamp(time.Unix(timestamp/1e3, (timestamp%1e3)*1e6).UTC())
	return nil
}
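
The 1E3 -> 1e3 edits here and above are purely stylistic (Go style prefers the lowercase exponent); the arithmetic is unchanged. What the arithmetic does: B2 timestamps are integer milliseconds since the epoch, so they are split into whole seconds and a nanosecond remainder for time.Unix. A round trip:

package main

import (
	"fmt"
	"time"
)

func main() {
	var ms int64 = 1561234567890 // milliseconds since the epoch
	t := time.Unix(ms/1e3, (ms%1e3)*1e6).UTC()
	back := t.UnixNano() / 1e6 // and back again, as MarshalJSON does
	fmt.Println(t, back == ms) // 2019-06-22 ... UTC true
}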

backend/b2/b2.go: 649 changes (file diff suppressed because it is too large)
@@ -104,13 +104,14 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
		Method: "POST",
		Path: "/b2_start_large_file",
	}
	bucketID, err := f.getBucketID()
	bucket, bucketPath := o.split()
	bucketID, err := f.getBucketID(ctx, bucket)
	if err != nil {
		return nil, err
	}
	var request = api.StartLargeFileRequest{
		BucketID: bucketID,
		Name: o.fs.root + remote,
		Name: enc.FromStandardPath(bucketPath),
		ContentType: fs.MimeType(ctx, src),
		Info: map[string]string{
			timeKey: timeString(modTime),
@@ -124,8 +125,8 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
	}
	var response api.StartLargeFileResponse
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(&opts, &request, &response)
		return f.shouldRetry(resp, err)
		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
@@ -149,7 +150,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
	up.uploadMu.Lock()
	defer up.uploadMu.Unlock()
	if len(up.uploads) == 0 {
@@ -161,8 +162,8 @@ func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err
			ID: up.id,
		}
		err := up.f.pacer.Call(func() (bool, error) {
			resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
			return up.f.shouldRetry(resp, err)
			resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
			return up.f.shouldRetry(ctx, resp, err)
		})
		if err != nil {
			return nil, errors.Wrap(err, "failed to get upload URL")
@@ -191,12 +192,12 @@ func (up *largeUpload) clearUploadURL() {
}

// Transfer a chunk
func (up *largeUpload) transferChunk(part int64, body []byte) error {
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
	err := up.f.pacer.Call(func() (bool, error) {
		fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))

		// Get upload URL
		upload, err := up.getUploadURL()
		upload, err := up.getUploadURL(ctx)
		if err != nil {
			return false, err
		}
@@ -240,8 +241,8 @@ func (up *largeUpload) transferChunk(part int64, body []byte) error {

		var response api.UploadPartResponse

		resp, err := up.f.srv.CallJSON(&opts, nil, &response)
		retry, err := up.f.shouldRetry(resp, err)
		resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
		retry, err := up.f.shouldRetry(ctx, resp, err)
		if err != nil {
			fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
		}
@@ -263,7 +264,7 @@ func (up *largeUpload) transferChunk(part int64, body []byte) error {
}

// finish closes off the large upload
func (up *largeUpload) finish() error {
func (up *largeUpload) finish(ctx context.Context) error {
	fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
	opts := rest.Opts{
		Method: "POST",
@@ -275,8 +276,8 @@ func (up *largeUpload) finish() error {
	}
	var response api.FileInfo
	err := up.f.pacer.Call(func() (bool, error) {
		resp, err := up.f.srv.CallJSON(&opts, &request, &response)
		return up.f.shouldRetry(resp, err)
		resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
		return up.f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return err
@@ -285,7 +286,7 @@ func (up *largeUpload) finish() error {
}

// cancel aborts the large upload
func (up *largeUpload) cancel() error {
func (up *largeUpload) cancel(ctx context.Context) error {
	opts := rest.Opts{
		Method: "POST",
		Path: "/b2_cancel_large_file",
@@ -295,18 +296,18 @@ func (up *largeUpload) cancel() error {
	}
	var response api.CancelLargeFileResponse
	err := up.f.pacer.Call(func() (bool, error) {
		resp, err := up.f.srv.CallJSON(&opts, &request, &response)
		return up.f.shouldRetry(resp, err)
		resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
		return up.f.shouldRetry(ctx, resp, err)
	})
	return err
}

func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
func (up *largeUpload) managedTransferChunk(ctx context.Context, wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
	wg.Add(1)
	go func(part int64, buf []byte) {
		defer wg.Done()
		defer up.f.putUploadBlock(buf)
		err := up.transferChunk(part, buf)
		err := up.transferChunk(ctx, part, buf)
		if err != nil {
			select {
			case errs <- err:
@@ -316,7 +317,7 @@ func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error,
	}(part, buf)
}

func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
func (up *largeUpload) finishOrCancelOnError(ctx context.Context, err error, errs chan error) error {
	if err == nil {
		select {
		case err = <-errs:
@@ -325,19 +326,19 @@ func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
	}
	if err != nil {
		fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
		cancelErr := up.cancel()
		cancelErr := up.cancel(ctx)
		if cancelErr != nil {
			fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
		}
		return err
	}
	return up.finish()
	return up.finish(ctx)
}
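
Stream and Upload below share this machinery: every chunk goes to a goroutine via managedTransferChunk, a one-slot errs channel keeps the first failure, and finishOrCancelOnError decides between finishing and cancelling the large file. The error-collection idiom on its own, as a runnable sketch:

package main

import (
	"fmt"
	"sync"
)

func main() {
	errs := make(chan error, 1) // capacity 1: later errors are dropped
	var wg sync.WaitGroup
	for part := 1; part <= 4; part++ {
		wg.Add(1)
		go func(part int) {
			defer wg.Done()
			if part == 3 { // pretend this chunk fails
				select {
				case errs <- fmt.Errorf("chunk %d failed", part):
				default: // an error is already queued
				}
			}
		}(part)
	}
	wg.Wait()
	select {
	case err := <-errs:
		fmt.Println("upload failed:", err) // would cancel the large file
	default:
		fmt.Println("upload finished") // would commit the large file
	}
}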

// Stream uploads the chunks from the input, starting with a required initial
// chunk. Assumes the file size is unknown and will upload until the input
// reaches EOF.
func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
	fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
	errs := make(chan error, 1)
	hasMoreParts := true
@@ -345,7 +346,7 @@ func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {

	// Transfer initial chunk
	up.size = int64(len(initialUploadBlock))
	up.managedTransferChunk(&wg, errs, 1, initialUploadBlock)
	up.managedTransferChunk(ctx, &wg, errs, 1, initialUploadBlock)

outer:
	for part := int64(2); hasMoreParts; part++ {
@@ -387,16 +388,16 @@ outer:
		}

		// Transfer the chunk
		up.managedTransferChunk(&wg, errs, part, buf)
		up.managedTransferChunk(ctx, &wg, errs, part, buf)
	}
	wg.Wait()
	up.sha1s = up.sha1s[:up.parts]

	return up.finishOrCancelOnError(err, errs)
	return up.finishOrCancelOnError(ctx, err, errs)
}

// Upload uploads the chunks from the input
func (up *largeUpload) Upload() error {
func (up *largeUpload) Upload(ctx context.Context) error {
	fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
	remaining := up.size
	errs := make(chan error, 1)
@@ -427,10 +428,10 @@ outer:
		}

		// Transfer the chunk
		up.managedTransferChunk(&wg, errs, part, buf)
		up.managedTransferChunk(ctx, &wg, errs, part, buf)
		remaining -= reqSize
	}
	wg.Wait()

	return up.finishOrCancelOnError(err, errs)
	return up.finishOrCancelOnError(ctx, err, errs)
}
@@ -202,3 +202,23 @@ type CommitUpload struct {
		ContentModifiedAt Time `json:"content_modified_at"`
	} `json:"attributes"`
}

// ConfigJSON defines the shape of a box config.json
type ConfigJSON struct {
	BoxAppSettings AppSettings `json:"boxAppSettings"`
	EnterpriseID string `json:"enterpriseID"`
}

// AppSettings defines the shape of the boxAppSettings within box config.json
type AppSettings struct {
	ClientID string `json:"clientID"`
	ClientSecret string `json:"clientSecret"`
	AppAuth AppAuth `json:"appAuth"`
}

// AppAuth defines the shape of the appAuth within boxAppSettings in config.json
type AppAuth struct {
	PublicKeyID string `json:"publicKeyID"`
	PrivateKey string `json:"privateKey"`
	Passphrase string `json:"passphrase"`
}
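
These types mirror the config.json that the Box developer console exports for a JWT app. A hypothetical file matching the struct tags above (every value is a placeholder):

{
  "boxAppSettings": {
    "clientID": "abc123",
    "clientSecret": "def456",
    "appAuth": {
      "publicKeyID": "kid01",
      "privateKey": "-----BEGIN ENCRYPTED PRIVATE KEY-----\n...\n-----END ENCRYPTED PRIVATE KEY-----\n",
      "passphrase": "secret"
    }
  },
  "enterpriseID": "1234567"
}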

@@ -11,8 +11,12 @@ package box

import (
	"context"
	"crypto/rsa"
	"encoding/json"
	"encoding/pem"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
@@ -21,6 +25,10 @@ import (
	"strings"
	"time"

	"github.com/rclone/rclone/lib/jwtutil"

	"github.com/youmark/pkcs8"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/box/api"
	"github.com/rclone/rclone/fs"
@@ -28,15 +36,20 @@ import (
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/encodings"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/dircache"
	"github.com/rclone/rclone/lib/oauthutil"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/rest"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/jws"
)

const enc = encodings.Box

const (
	rcloneClientID = "d0374ba6pgmaguie02ge15sv1mllndho"
	rcloneEncryptedClientSecret = "sYbJYm99WB8jzeaLPU0OPDMJKIkZvD2qOn3SyEMfiJr03RdtDt3xcZEIudRhbIDL"
@@ -49,6 +62,7 @@ const (
	listChunks = 1000 // chunk size to read directory listings
	minUploadCutoff = 50000000 // upload cutoff can be no lower than this
	defaultUploadCutoff = 50 * 1024 * 1024
	tokenURL = "https://api.box.com/oauth2/token"
)

// Globals
@@ -73,9 +87,34 @@ func init() {
		Description: "Box",
		NewFs: NewFs,
		Config: func(name string, m configmap.Mapper) {
			err := oauthutil.Config("box", name, m, oauthConfig)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			jsonFile, ok := m.Get("box_config_file")
			boxSubType, boxSubTypeOk := m.Get("box_sub_type")
			var err error
			if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
				boxConfig, err := getBoxConfig(jsonFile)
				if err != nil {
					log.Fatalf("Failed to configure token: %v", err)
				}
				privateKey, err := getDecryptedPrivateKey(boxConfig)
				if err != nil {
					log.Fatalf("Failed to configure token: %v", err)
				}
				claims, err := getClaims(boxConfig, boxSubType)
				if err != nil {
					log.Fatalf("Failed to configure token: %v", err)
				}
				signingHeaders := getSigningHeaders(boxConfig)
				queryParams := getQueryParams(boxConfig)
				client := fshttp.NewClient(fs.Config)
				err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
				if err != nil {
					log.Fatalf("Failed to configure token with jwt authentication: %v", err)
				}
			} else {
				err = oauthutil.Config("box", name, m, oauthConfig)
				if err != nil {
					log.Fatalf("Failed to configure token with oauth authentication: %v", err)
				}
			}
		},
		Options: []fs.Option{{
@@ -84,6 +123,19 @@ func init() {
		}, {
			Name: config.ConfigClientSecret,
			Help: "Box App Client Secret\nLeave blank normally.",
		}, {
			Name: "box_config_file",
			Help: "Box App config.json location\nLeave blank normally.",
		}, {
			Name: "box_sub_type",
			Default: "user",
			Examples: []fs.OptionExample{{
				Value: "user",
				Help: "Rclone should act on behalf of a user",
			}, {
				Value: "enterprise",
				Help: "Rclone should act on behalf of a service account",
			}},
		}, {
			Name: "upload_cutoff",
			Help: "Cutoff for switching to multipart upload (>= 50MB).",
@@ -98,6 +150,74 @@ func init() {
	})
}

func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
	file, err := ioutil.ReadFile(configFile)
	if err != nil {
		return nil, errors.Wrap(err, "box: failed to read Box config")
	}
	err = json.Unmarshal(file, &boxConfig)
	if err != nil {
		return nil, errors.Wrap(err, "box: failed to parse Box config")
	}
	return boxConfig, nil
}

func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
	val, err := jwtutil.RandomHex(20)
	if err != nil {
		return nil, errors.Wrap(err, "box: failed to generate random string for jti")
	}

	claims = &jws.ClaimSet{
		Iss: boxConfig.BoxAppSettings.ClientID,
		Sub: boxConfig.EnterpriseID,
		Aud: tokenURL,
		Iat: time.Now().Unix(),
		Exp: time.Now().Add(time.Second * 45).Unix(),
		PrivateClaims: map[string]interface{}{
			"box_sub_type": boxSubType,
			"aud": tokenURL,
			"jti": val,
		},
	}

	return claims, nil
}

func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
	signingHeaders := &jws.Header{
		Algorithm: "RS256",
		Typ: "JWT",
		KeyID: boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
	}

	return signingHeaders
}

func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
	queryParams := map[string]string{
		"client_id": boxConfig.BoxAppSettings.ClientID,
		"client_secret": boxConfig.BoxAppSettings.ClientSecret,
	}

	return queryParams
}

func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {

	block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
	if len(rest) > 0 {
		return nil, errors.Wrap(err, "box: extra data included in private key")
	}

	rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase))
	if err != nil {
		return nil, errors.Wrap(err, "box: failed to decrypt private key")
	}

	return rsaKey.(*rsa.PrivateKey), nil
}
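
Those helpers feed jwtutil.Config, which signs the claims and swaps the resulting assertion for an OAuth token at tokenURL. The signing step itself is ordinary golang.org/x/oauth2/jws usage; a minimal sketch with a throwaway key (the real code uses the decrypted key and IDs from config.json, and adds the jti and box_sub_type claims shown above; all literal values here are placeholders):

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"golang.org/x/oauth2/jws"
)

func main() {
	// Throwaway key; stands in for the key decrypted from config.json.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	claims := &jws.ClaimSet{
		Iss: "abc123",  // clientID
		Sub: "1234567", // enterpriseID (or a user ID)
		Aud: "https://api.box.com/oauth2/token",
		PrivateClaims: map[string]interface{}{"box_sub_type": "enterprise"},
	}
	header := &jws.Header{Algorithm: "RS256", Typ: "JWT", KeyID: "kid01"}
	assertion, err := jws.Encode(header, claims, key)
	if err != nil {
		panic(err)
	}
	fmt.Println(assertion[:20], "...") // the signed JWT sent in the token request
}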

// Options defines the configuration for this backend
type Options struct {
	UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
@@ -181,18 +301,6 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
	return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// substitute reserved characters for box
func replaceReservedChars(x string) string {
	// Backslash for FULLWIDTH REVERSE SOLIDUS
	return strings.Replace(x, "\\", "＼", -1)
}

// restore reserved characters for box
func restoreReservedChars(x string) string {
	// FULLWIDTH REVERSE SOLIDUS for Backslash
	return strings.Replace(x, "＼", "\\", -1)
}
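
The two helpers above disappear in favour of the shared encodings.Box encoder, but the underlying trick is unchanged: Box rejects backslash in names, so it is swapped for the look-alike fullwidth reverse solidus (U+FF3C) on the way in and restored on the way out. The round trip:

package main

import (
	"fmt"
	"strings"
)

func main() {
	name := `back\slash`
	encoded := strings.Replace(name, "\\", "＼", -1) // "＼" is U+FF3C
	decoded := strings.Replace(encoded, "＼", "\\", -1)
	fmt.Println(encoded, decoded == name) // back＼slash true
}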

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
	// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
@@ -204,7 +312,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
		return nil, err
	}

	found, err := f.listAll(directoryID, false, true, func(item *api.Item) bool {
	found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
		if item.Name == leaf {
			info = item
			return true
@@ -352,7 +460,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
	// Find the leaf in pathID
	found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
	found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
		if item.Name == leaf {
			pathIDOut = item.ID
			return true
@@ -380,13 +488,13 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
		Parameters: fieldsValue(),
	}
	mkdir := api.CreateFolder{
		Name: replaceReservedChars(leaf),
		Name: enc.FromStandardName(leaf),
		Parent: api.Parent{
			ID: pathID,
		},
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
		resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -408,7 +516,7 @@ type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path: "/folders/" + dirID + "/items",
@@ -423,7 +531,7 @@ OUTER:
		var result api.FolderItems
		var resp *http.Response
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(&opts, nil, &result)
			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
			return shouldRetry(resp, err)
		})
		if err != nil {
@@ -446,7 +554,7 @@ OUTER:
			if item.ItemStatus != api.ItemStatusActive {
				continue
			}
			item.Name = restoreReservedChars(item.Name)
			item.Name = enc.ToStandardName(item.Name)
			if fn(item) {
				found = true
				break OUTER
@@ -479,7 +587,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
		return nil, err
	}
	var iErr error
	_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
	_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
		remote := path.Join(dir, info.Name)
		if info.Type == api.ItemTypeFolder {
			// cache the directory ID for later lookups
@@ -581,14 +689,14 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
}

// deleteObject removes an object by ID
func (f *Fs) deleteObject(id string) error {
func (f *Fs) deleteObject(ctx context.Context, id string) error {
	opts := rest.Opts{
		Method: "DELETE",
		Path: "/files/" + id,
		NoResponse: true,
	}
	return f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.Call(&opts)
		resp, err := f.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
	})
}
@@ -619,7 +727,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
	opts.Parameters.Set("recursive", strconv.FormatBool(!check))
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(&opts)
		resp, err = f.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -682,9 +790,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
		Path: "/files/" + srcObj.id + "/copy",
		Parameters: fieldsValue(),
	}
	replacedLeaf := replaceReservedChars(leaf)
	copyFile := api.CopyFile{
		Name: replacedLeaf,
		Name: enc.FromStandardName(leaf),
		Parent: api.Parent{
			ID: directoryID,
		},
@@ -692,7 +799,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
	var resp *http.Response
	var info *api.Item
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(&opts, &copyFile, &info)
		resp, err = f.srv.CallJSON(ctx, &opts, &copyFile, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -715,7 +822,7 @@ func (f *Fs) Purge(ctx context.Context) error {
}

// move a file or folder
func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
	// Move the object
	opts := rest.Opts{
		Method: "PUT",
@@ -723,14 +830,14 @@ func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err e
		Parameters: fieldsValue(),
	}
	move := api.UpdateFileMove{
		Name: replaceReservedChars(leaf),
		Name: enc.FromStandardName(leaf),
		Parent: api.Parent{
			ID: directoryID,
		},
	}
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(&opts, &move, &info)
		resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -762,7 +869,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
	}

	// Do the move
	info, err := f.move("/files/", srcObj.id, leaf, directoryID)
	info, err := f.move(ctx, "/files/", srcObj.id, leaf, directoryID)
	if err != nil {
		return nil, err
	}
@@ -845,7 +952,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
	}

	// Do the move
	_, err = f.move("/folders/", srcID, leaf, directoryID)
	_, err = f.move(ctx, "/folders/", srcID, leaf, directoryID)
	if err != nil {
		return err
	}
@@ -887,7 +994,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
	var info api.Item
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(&opts, &shareLink, &info)
		resp, err = f.srv.CallJSON(ctx, &opts, &shareLink, &info)
		return shouldRetry(resp, err)
	})
	return info.SharedLink.URL, err
@@ -924,11 +1031,6 @@ func (o *Object) Remote() string {
	return o.remote
}

// srvPath returns a path for use in server
func (o *Object) srvPath() string {
	return replaceReservedChars(o.fs.rootSlash() + o.remote)
}

// Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.SHA1 {
@@ -1006,7 +1108,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
	}
	var info *api.Item
	err := o.fs.pacer.Call(func() (bool, error) {
		resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
		resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
		return shouldRetry(resp, err)
	})
	return info, err
@@ -1039,7 +1141,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
		Options: options,
	}
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(&opts)
		resp, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -1051,9 +1153,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// upload does a single non-multipart upload
//
// This is recommended for less than 50 MB of content
func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
	upload := api.UploadFile{
		Name: replaceReservedChars(leaf),
		Name: enc.FromStandardName(leaf),
		ContentModifiedAt: api.Time(modTime),
		ContentCreatedAt: api.Time(modTime),
		Parent: api.Parent{
@@ -1078,7 +1180,7 @@ func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Tim
		opts.Path = "/files/content"
	}
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(&opts, &upload, &result)
		resp, err = o.fs.srv.CallJSON(ctx, &opts, &upload, &result)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -1111,16 +1213,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

	// Upload with simple or multipart
	if size <= int64(o.fs.opt.UploadCutoff) {
		err = o.upload(in, leaf, directoryID, modTime)
		err = o.upload(ctx, in, leaf, directoryID, modTime)
	} else {
		err = o.uploadMultipart(in, leaf, directoryID, size, modTime)
		err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime)
	}
	return err
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	return o.fs.deleteObject(o.id)
	return o.fs.deleteObject(ctx, o.id)
}

// ID returns the ID of the Object if known, or "" if not

@@ -4,6 +4,7 @@ package box

import (
	"bytes"
	"context"
	"crypto/sha1"
	"encoding/base64"
	"encoding/json"
@@ -22,7 +23,7 @@ import (
)

// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession(leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
	opts := rest.Opts{
		Method: "POST",
		Path: "/files/upload_sessions",
@@ -37,11 +38,11 @@ func (o *Object) createUploadSession(leaf, directoryID string, size int64) (resp
	} else {
		opts.Path = "/files/upload_sessions"
		request.FolderID = directoryID
		request.FileName = replaceReservedChars(leaf)
		request.FileName = enc.FromStandardName(leaf)
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(&opts, &request, &response)
		resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, &response)
		return shouldRetry(resp, err)
	})
	return
@@ -53,7 +54,7 @@ func sha1Digest(digest []byte) string {
}

// uploadPart uploads a part in an upload session
func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
	chunkSize := int64(len(chunk))
	sha1sum := sha1.Sum(chunk)
	opts := rest.Opts{
@@ -70,7 +71,7 @@ func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []b
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		opts.Body = wrap(bytes.NewReader(chunk))
		resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -80,7 +81,7 @@ func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []b
}

// commitUpload finishes an upload session
func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
	opts := rest.Opts{
		Method: "POST",
		Path: "/files/upload_sessions/" + SessionID + "/commit",
@@ -104,7 +105,7 @@ func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.T
outer:
	for tries = 0; tries < maxTries; tries++ {
		err = o.fs.pacer.Call(func() (bool, error) {
			resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
			resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
			if err != nil {
				return shouldRetry(resp, err)
			}
@@ -154,7 +155,7 @@ outer:
}

// abortUpload cancels an upload session
func (o *Object) abortUpload(SessionID string) (err error) {
func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error) {
	opts := rest.Opts{
		Method: "DELETE",
		Path: "/files/upload_sessions/" + SessionID,
@@ -163,16 +164,16 @@ func (o *Object) abortUpload(SessionID string) (err error) {
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(&opts)
		resp, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
	})
	return err
}

// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
	// Create upload session
	session, err := o.createUploadSession(leaf, directoryID, size)
	session, err := o.createUploadSession(ctx, leaf, directoryID, size)
	if err != nil {
		return errors.Wrap(err, "multipart upload create session failed")
	}
@@ -183,7 +184,7 @@ func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size in
	defer func() {
		if err != nil {
			fs.Debugf(o, "Cancelling multipart upload: %v", err)
			cancelErr := o.abortUpload(session.ID)
			cancelErr := o.abortUpload(ctx, session.ID)
			if cancelErr != nil {
				fs.Logf(o, "Failed to cancel multipart upload: %v", err)
			}
@@ -235,7 +236,7 @@ outer:
			defer wg.Done()
			defer o.fs.uploadToken.Put()
			fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
			partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap)
			partResponse, err := o.uploadPart(session.ID, position, size, buf, wrap)
			if err != nil {
				err = errors.Wrap(err, "multipart upload failed to upload part")
|
||||
select {
|
||||
@@ -263,7 +264,7 @@ outer:
|
||||
}
|
||||
|
||||
// Finalise the upload session
|
||||
result, err := o.commitUpload(session.ID, parts, modTime, hash.Sum(nil))
|
||||
result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "multipart upload failed to finalize")
|
||||
}
|
||||
|
||||
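Taken together, these box hunks apply one mechanical change: every helper that ends in an HTTP request grows a leading context.Context parameter, threaded from the fs interface methods down to rest.Client, so cancelling the caller's context aborts the request in flight. A condensed sketch of the pattern, reusing the pacer and rest helpers above (the DELETE endpoint here is hypothetical, for illustration only):

	func (o *Object) removeByID(ctx context.Context, id string) error {
		opts := rest.Opts{
			Method: "DELETE",
			Path:   "/files/" + id,
		}
		return o.fs.pacer.Call(func() (bool, error) {
			// ctx flows into the underlying http.Request, so a cancelled
			// context aborts the call instead of waiting for a timeout
			resp, err := o.fs.srv.Call(ctx, &opts)
			return shouldRetry(resp, err)
		})
	}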
backend/cache/cache.go | 20 (vendored)
@@ -1864,6 +1864,24 @@ func cleanPath(p string) string {
 	return p
 }

+// UserInfo returns info about the connected user
+func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
+	do := f.Fs.Features().UserInfo
+	if do == nil {
+		return nil, fs.ErrorNotImplemented
+	}
+	return do(ctx)
+}
+
+// Disconnect the current user
+func (f *Fs) Disconnect(ctx context.Context) error {
+	do := f.Fs.Features().Disconnect
+	if do == nil {
+		return fs.ErrorNotImplemented
+	}
+	return do(ctx)
+}
+
 // Check the interfaces are satisfied
 var (
 	_ fs.Fs             = (*Fs)(nil)
@@ -1879,4 +1897,6 @@ var (
 	_ fs.ListRer        = (*Fs)(nil)
 	_ fs.ChangeNotifier = (*Fs)(nil)
 	_ fs.Abouter        = (*Fs)(nil)
+	_ fs.UserInfoer     = (*Fs)(nil)
+	_ fs.Disconnecter   = (*Fs)(nil)
 )
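The wrappers above follow the usual rclone overlay pattern: an optional capability is discovered through Features() rather than by type-asserting the wrapped backend, and the trailing interface assertions turn a dropped method into a compile error. A caller-side sketch of the same discovery step (hypothetical caller, mirroring the delegation above):

	if do := f.Features().UserInfo; do != nil {
		// only reached when the wrapped backend implements UserInfo
		info, err := do(ctx)
		fmt.Println(info, err)
	}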
backend/cache/cache_test.go | 1 (vendored)
@@ -19,5 +19,6 @@ func TestIntegration(t *testing.T) {
 		NilObject:                    (*cache.Object)(nil),
 		UnimplementableFsMethods:     []string{"PublicLink", "MergeDirs", "OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
+		SkipInvalidUTF8:              true, // invalid UTF-8 confuses the cache
 	})
 }
backend/chunker/chunker.go | 2199 (new file)
File diff suppressed because it is too large
backend/chunker/chunker_internal_test.go | 605 (new file)
@@ -0,0 +1,605 @@
package chunker

import (
	"bytes"
	"context"
	"flag"
	"fmt"
	"io/ioutil"
	"path"
	"regexp"
	"strings"
	"testing"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
	"github.com/rclone/rclone/lib/random"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Command line flags
var (
	UploadKilobytes = flag.Int("upload-kilobytes", 0, "Upload size in Kilobytes, set this to test large uploads")
)

// test that chunking does not break large uploads
func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
	t.Run(fmt.Sprintf("PutLarge%dk", kilobytes), func(t *testing.T) {
		fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
			ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
			Path:    fmt.Sprintf("chunker-upload-%dk", kilobytes),
			Size:    int64(kilobytes) * int64(fs.KibiByte),
		})
	})
}
// test chunk name parser
func testChunkNameFormat(t *testing.T, f *Fs) {
	saveOpt := f.opt
	defer func() {
		// restore original settings (f is pointer, f.opt is struct)
		f.opt = saveOpt
		_ = f.setChunkNameFormat(f.opt.NameFormat)
	}()

	assertFormat := func(pattern, wantDataFormat, wantCtrlFormat, wantNameRegexp string) {
		err := f.setChunkNameFormat(pattern)
		assert.NoError(t, err)
		assert.Equal(t, wantDataFormat, f.dataNameFmt)
		assert.Equal(t, wantCtrlFormat, f.ctrlNameFmt)
		assert.Equal(t, wantNameRegexp, f.nameRegexp.String())
	}

	assertFormatValid := func(pattern string) {
		err := f.setChunkNameFormat(pattern)
		assert.NoError(t, err)
	}

	assertFormatInvalid := func(pattern string) {
		err := f.setChunkNameFormat(pattern)
		assert.Error(t, err)
	}

	assertMakeName := func(wantChunkName, mainName string, chunkNo int, ctrlType string, xactNo int64) {
		gotChunkName := f.makeChunkName(mainName, chunkNo, ctrlType, xactNo)
		assert.Equal(t, wantChunkName, gotChunkName)
	}

	assertMakeNamePanics := func(mainName string, chunkNo int, ctrlType string, xactNo int64) {
		assert.Panics(t, func() {
			_ = f.makeChunkName(mainName, chunkNo, ctrlType, xactNo)
		}, "makeChunkName(%q,%d,%q,%d) should panic", mainName, chunkNo, ctrlType, xactNo)
	}

	assertParseName := func(fileName, wantMainName string, wantChunkNo int, wantCtrlType string, wantXactNo int64) {
		gotMainName, gotChunkNo, gotCtrlType, gotXactNo := f.parseChunkName(fileName)
		assert.Equal(t, wantMainName, gotMainName)
		assert.Equal(t, wantChunkNo, gotChunkNo)
		assert.Equal(t, wantCtrlType, gotCtrlType)
		assert.Equal(t, wantXactNo, gotXactNo)
	}

	const newFormatSupported = false // support for patterns not starting with base name (*)

	// valid formats
	assertFormat(`*.rclone_chunk.###`, `%s.rclone_chunk.%03d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
	assertFormat(`*.rclone_chunk.#`, `%s.rclone_chunk.%d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
	assertFormat(`*_chunk_#####`, `%s_chunk_%05d`, `%s_chunk__%s`, `^(.+?)_chunk_(?:([0-9]{5,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
	assertFormat(`*-chunk-#`, `%s-chunk-%d`, `%s-chunk-_%s`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
	assertFormat(`*-chunk-#-%^$()[]{}.+-!?:\`, `%s-chunk-%d-%%^$()[]{}.+-!?:\`, `%s-chunk-_%s-%%^$()[]{}.+-!?:\`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z]{3,9}))-%\^\$\(\)\[\]\{\}\.\+-!\?:\\(?:\.\.tmp_([0-9]{10,19}))?$`)
	if newFormatSupported {
		assertFormat(`_*-chunk-##,`, `_%s-chunk-%02d,`, `_%s-chunk-_%s,`, `^_(.+?)-chunk-(?:([0-9]{2,})|_([a-z]{3,9})),(?:\.\.tmp_([0-9]{10,19}))?$`)
	}

	// invalid formats
	assertFormatInvalid(`chunk-#`)
	assertFormatInvalid(`*-chunk`)
	assertFormatInvalid(`*-*-chunk-#`)
	assertFormatInvalid(`*-chunk-#-#`)
	assertFormatInvalid(`#-chunk-*`)
	assertFormatInvalid(`*/#`)

	assertFormatValid(`*#`)
	assertFormatInvalid(`**#`)
	assertFormatInvalid(`#*`)
	assertFormatInvalid(``)
	assertFormatInvalid(`-`)

	// quick tests
	if newFormatSupported {
		assertFormat(`part_*_#`, `part_%s_%d`, `part_%s__%s`, `^part_(.+?)_(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
		f.opt.StartFrom = 1

		assertMakeName(`part_fish_1`, "fish", 0, "", -1)
		assertParseName(`part_fish_43`, "fish", 42, "", -1)
		assertMakeName(`part_fish_3..tmp_0000000004`, "fish", 2, "", 4)
		assertParseName(`part_fish_4..tmp_0000000005`, "fish", 3, "", 5)
		assertMakeName(`part_fish__locks`, "fish", -2, "locks", -3)
		assertParseName(`part_fish__locks`, "fish", -1, "locks", -1)
		assertMakeName(`part_fish__blockinfo..tmp_1234567890123456789`, "fish", -3, "blockinfo", 1234567890123456789)
		assertParseName(`part_fish__blockinfo..tmp_1234567890123456789`, "fish", -1, "blockinfo", 1234567890123456789)
	}

	// prepare format for long tests
	assertFormat(`*.chunk.###`, `%s.chunk.%03d`, `%s.chunk._%s`, `^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
	f.opt.StartFrom = 2

	// valid data chunks
	assertMakeName(`fish.chunk.003`, "fish", 1, "", -1)
	assertMakeName(`fish.chunk.011..tmp_0000054321`, "fish", 9, "", 54321)
	assertMakeName(`fish.chunk.011..tmp_1234567890`, "fish", 9, "", 1234567890)
	assertMakeName(`fish.chunk.1916..tmp_123456789012345`, "fish", 1914, "", 123456789012345)

	assertParseName(`fish.chunk.003`, "fish", 1, "", -1)
	assertParseName(`fish.chunk.004..tmp_0000000021`, "fish", 2, "", 21)
	assertParseName(`fish.chunk.021`, "fish", 19, "", -1)
	assertParseName(`fish.chunk.323..tmp_1234567890123456789`, "fish", 321, "", 1234567890123456789)

	// parsing invalid data chunk names
	assertParseName(`fish.chunk.3`, "", -1, "", -1)
	assertParseName(`fish.chunk.001`, "", -1, "", -1)
	assertParseName(`fish.chunk.21`, "", -1, "", -1)
	assertParseName(`fish.chunk.-21`, "", -1, "", -1)

	assertParseName(`fish.chunk.004.tmp_0000000021`, "", -1, "", -1)
	assertParseName(`fish.chunk.003..tmp_123456789`, "", -1, "", -1)
	assertParseName(`fish.chunk.003..tmp_012345678901234567890123456789`, "", -1, "", -1)
	assertParseName(`fish.chunk.003..tmp_-1`, "", -1, "", -1)

	// valid control chunks
	assertMakeName(`fish.chunk._info`, "fish", -1, "info", -1)
	assertMakeName(`fish.chunk._locks`, "fish", -2, "locks", -1)
	assertMakeName(`fish.chunk._blockinfo`, "fish", -3, "blockinfo", -1)

	assertParseName(`fish.chunk._info`, "fish", -1, "info", -1)
	assertParseName(`fish.chunk._locks`, "fish", -1, "locks", -1)
	assertParseName(`fish.chunk._blockinfo`, "fish", -1, "blockinfo", -1)

	// valid temporary control chunks
	assertMakeName(`fish.chunk._info..tmp_0000000021`, "fish", -1, "info", 21)
	assertMakeName(`fish.chunk._locks..tmp_0000054321`, "fish", -2, "locks", 54321)
	assertMakeName(`fish.chunk._uploads..tmp_0000000000`, "fish", -3, "uploads", 0)
	assertMakeName(`fish.chunk._blockinfo..tmp_1234567890123456789`, "fish", -4, "blockinfo", 1234567890123456789)

	assertParseName(`fish.chunk._info..tmp_0000000021`, "fish", -1, "info", 21)
	assertParseName(`fish.chunk._locks..tmp_0000054321`, "fish", -1, "locks", 54321)
	assertParseName(`fish.chunk._uploads..tmp_0000000000`, "fish", -1, "uploads", 0)
	assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789`, "fish", -1, "blockinfo", 1234567890123456789)

	// parsing invalid control chunk names
	assertParseName(`fish.chunk.info`, "", -1, "", -1)
	assertParseName(`fish.chunk.locks`, "", -1, "", -1)
	assertParseName(`fish.chunk.uploads`, "", -1, "", -1)
	assertParseName(`fish.chunk.blockinfo`, "", -1, "", -1)

	assertParseName(`fish.chunk._os`, "", -1, "", -1)
	assertParseName(`fish.chunk._futuredata`, "", -1, "", -1)
	assertParseName(`fish.chunk._me_ta`, "", -1, "", -1)
	assertParseName(`fish.chunk._in-fo`, "", -1, "", -1)
	assertParseName(`fish.chunk._.bin`, "", -1, "", -1)

	assertParseName(`fish.chunk._locks..tmp_123456789`, "", -1, "", -1)
	assertParseName(`fish.chunk._meta..tmp_-1`, "", -1, "", -1)
	assertParseName(`fish.chunk._blockinfo..tmp_012345678901234567890123456789`, "", -1, "", -1)

	// short control chunk names: 3 letters ok, 1-2 letters not allowed
	assertMakeName(`fish.chunk._ext`, "fish", -1, "ext", -1)
	assertMakeName(`fish.chunk._ext..tmp_0000000021`, "fish", -1, "ext", 21)
	assertParseName(`fish.chunk._int`, "fish", -1, "int", -1)
	assertParseName(`fish.chunk._int..tmp_0000000021`, "fish", -1, "int", 21)
	assertMakeNamePanics("fish", -1, "in", -1)
	assertMakeNamePanics("fish", -1, "up", 4)
	assertMakeNamePanics("fish", -1, "x", -1)
	assertMakeNamePanics("fish", -1, "c", 4)

	// base file name can sometimes look like a valid chunk name
	assertParseName(`fish.chunk.003.chunk.004`, "fish.chunk.003", 2, "", -1)
	assertParseName(`fish.chunk.003.chunk.005..tmp_0000000021`, "fish.chunk.003", 3, "", 21)
	assertParseName(`fish.chunk.003.chunk._info`, "fish.chunk.003", -1, "info", -1)
	assertParseName(`fish.chunk.003.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk.003", -1, "blockinfo", 1234567890123456789)
	assertParseName(`fish.chunk.003.chunk._Meta`, "", -1, "", -1)
	assertParseName(`fish.chunk.003.chunk._x..tmp_0000054321`, "", -1, "", -1)

	assertParseName(`fish.chunk.004..tmp_0000000021.chunk.004`, "fish.chunk.004..tmp_0000000021", 2, "", -1)
	assertParseName(`fish.chunk.004..tmp_0000000021.chunk.005..tmp_0000000021`, "fish.chunk.004..tmp_0000000021", 3, "", 21)
	assertParseName(`fish.chunk.004..tmp_0000000021.chunk._info`, "fish.chunk.004..tmp_0000000021", -1, "info", -1)
	assertParseName(`fish.chunk.004..tmp_0000000021.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk.004..tmp_0000000021", -1, "blockinfo", 1234567890123456789)
	assertParseName(`fish.chunk.004..tmp_0000000021.chunk._Meta`, "", -1, "", -1)
	assertParseName(`fish.chunk.004..tmp_0000000021.chunk._x..tmp_0000054321`, "", -1, "", -1)

	assertParseName(`fish.chunk._info.chunk.004`, "fish.chunk._info", 2, "", -1)
	assertParseName(`fish.chunk._info.chunk.005..tmp_0000000021`, "fish.chunk._info", 3, "", 21)
	assertParseName(`fish.chunk._info.chunk._info`, "fish.chunk._info", -1, "info", -1)
	assertParseName(`fish.chunk._info.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk._info", -1, "blockinfo", 1234567890123456789)
	assertParseName(`fish.chunk._info.chunk._info.chunk._Meta`, "", -1, "", -1)
	assertParseName(`fish.chunk._info.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", -1)

	assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk.004`, "fish.chunk._blockinfo..tmp_1234567890123456789", 2, "", -1)
	assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk.005..tmp_0000000021`, "fish.chunk._blockinfo..tmp_1234567890123456789", 3, "", 21)
	assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info`, "fish.chunk._blockinfo..tmp_1234567890123456789", -1, "info", -1)
	assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk._blockinfo..tmp_1234567890123456789", -1, "blockinfo", 1234567890123456789)
	assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info.chunk._Meta`, "", -1, "", -1)
	assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", -1)

	// attempts to make invalid chunk names
	assertMakeNamePanics("fish", -1, "", -1)           // neither data nor control
	assertMakeNamePanics("fish", 0, "info", -1)        // both data and control
	assertMakeNamePanics("fish", -1, "futuredata", -1) // control type too long
	assertMakeNamePanics("fish", -1, "123", -1)        // digits not allowed
	assertMakeNamePanics("fish", -1, "Meta", -1)       // only lower case letters allowed
	assertMakeNamePanics("fish", -1, "in-fo", -1)      // punctuation not allowed
	assertMakeNamePanics("fish", -1, "_info", -1)
	assertMakeNamePanics("fish", -1, "info_", -1)
	assertMakeNamePanics("fish", -2, ".bind", -3)
	assertMakeNamePanics("fish", -2, "bind.", -3)

	assertMakeNamePanics("fish", -1, "", 1)            // neither data nor control
	assertMakeNamePanics("fish", 0, "info", 12)        // both data and control
	assertMakeNamePanics("fish", -1, "futuredata", 45) // control type too long
	assertMakeNamePanics("fish", -1, "123", 123)       // digits not allowed
	assertMakeNamePanics("fish", -1, "Meta", 456)      // only lower case letters allowed
	assertMakeNamePanics("fish", -1, "in-fo", 321)     // punctuation not allowed
	assertMakeNamePanics("fish", -1, "_info", 15678)
	assertMakeNamePanics("fish", -1, "info_", 999)
	assertMakeNamePanics("fish", -2, ".bind", 0)
	assertMakeNamePanics("fish", -2, "bind.", 0)
}
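The grammar these assertions pin down: `*` stands for the base file name, a run of `#` for a zero-padded decimal chunk number offset by start_from, control chunks replace the number with an underscore plus 3-9 lowercase letters, and in-flight chunks carry a `..tmp_<transaction>` suffix. A toy restatement of just the data-chunk case, assuming the `*.chunk.###` pattern and start_from = 2 used above:

	func dataChunkName(base string, chunkNo int) string {
		// dataChunkName("fish", 1) == "fish.chunk.003", matching assertMakeName above
		return fmt.Sprintf("%s.chunk.%03d", base, chunkNo+2)
	}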
func testSmallFileInternals(t *testing.T, f *Fs) {
	const dir = "small"
	ctx := context.Background()
	saveOpt := f.opt
	defer func() {
		f.opt.FailHard = false
		_ = operations.Purge(ctx, f.base, dir)
		f.opt = saveOpt
	}()
	f.opt.FailHard = false

	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")

	checkSmallFileInternals := func(obj fs.Object) {
		assert.NotNil(t, obj)
		o, ok := obj.(*Object)
		assert.True(t, ok)
		assert.NotNil(t, o)
		if o == nil {
			return
		}
		switch {
		case !f.useMeta:
			// If meta format is "none", non-chunked file (even empty)
			// internally is a single chunk without meta object.
			assert.Nil(t, o.main)
			assert.True(t, o.isComposite()) // sorry, sometimes a name is misleading
			assert.Equal(t, 1, len(o.chunks))
		case f.hashAll:
			// Consistent hashing forces meta object on small files too
			assert.NotNil(t, o.main)
			assert.True(t, o.isComposite())
			assert.Equal(t, 1, len(o.chunks))
		default:
			// normally non-chunked file is kept in the Object's main field
			assert.NotNil(t, o.main)
			assert.False(t, o.isComposite())
			assert.Equal(t, 0, len(o.chunks))
		}
	}

	checkContents := func(obj fs.Object, contents string) {
		assert.NotNil(t, obj)
		assert.Equal(t, int64(len(contents)), obj.Size())

		r, err := obj.Open(ctx)
		assert.NoError(t, err)
		assert.NotNil(t, r)
		if r == nil {
			return
		}
		data, err := ioutil.ReadAll(r)
		assert.NoError(t, err)
		assert.Equal(t, contents, string(data))
		_ = r.Close()
	}

	checkHashsum := func(obj fs.Object) {
		var ht hash.Type
		switch {
		case !f.hashAll:
			return
		case f.useMD5:
			ht = hash.MD5
		case f.useSHA1:
			ht = hash.SHA1
		default:
			return
		}
		// even empty files must have hashsum in consistent mode
		sum, err := obj.Hash(ctx, ht)
		assert.NoError(t, err)
		assert.NotEqual(t, sum, "")
	}

	checkSmallFile := func(name, contents string) {
		filename := path.Join(dir, name)
		item := fstest.Item{Path: filename, ModTime: modTime}
		_, put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
		assert.NotNil(t, put)
		checkSmallFileInternals(put)
		checkContents(put, contents)
		checkHashsum(put)

		// objects returned by Put and NewObject must have similar structure
		obj, err := f.NewObject(ctx, filename)
		assert.NoError(t, err)
		assert.NotNil(t, obj)
		checkSmallFileInternals(obj)
		checkContents(obj, contents)
		checkHashsum(obj)

		_ = obj.Remove(ctx)
		_ = put.Remove(ctx) // for good
	}

	checkSmallFile("emptyfile", "")
	checkSmallFile("smallfile", "Ok")
}
func testPreventCorruption(t *testing.T, f *Fs) {
	if f.opt.ChunkSize > 50 {
		t.Skip("this test requires small chunks")
	}
	const dir = "corrupted"
	ctx := context.Background()
	saveOpt := f.opt
	defer func() {
		f.opt.FailHard = false
		_ = operations.Purge(ctx, f.base, dir)
		f.opt = saveOpt
	}()
	f.opt.FailHard = true

	contents := random.String(250)
	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
	const overlapMessage = "chunk overlap"

	assertOverlapError := func(err error) {
		assert.Error(t, err)
		if err != nil {
			assert.Contains(t, err.Error(), overlapMessage)
		}
	}

	newFile := func(name string) fs.Object {
		item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime}
		_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
		require.NotNil(t, obj)
		return obj
	}
	billyObj := newFile("billy")

	billyChunkName := func(chunkNo int) string {
		return f.makeChunkName(billyObj.Remote(), chunkNo, "", -1)
	}

	err := f.Mkdir(ctx, billyChunkName(1))
	assertOverlapError(err)

	_, err = f.Move(ctx, newFile("silly1"), billyChunkName(2))
	assert.Error(t, err)
	assert.True(t, err == fs.ErrorCantMove || (err != nil && strings.Contains(err.Error(), overlapMessage)))

	_, err = f.Copy(ctx, newFile("silly2"), billyChunkName(3))
	assert.Error(t, err)
	assert.True(t, err == fs.ErrorCantCopy || (err != nil && strings.Contains(err.Error(), overlapMessage)))

	// accessing chunks in strict mode is prohibited
	f.opt.FailHard = true
	billyChunk4Name := billyChunkName(4)
	billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
	assertOverlapError(err)

	f.opt.FailHard = false
	billyChunk4, err = f.NewObject(ctx, billyChunk4Name)
	assert.NoError(t, err)
	require.NotNil(t, billyChunk4)

	f.opt.FailHard = true
	_, err = f.Put(ctx, bytes.NewBufferString(contents), billyChunk4)
	assertOverlapError(err)

	// you can freely read chunks (if you have an object)
	r, err := billyChunk4.Open(ctx)
	assert.NoError(t, err)
	var chunkContents []byte
	assert.NotPanics(t, func() {
		chunkContents, err = ioutil.ReadAll(r)
		_ = r.Close()
	})
	assert.NoError(t, err)
	assert.NotEqual(t, contents, string(chunkContents))

	// but you can't change them
	err = billyChunk4.Update(ctx, bytes.NewBufferString(contents), newFile("silly3"))
	assertOverlapError(err)

	// Remove isn't special, you can't corrupt files even if you have an object
	err = billyChunk4.Remove(ctx)
	assertOverlapError(err)

	// recreate billy in case it was anyhow corrupted
	willyObj := newFile("willy")
	willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", -1)
	f.opt.FailHard = false
	willyChunk, err := f.NewObject(ctx, willyChunkName)
	f.opt.FailHard = true
	assert.NoError(t, err)
	require.NotNil(t, willyChunk)

	_, err = operations.Copy(ctx, f, willyChunk, willyChunkName, newFile("silly4"))
	assertOverlapError(err)

	// operations.Move will return error when chunker's Move refused
	// to corrupt target file, but reverts to copy/delete method
	// still trying to delete target chunk. Chunker must come to rescue.
	_, err = operations.Move(ctx, f, willyChunk, willyChunkName, newFile("silly5"))
	assertOverlapError(err)
	r, err = willyChunk.Open(ctx)
	assert.NoError(t, err)
	assert.NotPanics(t, func() {
		_, err = ioutil.ReadAll(r)
		_ = r.Close()
	})
	assert.NoError(t, err)
}
func testChunkNumberOverflow(t *testing.T, f *Fs) {
	if f.opt.ChunkSize > 50 {
		t.Skip("this test requires small chunks")
	}
	const dir = "wreaked"
	const wreakNumber = 10200300
	ctx := context.Background()
	saveOpt := f.opt
	defer func() {
		f.opt.FailHard = false
		_ = operations.Purge(ctx, f.base, dir)
		f.opt = saveOpt
	}()

	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
	contents := random.String(100)

	newFile := func(f fs.Fs, name string) (fs.Object, string) {
		filename := path.Join(dir, name)
		item := fstest.Item{Path: filename, ModTime: modTime}
		_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
		require.NotNil(t, obj)
		return obj, filename
	}

	f.opt.FailHard = false
	file, fileName := newFile(f, "wreaker")
	wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", -1))

	f.opt.FailHard = false
	fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
	_, err := f.NewObject(ctx, fileName)
	assert.Error(t, err)

	f.opt.FailHard = true
	_, err = f.List(ctx, dir)
	assert.Error(t, err)
	_, err = f.NewObject(ctx, fileName)
	assert.Error(t, err)

	f.opt.FailHard = false
	_ = wreak.Remove(ctx)
	_ = file.Remove(ctx)
}
func testMetadataInput(t *testing.T, f *Fs) {
	const minChunkForTest = 50
	if f.opt.ChunkSize < minChunkForTest {
		t.Skip("this test requires chunks that fit metadata")
	}

	const dir = "usermeta"
	ctx := context.Background()
	saveOpt := f.opt
	defer func() {
		f.opt.FailHard = false
		_ = operations.Purge(ctx, f.base, dir)
		f.opt = saveOpt
	}()
	f.opt.FailHard = false

	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")

	putFile := func(f fs.Fs, name, contents, message string, check bool) fs.Object {
		item := fstest.Item{Path: name, ModTime: modTime}
		_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
		assert.NotNil(t, obj, message)
		return obj
	}

	runSubtest := func(contents, name string) {
		description := fmt.Sprintf("file with %s metadata", name)
		filename := path.Join(dir, name)
		require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")

		part := putFile(f.base, f.makeChunkName(filename, 0, "", -1), "oops", "", true)
		_ = putFile(f, filename, contents, "upload "+description, false)

		obj, err := f.NewObject(ctx, filename)
		assert.NoError(t, err, "access "+description)
		assert.NotNil(t, obj)
		assert.Equal(t, int64(len(contents)), obj.Size(), "size "+description)

		o, ok := obj.(*Object)
		assert.NotNil(t, ok)
		if o != nil {
			assert.True(t, o.isComposite() && len(o.chunks) == 1, description+" is forced composite")
			o = nil
		}

		defer func() {
			_ = obj.Remove(ctx)
			_ = part.Remove(ctx)
		}()

		r, err := obj.Open(ctx)
		assert.NoError(t, err, "open "+description)
		assert.NotNil(t, r, "open stream of "+description)
		if err == nil && r != nil {
			data, err := ioutil.ReadAll(r)
			assert.NoError(t, err, "read all of "+description)
			assert.Equal(t, contents, string(data), description+" contents is ok")
			_ = r.Close()
		}
	}

	metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "")
	require.NoError(t, err)
	todaysMeta := string(metaData)
	runSubtest(todaysMeta, "today")

	pastMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":1`)
	pastMeta = regexp.MustCompile(`"size":[0-9]+`).ReplaceAllLiteralString(pastMeta, `"size":0`)
	runSubtest(pastMeta, "past")

	futureMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":999`)
	futureMeta = regexp.MustCompile(`"nchunks":[0-9]+`).ReplaceAllLiteralString(futureMeta, `"nchunks":0,"x":"y"`)
	runSubtest(futureMeta, "future")
}
// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
	t.Run("PutLarge", func(t *testing.T) {
		if *UploadKilobytes <= 0 {
			t.Skip("-upload-kilobytes is not set")
		}
		testPutLarge(t, f, *UploadKilobytes)
	})
	t.Run("ChunkNameFormat", func(t *testing.T) {
		testChunkNameFormat(t, f)
	})
	t.Run("SmallFileInternals", func(t *testing.T) {
		testSmallFileInternals(t, f)
	})
	t.Run("PreventCorruption", func(t *testing.T) {
		testPreventCorruption(t, f)
	})
	t.Run("ChunkNumberOverflow", func(t *testing.T) {
		testChunkNumberOverflow(t, f)
	})
	t.Run("MetadataInput", func(t *testing.T) {
		testMetadataInput(t, f)
	})
}

var _ fstests.InternalTester = (*Fs)(nil)
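Given the flag and skip logic above, PutLarge only runs when -upload-kilobytes is set; an invocation exercising it would look something like `go test ./backend/chunker -run TestIntegration -upload-kilobytes 100000` (the package path and flag plumbing are assumed from the declarations above, not shown in this diff).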
backend/chunker/chunker_test.go | 58 (new file)
@@ -0,0 +1,58 @@
// Test the Chunker filesystem interface
package chunker_test

import (
	"flag"
	"os"
	"path/filepath"
	"testing"

	_ "github.com/rclone/rclone/backend/all" // for integration tests
	"github.com/rclone/rclone/backend/chunker"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
)

// Command line flags
var (
	// Invalid characters are not supported by some remotes, eg. Mailru.
	// We enable testing with invalid characters when -remote is not set, so
	// chunker overlays a local directory, but invalid characters are disabled
	// by default when -remote is set, eg. when test_all runs backend tests.
	// You can still test with invalid characters using the below flag.
	UseBadChars = flag.Bool("bad-chars", false, "Set to test bad characters in file names when -remote is set")
)

// TestIntegration runs integration tests against a concrete remote
// set by the -remote flag. If the flag is not set, it creates a
// dynamic chunker overlay wrapping a local temporary directory.
func TestIntegration(t *testing.T) {
	opt := fstests.Opt{
		RemoteName:               *fstest.RemoteName,
		NilObject:                (*chunker.Object)(nil),
		SkipBadWindowsCharacters: !*UseBadChars,
		UnimplementableObjectMethods: []string{
			"MimeType",
			"GetTier",
			"SetTier",
		},
		UnimplementableFsMethods: []string{
			"PublicLink",
			"OpenWriterAt",
			"MergeDirs",
			"DirCacheFlush",
			"UserInfo",
			"Disconnect",
		},
	}
	if *fstest.RemoteName == "" {
		name := "TestChunker"
		opt.RemoteName = name + ":"
		tempDir := filepath.Join(os.TempDir(), "rclone-chunker-test-standard")
		opt.ExtraConfig = []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "chunker"},
			{Name: name, Key: "remote", Value: tempDir},
		}
	}
	fstests.Run(t, &opt)
}
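For reference, the ExtraConfig items above amount to configuring the remote by hand; a hypothetical rclone.conf section doing the same (the temporary directory path varies per machine):

	[TestChunker]
	type = chunker
	remote = /tmp/rclone-chunker-test-standard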
@@ -208,21 +208,6 @@ func (c *cipher) putBlock(buf []byte) {
 	c.buffers.Put(buf)
 }

-// check to see if the byte string is valid with no control characters
-// from 0x00 to 0x1F and is a valid UTF-8 string
-func checkValidString(buf []byte) error {
-	for i := range buf {
-		c := buf[i]
-		if c >= 0x00 && c < 0x20 || c == 0x7F {
-			return ErrorBadDecryptControlChar
-		}
-	}
-	if !utf8.Valid(buf) {
-		return ErrorBadDecryptUTF8
-	}
-	return nil
-}
-
 // encodeFileName encodes a filename using a modified version of
 // standard base32 as described in RFC4648
 //
@@ -294,10 +279,6 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
 	if err != nil {
 		return "", err
 	}
-	err = checkValidString(plaintext)
-	if err != nil {
-		return "", err
-	}
 	return string(plaintext), err
 }
@@ -44,69 +44,6 @@ func TestNewNameEncryptionModeString(t *testing.T) {
 	assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3")
 }

-func TestValidString(t *testing.T) {
-	for _, test := range []struct {
-		in       string
-		expected error
-	}{
-		{"", nil},
-		{"\x01", ErrorBadDecryptControlChar},
-		{"a\x02", ErrorBadDecryptControlChar},
-		{"abc\x03", ErrorBadDecryptControlChar},
-		{"abc\x04def", ErrorBadDecryptControlChar},
-		{"\x05d", ErrorBadDecryptControlChar},
-		{"\x06def", ErrorBadDecryptControlChar},
-		{"\x07", ErrorBadDecryptControlChar},
-		{"\x08", ErrorBadDecryptControlChar},
-		{"\x09", ErrorBadDecryptControlChar},
-		{"\x0A", ErrorBadDecryptControlChar},
-		{"\x0B", ErrorBadDecryptControlChar},
-		{"\x0C", ErrorBadDecryptControlChar},
-		{"\x0D", ErrorBadDecryptControlChar},
-		{"\x0E", ErrorBadDecryptControlChar},
-		{"\x0F", ErrorBadDecryptControlChar},
-		{"\x10", ErrorBadDecryptControlChar},
-		{"\x11", ErrorBadDecryptControlChar},
-		{"\x12", ErrorBadDecryptControlChar},
-		{"\x13", ErrorBadDecryptControlChar},
-		{"\x14", ErrorBadDecryptControlChar},
-		{"\x15", ErrorBadDecryptControlChar},
-		{"\x16", ErrorBadDecryptControlChar},
-		{"\x17", ErrorBadDecryptControlChar},
-		{"\x18", ErrorBadDecryptControlChar},
-		{"\x19", ErrorBadDecryptControlChar},
-		{"\x1A", ErrorBadDecryptControlChar},
-		{"\x1B", ErrorBadDecryptControlChar},
-		{"\x1C", ErrorBadDecryptControlChar},
-		{"\x1D", ErrorBadDecryptControlChar},
-		{"\x1E", ErrorBadDecryptControlChar},
-		{"\x1F", ErrorBadDecryptControlChar},
-		{"\x20", nil},
-		{"\x7E", nil},
-		{"\x7F", ErrorBadDecryptControlChar},
-		{"£100", nil},
-		{`hello? sausage/êé/Hello, 世界/ " ' @ < > & ?/z.txt`, nil},
-		{"£100", nil},
-		// Following tests from https://secure.php.net/manual/en/reference.pcre.pattern.modifiers.php#54805
-		{"a", nil},                                        // Valid ASCII
-		{"\xc3\xb1", nil},                                 // Valid 2 Octet Sequence
-		{"\xc3\x28", ErrorBadDecryptUTF8},                 // Invalid 2 Octet Sequence
-		{"\xa0\xa1", ErrorBadDecryptUTF8},                 // Invalid Sequence Identifier
-		{"\xe2\x82\xa1", nil},                             // Valid 3 Octet Sequence
-		{"\xe2\x28\xa1", ErrorBadDecryptUTF8},             // Invalid 3 Octet Sequence (in 2nd Octet)
-		{"\xe2\x82\x28", ErrorBadDecryptUTF8},             // Invalid 3 Octet Sequence (in 3rd Octet)
-		{"\xf0\x90\x8c\xbc", nil},                         // Valid 4 Octet Sequence
-		{"\xf0\x28\x8c\xbc", ErrorBadDecryptUTF8},         // Invalid 4 Octet Sequence (in 2nd Octet)
-		{"\xf0\x90\x28\xbc", ErrorBadDecryptUTF8},         // Invalid 4 Octet Sequence (in 3rd Octet)
-		{"\xf0\x28\x8c\x28", ErrorBadDecryptUTF8},         // Invalid 4 Octet Sequence (in 4th Octet)
-		{"\xf8\xa1\xa1\xa1\xa1", ErrorBadDecryptUTF8},     // Valid 5 Octet Sequence (but not Unicode!)
-		{"\xfc\xa1\xa1\xa1\xa1\xa1", ErrorBadDecryptUTF8}, // Valid 6 Octet Sequence (but not Unicode!)
-	} {
-		actual := checkValidString([]byte(test.in))
-		assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
-	}
-}
-
 func TestEncodeFileName(t *testing.T) {
 	for _, test := range []struct {
 		in string
@@ -210,8 +147,6 @@ func TestDecryptSegment(t *testing.T) {
 		{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
 		{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
 		{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
-		{c.encryptSegment("\x01"), ErrorBadDecryptControlChar},
-		{c.encryptSegment("\xc3\x28"), ErrorBadDecryptUTF8},
 	} {
 		actual, actualErr := c.decryptSegment(test.in)
 		assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
@@ -705,16 +640,16 @@ var (

 // Test test infrastructure first!
 func TestRandomSource(t *testing.T) {
-	source := newRandomSource(1E8)
-	sink := newRandomSource(1E8)
+	source := newRandomSource(1e8)
+	sink := newRandomSource(1e8)
 	n, err := io.Copy(sink, source)
 	assert.NoError(t, err)
-	assert.Equal(t, int64(1E8), n)
+	assert.Equal(t, int64(1e8), n)

-	source = newRandomSource(1E8)
+	source = newRandomSource(1e8)
 	buf := make([]byte, 16)
 	_, _ = source.Read(buf)
-	sink = newRandomSource(1E8)
+	sink = newRandomSource(1e8)
 	_, err = io.Copy(sink, source)
 	assert.Error(t, err, "Error in stream")
 }
@@ -754,23 +689,23 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
 }

 func TestEncryptDecrypt1(t *testing.T) {
-	testEncryptDecrypt(t, 1, 1E7)
+	testEncryptDecrypt(t, 1, 1e7)
 }

 func TestEncryptDecrypt32(t *testing.T) {
-	testEncryptDecrypt(t, 32, 1E8)
+	testEncryptDecrypt(t, 32, 1e8)
 }

 func TestEncryptDecrypt4096(t *testing.T) {
-	testEncryptDecrypt(t, 4096, 1E8)
+	testEncryptDecrypt(t, 4096, 1e8)
 }

 func TestEncryptDecrypt65536(t *testing.T) {
-	testEncryptDecrypt(t, 65536, 1E8)
+	testEncryptDecrypt(t, 65536, 1e8)
 }

 func TestEncryptDecrypt65537(t *testing.T) {
-	testEncryptDecrypt(t, 65537, 1E8)
+	testEncryptDecrypt(t, 65537, 1e8)
 }

 var (
@@ -803,7 +738,7 @@ func TestEncryptData(t *testing.T) {
 	} {
 		c, err := newCipher(NameEncryptionStandard, "", "", true)
 		assert.NoError(t, err)
-		c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
+		c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator

 		// Check encode works
 		buf := bytes.NewBuffer(test.in)
@@ -826,7 +761,7 @@ func TestEncryptData(t *testing.T) {
 func TestNewEncrypter(t *testing.T) {
 	c, err := newCipher(NameEncryptionStandard, "", "", true)
 	assert.NoError(t, err)
-	c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
+	c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator

 	z := &zeroes{}

@@ -853,7 +788,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
 	fh, err := c.newEncrypter(in, nil)
 	assert.NoError(t, err)

-	n, err := io.CopyN(ioutil.Discard, fh, 1E6)
+	n, err := io.CopyN(ioutil.Discard, fh, 1e6)
 	assert.Equal(t, io.ErrUnexpectedEOF, err)
 	assert.Equal(t, int64(32), n)
 }
@@ -885,7 +820,7 @@ func (c *closeDetector) Close() error {
 func TestNewDecrypter(t *testing.T) {
 	c, err := newCipher(NameEncryptionStandard, "", "", true)
 	assert.NoError(t, err)
-	c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
+	c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator

 	cd := newCloseDetector(bytes.NewBuffer(file0))
 	fh, err := c.newDecrypter(cd)
@@ -936,7 +871,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
 	fh, err := c.newDecrypter(in)
 	assert.NoError(t, err)

-	n, err := io.CopyN(ioutil.Discard, fh, 1E6)
+	n, err := io.CopyN(ioutil.Discard, fh, 1e6)
 	assert.Equal(t, io.ErrUnexpectedEOF, err)
 	assert.Equal(t, int64(16), n)
 }
|
||||
return newDir
|
||||
}
|
||||
|
||||
// UserInfo returns info about the connected user
|
||||
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
|
||||
do := f.Fs.Features().UserInfo
|
||||
if do == nil {
|
||||
return nil, fs.ErrorNotImplemented
|
||||
}
|
||||
return do(ctx)
|
||||
}
|
||||
|
||||
// Disconnect the current user
|
||||
func (f *Fs) Disconnect(ctx context.Context) error {
|
||||
do := f.Fs.Features().Disconnect
|
||||
if do == nil {
|
||||
return fs.ErrorNotImplemented
|
||||
}
|
||||
return do(ctx)
|
||||
}
|
||||
|
||||
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
|
||||
//
|
||||
// This encrypts the remote name and adjusts the size
|
||||
@@ -888,6 +906,8 @@ var (
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.UserInfoer = (*Fs)(nil)
|
||||
_ fs.Disconnecter = (*Fs)(nil)
|
||||
_ fs.ObjectInfo = (*ObjectInfo)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.ObjectUnWrapper = (*Object)(nil)
|
||||
|
||||
@@ -32,6 +32,7 @@ import (
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/config/obscure"
+	"github.com/rclone/rclone/fs/encodings"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
@@ -47,6 +48,8 @@ import (
 	"google.golang.org/api/googleapi"
 )

+const enc = encodings.Drive
+
 // Constants
 const (
 	rcloneClientID = "202264815644.apps.googleusercontent.com"
@@ -156,6 +159,7 @@ func init() {
 		Description: "Google Drive",
 		NewFs:       NewFs,
 		Config: func(name string, m configmap.Mapper) {
+			ctx := context.TODO()
 			// Parse config into Options struct
 			opt := new(Options)
 			err := configstruct.Set(m, opt)
@@ -177,7 +181,7 @@ func init() {
 					log.Fatalf("Failed to configure token: %v", err)
 				}
 			}
-			err = configTeamDrive(opt, m, name)
+			err = configTeamDrive(ctx, opt, m, name)
 			if err != nil {
 				log.Fatalf("Failed to configure team drive: %v", err)
 			}
@@ -598,11 +602,10 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
 	}
 	var stems []string
 	if title != "" {
+		searchTitle := enc.FromStandardName(title)
 		// Escaping the backslash isn't documented but seems to work
-		searchTitle := strings.Replace(title, `\`, `\\`, -1)
+		searchTitle = strings.Replace(searchTitle, `\`, `\\`, -1)
 		searchTitle = strings.Replace(searchTitle, `'`, `\'`, -1)
-		// Convert ／ to / for search
-		searchTitle = strings.Replace(searchTitle, "／", "/", -1)

 		var titleQuery bytes.Buffer
 		_, _ = fmt.Fprintf(&titleQuery, "(name='%s'", searchTitle)
@@ -663,18 +666,16 @@ OUTER:
 	for {
 		var files *drive.FileList
 		err = f.pacer.Call(func() (bool, error) {
-			files, err = list.Fields(googleapi.Field(fields)).Do()
+			files, err = list.Fields(googleapi.Field(fields)).Context(ctx).Do()
 			return shouldRetry(err)
 		})
 		if err != nil {
 			return false, errors.Wrap(err, "couldn't list directory")
 		}
 		for _, item := range files.Files {
-			// Convert / to ／ for listing purposes
-			item.Name = strings.Replace(item.Name, "/", "／", -1)
+			item.Name = enc.ToStandardName(item.Name)
 			// Check the case of items is correct since
 			// the `=` operator is case insensitive.

 			if title != "" && title != item.Name {
 				found := false
 				for _, stem := range stems {
@@ -778,7 +779,7 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
 }

 // Figure out if the user wants to use a team drive
-func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
+func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name string) error {
 	// Stop if we are running non-interactive config
 	if fs.Config.AutoConfirm {
 		return nil
@@ -806,7 +807,7 @@ func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
 	for {
 		var teamDrives *drive.TeamDriveList
 		err = newPacer(opt).Call(func() (bool, error) {
-			teamDrives, err = listTeamDrives.Do()
+			teamDrives, err = listTeamDrives.Context(ctx).Do()
 			return shouldRetry(err)
 		})
 		if err != nil {
@@ -1209,6 +1210,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin

 // CreateDir makes a directory with pathID as parent and name leaf
 func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
+	leaf = enc.FromStandardName(leaf)
 	// fmt.Println("Making", path)
 	// Define the metadata for the directory we are going to create.
 	createInfo := &drive.File{
@@ -1644,6 +1646,7 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
 		return nil, err
 	}

+	leaf = enc.FromStandardName(leaf)
 	// Define the metadata for the file we are going to create.
 	createInfo := &drive.File{
 		Name: leaf,
@@ -1734,7 +1737,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
 		}
 	} else {
 		// Upload the file in chunks
-		info, err = f.Upload(in, size, srcMimeType, "", remote, createInfo)
+		info, err = f.Upload(ctx, in, size, srcMimeType, "", remote, createInfo)
 		if err != nil {
 			return nil, err
 		}
@@ -1941,6 +1944,9 @@ func (f *Fs) Purge(ctx context.Context) error {
 	if f.root == "" {
 		return errors.New("can't purge root directory")
 	}
+	if f.opt.TrashedOnly {
+		return errors.New("Can't purge with --drive-trashed-only. Use delete if you want to selectively delete files")
+	}
 	err := f.dirCache.FindRoot(ctx, false)
 	if err != nil {
 		return err
@@ -1972,7 +1978,7 @@ func (f *Fs) Purge(ctx context.Context) error {
 // CleanUp empties the trash
 func (f *Fs) CleanUp(ctx context.Context) error {
 	err := f.pacer.Call(func() (bool, error) {
-		err := f.svc.Files.EmptyTrash().Do()
+		err := f.svc.Files.EmptyTrash().Context(ctx).Do()
 		return shouldRetry(err)
 	})

@@ -1991,7 +1997,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	var about *drive.About
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
-		about, err = f.svc.About.Get().Fields("storageQuota").Do()
+		about, err = f.svc.About.Get().Fields("storageQuota").Context(ctx).Do()
 		return shouldRetry(err)
 	})
 	if err != nil {
@@ -2250,7 +2256,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
 			}
 		}
 		fs.Debugf(f, "Checking for changes on remote")
-		startPageToken, err = f.changeNotifyRunner(notifyFunc, startPageToken)
+		startPageToken, err = f.changeNotifyRunner(ctx, notifyFunc, startPageToken)
 		if err != nil {
 			fs.Infof(f, "Change notify listener failure: %s", err)
 		}
@@ -2272,7 +2278,7 @@ func (f *Fs) changeNotifyStartPageToken() (pageToken string, err error) {
 	return startPageToken.StartPageToken, nil
 }

-func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), startPageToken string) (newStartPageToken string, err error) {
+func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), startPageToken string) (newStartPageToken string, err error) {
 	pageToken := startPageToken
 	for {
 		var changeList *drive.ChangeList
@@ -2288,7 +2294,7 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), startPage
 			if f.isTeamDrive {
 				changesCall.TeamDriveId(f.opt.TeamDriveID)
 			}
-			changeList, err = changesCall.Do()
+			changeList, err = changesCall.Context(ctx).Do()
 			return shouldRetry(err)
 		})
 		if err != nil {
@@ -2312,6 +2318,7 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), startPage

 		// find the new path
 		if change.File != nil {
+			change.File.Name = enc.ToStandardName(change.File.Name)
 			changeType := fs.EntryDirectory
 			if change.File.MimeType != driveFolderType {
 				changeType = fs.EntryObject
@@ -2496,7 +2503,7 @@ func (o *baseObject) Storable() bool {

 // httpResponse gets an http.Response object for the object
 // using the url and method passed in
-func (o *baseObject) httpResponse(url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
+func (o *baseObject) httpResponse(ctx context.Context, url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
 	if url == "" {
 		return nil, nil, errors.New("forbidden to download - check sharing permission")
 	}
@@ -2504,6 +2511,7 @@ func (o *baseObject) httpResponse(url, method string, options []fs.OpenOption) (
 	if err != nil {
 		return req, nil, err
 	}
+	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 	fs.OpenOptionAddHTTPHeaders(req.Header, options)
 	if o.bytes == 0 {
 		// Don't supply range requests for 0 length objects as they always fail
@@ -2574,8 +2582,8 @@ func isGoogleError(err error, what string) bool {
 }

 // open a url for reading
-func (o *baseObject) open(url string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
-	_, res, err := o.httpResponse(url, "GET", options)
+func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+	_, res, err := o.httpResponse(ctx, url, "GET", options)
 	if err != nil {
 		if isGoogleError(err, "cannotDownloadAbusiveFile") {
 			if o.fs.opt.AcknowledgeAbuse {
@@ -2586,7 +2594,7 @@ func (o *baseObject) open(url string, options ...fs.OpenOption) (in io.ReadClose
 					url += "?"
 				}
 				url += "acknowledgeAbuse=true"
-				_, res, err = o.httpResponse(url, "GET", options)
+				_, res, err = o.httpResponse(ctx, url, "GET", options)
 			} else {
 				err = errors.Wrap(err, "Use the --drive-acknowledge-abuse flag to download this file")
 			}
@@ -2615,7 +2623,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 			o.v2Download = false
 		}
 	}
-	return o.baseObject.open(o.url, options...)
+	return o.baseObject.open(ctx, o.url, options...)
 }
 func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	// Update the size with what we are reading as it can change from
@@ -2640,7 +2648,7 @@ func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in
 	if offset != 0 {
 		return nil, errors.New("partial downloads are not supported while exporting Google Documents")
 	}
-	in, err = o.baseObject.open(o.url, options...)
+	in, err = o.baseObject.open(ctx, o.url, options...)
 	if in != nil {
 		in = &openDocumentFile{o: o, in: in}
 	}
@@ -2675,7 +2683,7 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
 	return ioutil.NopCloser(bytes.NewReader(data)), nil
 }

-func (o *baseObject) update(updateInfo *drive.File, uploadMimeType string, in io.Reader,
+func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
 	src fs.ObjectInfo) (info *drive.File, err error) {
 	// Make the API request to upload metadata and file data.
 	size := src.Size()
@@ -2693,7 +2701,7 @@ func (o *baseObject) update(updateInfo *drive.File, uploadMimeType string, in io
 		return
 	}
 	// Upload the file in chunks
-	return o.fs.Upload(in, size, uploadMimeType, o.id, o.remote, updateInfo)
+	return o.fs.Upload(ctx, in, size, uploadMimeType, o.id, o.remote, updateInfo)
 }

 // Update the already existing object
@@ -2707,7 +2715,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		MimeType:     srcMimeType,
 		ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
 	}
-	info, err := o.baseObject.update(updateInfo, srcMimeType, in, src)
+	info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
 	if err != nil {
 		return err
 	}
@@ -2744,7 +2752,7 @@ func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.Object
 	}
 	updateInfo.MimeType = importMimeType

-	info, err := o.baseObject.update(updateInfo, srcMimeType, in, src)
+	info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
 	if err != nil {
 		return err
 	}
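The recurring `req = req.WithContext(ctx)` additions are the pre-go1.13 idiom for cancellable requests: build the request, bind it to the context, then perform the round trip. A minimal sketch of that idiom in isolation (client and url are assumed to exist in scope):

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	req = req.WithContext(ctx) // on go1.13+: http.NewRequestWithContext(ctx, "GET", url, nil)
	res, err := client.Do(req) // a cancelled ctx aborts the round trip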
@@ -11,6 +11,7 @@
 package drive

 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -50,7 +51,7 @@ type resumableUpload struct {
 }

 // Upload the io.Reader in of size bytes with contentType and info
-func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
+func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
 	params := url.Values{
 		"alt":        {"json"},
 		"uploadType": {"resumable"},
@@ -81,6 +82,7 @@ func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string
 		if err != nil {
 			return false, err
 		}
+		req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 		googleapi.Expand(req.URL, map[string]string{
 			"fileId": fileID,
 		})
@@ -106,12 +108,13 @@ func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string
 		MediaType:     contentType,
 		ContentLength: size,
 	}
-	return rx.Upload()
+	return rx.Upload(ctx)
 }

 // Make an http.Request for the range passed in
-func (rx *resumableUpload) makeRequest(start int64, body io.ReadSeeker, reqSize int64) *http.Request {
+func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request {
 	req, _ := http.NewRequest("POST", rx.URI, body)
+	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 	req.ContentLength = reqSize
 	if reqSize != 0 {
 		req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
@@ -129,8 +132,8 @@ var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
 // Query drive for the amount transferred so far
 //
 // If error is nil, then start should be valid
-func (rx *resumableUpload) transferStatus() (start int64, err error) {
-	req := rx.makeRequest(0, nil, 0)
+func (rx *resumableUpload) transferStatus(ctx context.Context) (start int64, err error) {
+	req := rx.makeRequest(ctx, 0, nil, 0)
 	res, err := rx.f.client.Do(req)
 	if err != nil {
 		return 0, err
@@ -157,9 +160,9 @@ func (rx *resumableUpload) transferStatus() (start int64, err error) {
 }

 // Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
-func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
+func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
 	_, _ = chunk.Seek(0, io.SeekStart)
-	req := rx.makeRequest(start, chunk, chunkSize)
+	req := rx.makeRequest(ctx, start, chunk, chunkSize)
 	res, err := rx.f.client.Do(req)
 	if err != nil {
 		return 599, err
@@ -192,7 +195,7 @@ func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunk

 // Upload uploads the chunks from the input
 // It retries each chunk using the pacer and --low-level-retries
-func (rx *resumableUpload) Upload() (*drive.File, error) {
+func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
 	start := int64(0)
 	var StatusCode int
 	var err error
@@ -207,7 +210,7 @@ func (rx *resumableUpload) Upload() (*drive.File, error) {
 		// Transfer the chunk
 		err = rx.f.pacer.Call(func() (bool, error) {
 			fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
-			StatusCode, err = rx.transferChunk(start, chunk, reqSize)
+			StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
 			again, err := shouldRetry(err)
 			if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
 				again = false
@@ -39,11 +39,13 @@ import (
 	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
 	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/backend/dropbox/dbhash"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/config/obscure"
+	"github.com/rclone/rclone/fs/encodings"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/oauthutil"
@@ -52,6 +54,8 @@ import (
 	"golang.org/x/oauth2"
 )
 
+const enc = encodings.Dropbox
+
 // Constants
 const (
 	rcloneClientID = "5jcck7diasz0rqy"
@@ -102,10 +106,14 @@ var (
 	// A regexp matching path names for files Dropbox ignores
 	// See https://www.dropbox.com/en/help/145 - Ignored files
 	ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
+
+	// DbHashType is the hash.Type for Dropbox
+	DbHashType hash.Type
 )
 
 // Register with Fs
 func init() {
+	DbHashType = hash.RegisterHash("Dropbox", 64, dbhash.New)
 	fs.Register(&fs.RegInfo{
 		Name:        "dropbox",
 		Description: "Dropbox",
@@ -372,14 +380,15 @@ func (f *Fs) setRoot(root string) {
 // getMetadata gets the metadata for a file or directory
 func (f *Fs) getMetadata(objPath string) (entry files.IsMetadata, notFound bool, err error) {
 	err = f.pacer.Call(func() (bool, error) {
-		entry, err = f.srv.GetMetadata(&files.GetMetadataArg{Path: objPath})
+		entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
+			Path: enc.FromStandardPath(objPath),
+		})
 		return shouldRetry(err)
 	})
 	if err != nil {
 		switch e := err.(type) {
 		case files.GetMetadataAPIError:
-			switch e.EndpointError.Path.Tag {
-			case files.LookupErrorNotFound:
+			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
 				notFound = true
 				err = nil
 			}
@@ -466,7 +475,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	for {
 		if !started {
 			arg := files.ListFolderArg{
-				Path:      root,
+				Path:      enc.FromStandardPath(root),
 				Recursive: false,
 			}
 			if root == "/" {
@@ -479,8 +488,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			if err != nil {
 				switch e := err.(type) {
 				case files.ListFolderAPIError:
-					switch e.EndpointError.Path.Tag {
-					case files.LookupErrorNotFound:
+					if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
 						err = fs.ErrorDirNotFound
 					}
 				}
@@ -517,7 +525,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 
 		// Only the last element is reliably cased in PathDisplay
 		entryPath := metadata.PathDisplay
-		leaf := path.Base(entryPath)
+		leaf := enc.ToStandardName(path.Base(entryPath))
 		remote := path.Join(dir, leaf)
 		if folderInfo != nil {
 			d := fs.NewDir(remote, time.Now())
@@ -575,7 +583,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 
 	// create it
 	arg2 := files.CreateFolderArg{
-		Path: root,
+		Path: enc.FromStandardPath(root),
 	}
 	err = f.pacer.Call(func() (bool, error) {
 		_, err = f.srv.CreateFolderV2(&arg2)
@@ -601,6 +609,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 		return errors.Wrap(err, "Rmdir")
 	}
 
+	root = enc.FromStandardPath(root)
 	// check directory empty
 	arg := files.ListFolderArg{
 		Path:      root,
@@ -657,9 +666,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 
 	// Copy
-	arg := files.RelocationArg{}
-	arg.FromPath = srcObj.remotePath()
-	arg.ToPath = dstObj.remotePath()
+	arg := files.RelocationArg{
+		RelocationPath: files.RelocationPath{
+			FromPath: enc.FromStandardPath(srcObj.remotePath()),
+			ToPath:   enc.FromStandardPath(dstObj.remotePath()),
+		},
+	}
 	var err error
 	var result *files.RelocationResult
 	err = f.pacer.Call(func() (bool, error) {
@@ -691,7 +703,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 func (f *Fs) Purge(ctx context.Context) (err error) {
 	// Let dropbox delete the filesystem tree
 	err = f.pacer.Call(func() (bool, error) {
-		_, err = f.srv.DeleteV2(&files.DeleteArg{Path: f.slashRoot})
+		_, err = f.srv.DeleteV2(&files.DeleteArg{
+			Path: enc.FromStandardPath(f.slashRoot),
+		})
 		return shouldRetry(err)
 	})
 	return err
@@ -720,9 +734,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 
 	// Do the move
-	arg := files.RelocationArg{}
-	arg.FromPath = srcObj.remotePath()
-	arg.ToPath = dstObj.remotePath()
+	arg := files.RelocationArg{
+		RelocationPath: files.RelocationPath{
+			FromPath: enc.FromStandardPath(srcObj.remotePath()),
+			ToPath:   enc.FromStandardPath(dstObj.remotePath()),
+		},
+	}
 	var err error
 	var result *files.RelocationResult
 	err = f.pacer.Call(func() (bool, error) {
@@ -747,7 +764,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 
 // PublicLink adds a "readable by anyone with link" permission on the given file or folder.
 func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
-	absPath := "/" + path.Join(f.Root(), remote)
+	absPath := enc.FromStandardPath(path.Join(f.slashRoot, remote))
 	fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
 	createArg := sharing.CreateSharedLinkWithSettingsArg{
 		Path: absPath,
@@ -758,7 +775,8 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
 		return shouldRetry(err)
 	})
 
-	if err != nil && strings.Contains(err.Error(), sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
+	if err != nil && strings.Contains(err.Error(),
+		sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
 		fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
 		listArg := sharing.ListSharedLinksArg{
 			Path: absPath,
@@ -820,9 +838,12 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	// ...apparently not necessary
 
 	// Do the move
-	arg := files.RelocationArg{}
-	arg.FromPath = srcPath
-	arg.ToPath = dstPath
+	arg := files.RelocationArg{
+		RelocationPath: files.RelocationPath{
+			FromPath: enc.FromStandardPath(srcPath),
+			ToPath:   enc.FromStandardPath(dstPath),
+		},
+	}
 	err = f.pacer.Call(func() (bool, error) {
 		_, err = f.srv.MoveV2(&arg)
 		return shouldRetry(err)
@@ -863,7 +884,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-	return hash.Set(hash.Dropbox)
+	return hash.Set(DbHashType)
 }
 
 // ------------------------------------------------------------
@@ -888,7 +909,7 @@ func (o *Object) Remote() string {
 
 // Hash returns the dropbox special hash
 func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
-	if t != hash.Dropbox {
+	if t != DbHashType {
 		return "", hash.ErrUnsupported
 	}
 	err := o.readMetaData()
@@ -975,8 +996,12 @@ func (o *Object) Storable() bool {
 
 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+	fs.FixRangeOption(options, o.bytes)
 	headers := fs.OpenOptionHeaders(options)
-	arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers}
+	arg := files.DownloadArg{
+		Path:         enc.FromStandardPath(o.remotePath()),
+		ExtraHeaders: headers,
+	}
 	err = o.fs.pacer.Call(func() (bool, error) {
 		_, in, err = o.fs.srv.Download(&arg)
 		return shouldRetry(err)
@@ -985,7 +1010,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		switch e := err.(type) {
 		case files.DownloadAPIError:
 			// Don't attempt to retry copyright violation errors
-			if e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
+			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
 				return nil, fserrors.NoRetryError(err)
 			}
 		}
@@ -1103,10 +1128,9 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 	remote := o.remotePath()
 	if ignoredFiles.MatchString(remote) {
-		fs.Logf(o, "File name disallowed - not uploading")
-		return nil
+		return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
 	}
-	commitInfo := files.NewCommitInfo(o.remotePath())
+	commitInfo := files.NewCommitInfo(enc.FromStandardPath(o.remotePath()))
 	commitInfo.Mode.Tag = "overwrite"
 	// The Dropbox API only accepts timestamps in UTC with second precision.
 	commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
@@ -1131,7 +1155,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 // Remove an object
 func (o *Object) Remove(ctx context.Context) (err error) {
 	err = o.fs.pacer.Call(func() (bool, error) {
-		_, err = o.fs.srv.DeleteV2(&files.DeleteArg{Path: o.remotePath()})
+		_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
+			Path: enc.FromStandardPath(o.remotePath()),
+		})
 		return shouldRetry(err)
 	})
 	return err

@@ -32,7 +32,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
 
 var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString
 
-func (f *Fs) getDownloadToken(url string) (*GetTokenResponse, error) {
+func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
 	request := DownloadRequest{
 		URL:    url,
 		Single: 1,
@@ -44,7 +44,7 @@ func (f *Fs) getDownloadToken(url string) (*GetTokenResponse, error) {
 
 	var token GetTokenResponse
 	err := f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(&opts, &request, &token)
+		resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -72,7 +72,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr
 
 	var sharedFiles SharedFolderResponse
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(&opts, nil, &sharedFiles)
+		resp, err := f.rest.CallJSON(ctx, &opts, nil, &sharedFiles)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -88,7 +88,7 @@ func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntr
 	return entries, nil
 }
 
-func (f *Fs) listFiles(directoryID int) (filesList *FilesList, err error) {
+func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesList, err error) {
 	// fs.Debugf(f, "Requesting files for dir `%s`", directoryID)
 	request := ListFilesRequest{
 		FolderID: directoryID,
@@ -101,17 +101,21 @@ func (f *Fs) listFiles(directoryID int) (filesList *FilesList, err error) {
 
 	filesList = &FilesList{}
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(&opts, &request, filesList)
+		resp, err := f.rest.CallJSON(ctx, &opts, &request, filesList)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "couldn't list files")
 	}
+	for i := range filesList.Items {
+		item := &filesList.Items[i]
+		item.Filename = enc.ToStandardName(item.Filename)
+	}
 
 	return filesList, nil
 }
 
-func (f *Fs) listFolders(directoryID int) (foldersList *FoldersList, err error) {
+func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *FoldersList, err error) {
 	// fs.Debugf(f, "Requesting folders for id `%s`", directoryID)
 
 	request := ListFolderRequest{
@@ -125,12 +129,17 @@ func (f *Fs) listFolders(directoryID int) (foldersList *FoldersList, err error)
 
 	foldersList = &FoldersList{}
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(&opts, &request, foldersList)
+		resp, err := f.rest.CallJSON(ctx, &opts, &request, foldersList)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "couldn't list folders")
 	}
+	foldersList.Name = enc.ToStandardName(foldersList.Name)
+	for i := range foldersList.SubFolders {
+		folder := &foldersList.SubFolders[i]
+		folder.Name = enc.ToStandardName(folder.Name)
+	}
 
 	// fs.Debugf(f, "Got FoldersList for id `%s`", directoryID)
 
@@ -153,12 +162,12 @@ func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, er
 		return nil, err
 	}
 
-	files, err := f.listFiles(folderID)
+	files, err := f.listFiles(ctx, folderID)
 	if err != nil {
 		return nil, err
 	}
 
-	folders, err := f.listFolders(folderID)
+	folders, err := f.listFolders(ctx, folderID)
 	if err != nil {
 		return nil, err
 	}
@@ -175,7 +184,6 @@ func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, er
 			return nil, err
 		}
 
-		folder.Name = restoreReservedChars(folder.Name)
 		fullPath := getRemote(dir, folder.Name)
 		folderID := strconv.Itoa(folder.ID)
 
@@ -204,8 +212,8 @@ func getRemote(dir, fileName string) string {
 	return dir + "/" + fileName
 }
 
-func (f *Fs) makeFolder(leaf string, folderID int) (response *MakeFolderResponse, err error) {
-	name := replaceReservedChars(leaf)
+func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (response *MakeFolderResponse, err error) {
+	name := enc.FromStandardName(leaf)
 	// fs.Debugf(f, "Creating folder `%s` in id `%s`", name, directoryID)
 
 	request := MakeFolderRequest{
@@ -220,7 +228,7 @@ func (f *Fs) makeFolder(leaf string, folderID int) (response *MakeFolderResponse
 
 	response = &MakeFolderResponse{}
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(&opts, &request, response)
+		resp, err := f.rest.CallJSON(ctx, &opts, &request, response)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -232,7 +240,7 @@ func (f *Fs) makeFolder(leaf string, folderID int) (response *MakeFolderResponse
 	return response, err
 }
 
-func (f *Fs) removeFolder(name string, folderID int) (response *GenericOKResponse, err error) {
+func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (response *GenericOKResponse, err error) {
 	// fs.Debugf(f, "Removing folder with id `%s`", directoryID)
 
 	request := &RemoveFolderRequest{
@@ -247,7 +255,7 @@ func (f *Fs) removeFolder(name string, folderID int) (response *GenericOKRespons
 	response = &GenericOKResponse{}
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.rest.CallJSON(&opts, request, response)
+		resp, err = f.rest.CallJSON(ctx, &opts, request, response)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -262,7 +270,7 @@ func (f *Fs) removeFolder(name string, folderID int) (response *GenericOKRespons
 	return response, nil
 }
 
-func (f *Fs) deleteFile(url string) (response *GenericOKResponse, err error) {
+func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKResponse, err error) {
 	request := &RemoveFileRequest{
 		Files: []RmFile{
 			{url},
@@ -276,7 +284,7 @@ func (f *Fs) deleteFile(url string) (response *GenericOKResponse, err error) {
 
 	response = &GenericOKResponse{}
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(&opts, request, response)
+		resp, err := f.rest.CallJSON(ctx, &opts, request, response)
 		return shouldRetry(resp, err)
 	})
 
@@ -289,7 +297,7 @@ func (f *Fs) deleteFile(url string) (response *GenericOKResponse, err error) {
 	return response, nil
 }
 
-func (f *Fs) getUploadNode() (response *GetUploadNodeResponse, err error) {
+func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
 	// fs.Debugf(f, "Requesting Upload node")
 
 	opts := rest.Opts{
@@ -300,7 +308,7 @@ func (f *Fs) getUploadNode() (response *GetUploadNodeResponse, err error) {
 
 	response = &GetUploadNodeResponse{}
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(&opts, nil, response)
+		resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -312,9 +320,11 @@ func (f *Fs) getUploadNode() (response *GetUploadNodeResponse, err error) {
 	return response, err
 }
 
-func (f *Fs) uploadFile(in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
+func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
 	// fs.Debugf(f, "Uploading File `%s`", fileName)
 
+	fileName = enc.FromStandardName(fileName)
+
 	if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
 		return nil, errors.New("Invalid UploadID")
 	}
@@ -340,7 +350,7 @@ func (f *Fs) uploadFile(in io.Reader, size int64, fileName, folderID, uploadID,
 	}
 
 	err = f.pacer.CallNoRetry(func() (bool, error) {
-		resp, err := f.rest.CallJSON(&opts, nil, nil)
+		resp, err := f.rest.CallJSON(ctx, &opts, nil, nil)
 		return shouldRetry(resp, err)
 	})
 
@@ -353,7 +363,7 @@ func (f *Fs) uploadFile(in io.Reader, size int64, fileName, folderID, uploadID,
 	return response, err
 }
 
-func (f *Fs) endUpload(uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
+func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
 	// fs.Debugf(f, "Ending File Upload `%s`", uploadID)
 
 	if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
@@ -374,7 +384,7 @@ func (f *Fs) endUpload(uploadID string, nodeurl string) (response *EndFileUpload
 
 	response = &EndFileUploadResponse{}
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err := f.rest.CallJSON(&opts, nil, response)
+		resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
 		return shouldRetry(resp, err)
 	})

@@ -13,6 +13,7 @@ import (
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
+	"github.com/rclone/rclone/fs/encodings"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/dircache"
@@ -28,6 +29,8 @@ const (
 	decayConstant = 2 // bigger for slower decay, exponential
 )
 
+const enc = encodings.Fichier
+
 func init() {
 	fs.Register(&fs.RegInfo{
 		Name:        "fichier",
@@ -74,7 +77,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
 	if err != nil {
 		return "", false, err
 	}
-	folders, err := f.listFolders(folderID)
+	folders, err := f.listFolders(ctx, folderID)
 	if err != nil {
 		return "", false, err
 	}
@@ -95,7 +98,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 	if err != nil {
 		return "", err
 	}
	resp, err := f.makeFolder(ctx, leaf, folderID)
 	if err != nil {
 		return "", err
 	}
@@ -141,8 +144,7 @@ func (f *Fs) Features() *fs.Features {
 //
 // On Windows avoid single character remote names as they can be mixed
 // up with drive letters.
-func NewFs(name string, rootleaf string, config configmap.Mapper) (fs.Fs, error) {
-	root := replaceReservedChars(rootleaf)
+func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
 	opt := new(Options)
 	err := configstruct.Set(config, opt)
 	if err != nil {
@@ -251,7 +253,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	if err != nil {
 		return nil, err
 	}
-	files, err := f.listFiles(folderID)
+	files, err := f.listFiles(ctx, folderID)
 	if err != nil {
 		return nil, err
 	}
@@ -298,13 +300,13 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 // This will create a duplicate if we upload a new file without
 // checking to see if there is one already - use Put() for that.
 func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
-	if size > int64(100E9) {
+	if size > int64(100e9) {
 		return nil, errors.New("File too big, cant upload")
 	} else if size == 0 {
 		return nil, fs.ErrorCantUploadEmptyFiles
 	}
 
-	nodeResponse, err := f.getUploadNode()
+	nodeResponse, err := f.getUploadNode(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -314,12 +316,12 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
 		return nil, err
 	}
 
-	_, err = f.uploadFile(in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
+	_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
 	if err != nil {
 		return nil, err
 	}
 
-	fileUploadResponse, err := f.endUpload(nodeResponse.ID, nodeResponse.URL)
+	fileUploadResponse, err := f.endUpload(ctx, nodeResponse.ID, nodeResponse.URL)
 	if err != nil {
 		return nil, err
 	}
@@ -346,7 +348,7 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
 			Date:     time.Now().Format("2006-01-02 15:04:05"),
 			Filename: link.Filename,
 			Pass:     0,
-			Size:     int(fileSize),
+			Size:     fileSize,
 			URL:      link.Download,
 		},
 	}, nil
@@ -393,7 +395,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 		return err
 	}
 
-	_, err = f.removeFolder(dir, folderID)
+	_, err = f.removeFolder(ctx, dir, folderID)
 	if err != nil {
 		return err
 	}

@@ -43,7 +43,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
 
 // Size returns the size of the file
 func (o *Object) Size() int64 {
-	return int64(o.file.Size)
+	return o.file.Size
 }
 
 // Fs returns read only access to the Fs that this object is part of
@@ -74,8 +74,8 @@ func (o *Object) SetModTime(context.Context, time.Time) error {
 
 // Open opens the file for read. Call Close() on the returned io.ReadCloser
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
-	fs.FixRangeOption(options, int64(o.file.Size))
-	downloadToken, err := o.fs.getDownloadToken(o.file.URL)
+	fs.FixRangeOption(options, o.file.Size)
+	downloadToken, err := o.fs.getDownloadToken(ctx, o.file.URL)
 
 	if err != nil {
 		return nil, err
@@ -89,7 +89,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
 	}
 
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.rest.Call(&opts)
+		resp, err = o.fs.rest.Call(ctx, &opts)
 		return shouldRetry(resp, err)
 	})
 
@@ -131,7 +131,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 func (o *Object) Remove(ctx context.Context) error {
 	// fs.Debugf(f, "Removing file `%s` with url `%s`", o.file.Filename, o.file.URL)
 
-	_, err := o.fs.deleteFile(o.file.URL)
+	_, err := o.fs.deleteFile(ctx, o.file.URL)
 
 	if err != nil {
 		return err

@@ -1,71 +0,0 @@
-/*
-Translate file names for 1fichier
-
-1Fichier reserved characters
-
-The following characters are 1Fichier reserved characters, and can't
-be used in 1Fichier folder and file names.
-
-*/
-
-package fichier
-
-import (
-	"regexp"
-	"strings"
-)
-
-// charMap holds replacements for characters
-//
-// 1Fichier has a restricted set of characters compared to other cloud
-// storage systems, so we to map these to the FULLWIDTH unicode
-// equivalents
-//
-// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
-var (
-	charMap = map[rune]rune{
-		'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
-		'<':  '＜', // FULLWIDTH LESS-THAN SIGN
-		'>':  '＞', // FULLWIDTH GREATER-THAN SIGN
-		'"':  '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
-		'\'': '＇', // FULLWIDTH APOSTROPHE
-		'$':  '＄', // FULLWIDTH DOLLAR SIGN
-		'`':  '｀', // FULLWIDTH GRAVE ACCENT
-		' ':  '␠', // SYMBOL FOR SPACE
-	}
-	invCharMap           map[rune]rune
-	fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
-)
-
-func init() {
-	// Create inverse charMap
-	invCharMap = make(map[rune]rune, len(charMap))
-	for k, v := range charMap {
-		invCharMap[v] = k
-	}
-}
-
-// replaceReservedChars takes a path and substitutes any reserved
-// characters in it
-func replaceReservedChars(in string) string {
-	// file names can't start with space either
-	in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
-	// Replace reserved characters
-	return strings.Map(func(c rune) rune {
-		if replacement, ok := charMap[c]; ok && c != ' ' {
-			return replacement
-		}
-		return c
-	}, in)
-}
-
-// restoreReservedChars takes a path and undoes any substitutions
-// made by replaceReservedChars
-func restoreReservedChars(in string) string {
-	return strings.Map(func(c rune) rune {
-		if replacement, ok := invCharMap[c]; ok {
-			return replacement
-		}
-		return c
-	}, in)
-}

@@ -1,24 +0,0 @@
-package fichier
-
-import "testing"
-
-func TestReplace(t *testing.T) {
-	for _, test := range []struct {
-		in  string
-		out string
-	}{
-		{"", ""},
-		{"abc 123", "abc 123"},
-		{"\"'<>/\\$`", "＂＇＜＞/＼＄｀"},
-		{" leading space", "␠leading space"},
-	} {
-		got := replaceReservedChars(test.in)
-		if got != test.out {
-			t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
-		}
-		got2 := restoreReservedChars(got)
-		if got2 != test.in {
-			t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
-		}
-	}
-}

@@ -69,7 +69,7 @@ type SharedFolderResponse []SharedFile
 type SharedFile struct {
 	Filename string `json:"filename"`
 	Link     string `json:"link"`
-	Size     int    `json:"size"`
+	Size     int64  `json:"size"`
 }
 
 // EndFileUploadResponse is the response structure of the corresponding request
@@ -93,7 +93,7 @@ type File struct {
 	Date     string `json:"date"`
 	Filename string `json:"filename"`
 	Pass     int    `json:"pass"`
-	Size     int    `json:"size"`
+	Size     int64  `json:"size"`
 	URL      string `json:"url"`
 }

@@ -17,11 +17,14 @@ import (
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/config/obscure"
+	"github.com/rclone/rclone/fs/encodings"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/readers"
 )
 
+const enc = encodings.FTP
+
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{
@@ -62,6 +65,11 @@ func init() {
 			Help:     "Do not verify the TLS certificate of the server",
 			Default:  false,
 			Advanced: true,
+		}, {
+			Name:     "disable_epsv",
+			Help:     "Disable using EPSV even if server advertises support",
+			Default:  false,
+			Advanced: true,
 		},
 	},
 })
@@ -76,6 +84,7 @@ type Options struct {
 	TLS               bool   `config:"tls"`
 	Concurrency       int    `config:"concurrency"`
 	SkipVerifyTLSCert bool   `config:"no_check_certificate"`
+	DisableEPSV       bool   `config:"disable_epsv"`
 }
 
 // Fs represents a remote FTP server
@@ -141,6 +150,9 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
 		}
 		ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
 	}
+	if f.opt.DisableEPSV {
+		ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
+	}
 	c, err := ftp.Dial(f.dialAddr, ftpConfig...)
 	if err != nil {
 		fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
@@ -295,10 +307,37 @@ func translateErrorDir(err error) error {
 	return err
 }
 
+// entryToStandard converts an incoming ftp.Entry to Standard encoding
+func entryToStandard(entry *ftp.Entry) {
+	// Skip . and .. as we don't want these encoded
+	if entry.Name == "." || entry.Name == ".." {
+		return
+	}
+	entry.Name = enc.ToStandardName(entry.Name)
+	entry.Target = enc.ToStandardPath(entry.Target)
+}
+
+// dirFromStandardPath returns dir in encoded form.
+func dirFromStandardPath(dir string) string {
+	// Skip . and .. as we don't want these encoded
+	if dir == "." || dir == ".." {
+		return dir
+	}
+	return enc.FromStandardPath(dir)
+}
+
 // findItem finds a directory entry for the name in its parent directory
 func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
 	// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
 	fullPath := path.Join(f.root, remote)
+	if fullPath == "" || fullPath == "." || fullPath == "/" {
+		// if root, assume exists and synthesize an entry
+		return &ftp.Entry{
+			Name: "",
+			Type: ftp.EntryTypeFolder,
+			Time: time.Now(),
+		}, nil
+	}
 	dir := path.Dir(fullPath)
 	base := path.Base(fullPath)
 
@@ -306,12 +345,13 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "findItem")
 	}
-	files, err := c.List(dir)
+	files, err := c.List(dirFromStandardPath(dir))
 	f.putFtpConnection(&c, err)
 	if err != nil {
 		return nil, translateErrorFile(err)
 	}
 	for _, file := range files {
+		entryToStandard(file)
 		if file.Name == base {
 			return file, nil
 		}
@@ -366,7 +406,7 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) {
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	// defer fs.Trace(dir, "curlevel=%d", curlevel)("")
+	// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
 	c, err := f.getFtpConnection()
 	if err != nil {
 		return nil, errors.Wrap(err, "list")
@@ -378,7 +418,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	resultchan := make(chan []*ftp.Entry, 1)
 	errchan := make(chan error, 1)
 	go func() {
-		result, err := c.List(path.Join(f.root, dir))
+		result, err := c.List(dirFromStandardPath(path.Join(f.root, dir)))
 		f.putFtpConnection(&c, err)
 		if err != nil {
 			errchan <- err
@@ -415,6 +455,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	}
 	for i := range files {
 		object := files[i]
+		entryToStandard(object)
 		newremote := path.Join(dir, object.Name)
 		switch object.Type {
 		case ftp.EntryTypeFolder:
@@ -484,19 +525,21 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "getInfo")
 	}
-	files, err := c.List(dir)
+	files, err := c.List(dirFromStandardPath(dir))
 	f.putFtpConnection(&c, err)
 	if err != nil {
 		return nil, translateErrorFile(err)
 	}
 
 	for i := range files {
-		if files[i].Name == base {
+		file := files[i]
+		entryToStandard(file)
+		if file.Name == base {
 			info := &FileInfo{
 				Name:    remote,
-				Size:    files[i].Size,
-				ModTime: files[i].Time,
-				IsDir:   files[i].Type == ftp.EntryTypeFolder,
+				Size:    file.Size,
+				ModTime: file.Time,
+				IsDir:   file.Type == ftp.EntryTypeFolder,
 			}
 			return info, nil
 		}
@@ -506,6 +549,7 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
 
 // mkdir makes the directory and parents using unrooted paths
 func (f *Fs) mkdir(abspath string) error {
+	abspath = path.Clean(abspath)
 	if abspath == "." || abspath == "/" {
 		return nil
 	}
@@ -527,7 +571,7 @@ func (f *Fs) mkdir(abspath string) error {
 	if connErr != nil {
 		return errors.Wrap(connErr, "mkdir")
 	}
-	err = c.MakeDir(abspath)
+	err = c.MakeDir(dirFromStandardPath(abspath))
 	f.putFtpConnection(&c, err)
 	switch errX := err.(type) {
 	case *textproto.Error:
@@ -563,7 +607,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	if err != nil {
 		return errors.Wrap(translateErrorFile(err), "Rmdir")
 	}
-	err = c.RemoveDir(path.Join(f.root, dir))
+	err = c.RemoveDir(dirFromStandardPath(path.Join(f.root, dir)))
 	f.putFtpConnection(&c, err)
 	return translateErrorDir(err)
 }
@@ -584,8 +628,8 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		return nil, errors.Wrap(err, "Move")
 	}
 	err = c.Rename(
-		path.Join(srcObj.fs.root, srcObj.remote),
-		path.Join(f.root, remote),
+		enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
+		enc.FromStandardPath(path.Join(f.root, remote)),
 	)
 	f.putFtpConnection(&c, err)
 	if err != nil {
@@ -638,8 +682,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 		return errors.Wrap(err, "DirMove")
 	}
 	err = c.Rename(
-		srcPath,
-		dstPath,
+		dirFromStandardPath(srcPath),
+		dirFromStandardPath(dstPath),
 	)
 	f.putFtpConnection(&c, err)
 	if err != nil {
@@ -765,7 +809,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 	if err != nil {
 		return nil, errors.Wrap(err, "open")
 	}
-	fd, err := c.RetrFrom(path, uint64(offset))
+	fd, err := c.RetrFrom(enc.FromStandardPath(path), uint64(offset))
 	if err != nil {
 		o.fs.putFtpConnection(&c, err)
 		return nil, errors.Wrap(err, "open")
@@ -800,7 +844,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	if err != nil {
 		return errors.Wrap(err, "Update")
 	}
-	err = c.Stor(path, in)
+	err = c.Stor(enc.FromStandardPath(path), in)
 	if err != nil {
 		_ = c.Quit() // toss this connection to avoid sync errors
 		remove()
@@ -830,7 +874,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
 		if err != nil {
 			return errors.Wrap(err, "Remove")
 		}
-		err = c.Delete(path)
+		err = c.Delete(enc.FromStandardPath(path))
 		o.fs.putFtpConnection(&c, err)
 	}
 	return err

@@ -23,9 +23,7 @@ import (
 	"net/http"
 	"os"
 	"path"
-	"regexp"
 	"strings"
-	"sync"
 	"time"
 
 	"github.com/pkg/errors"
@@ -34,10 +32,12 @@ import (
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/config/obscure"
+	"github.com/rclone/rclone/fs/encodings"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/walk"
+	"github.com/rclone/rclone/lib/bucket"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
 	"golang.org/x/oauth2"
@@ -69,6 +69,8 @@ var (
 	}
 )
 
+const enc = encodings.GoogleCloudStorage
+
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{
@@ -264,16 +266,16 @@ type Options struct {
 
 // Fs represents a remote storage server
 type Fs struct {
-	name       string           // name of this remote
-	root       string           // the path we are working on if any
-	opt        Options          // parsed options
-	features   *fs.Features     // optional features
-	svc        *storage.Service // the connection to the storage server
-	client     *http.Client     // authorized client
-	bucket     string           // the bucket we are working on
-	bucketOKMu sync.Mutex       // mutex to protect bucket OK
-	bucketOK   bool             // true if we have created the bucket
-	pacer      *fs.Pacer        // To pace the API calls
+	name          string           // name of this remote
+	root          string           // the path we are working on if any
+	opt           Options          // parsed options
+	features      *fs.Features     // optional features
+	svc           *storage.Service // the connection to the storage server
+	client        *http.Client     // authorized client
+	rootBucket    string           // bucket part of root (if any)
+	rootDirectory string           // directory part of root (if any)
+	cache         *bucket.Cache    // cache of bucket status
+	pacer         *fs.Pacer        // To pace the API calls
 }
 
 // Object describes a storage object
@@ -298,18 +300,18 @@ func (f *Fs) Name() string {
 
 // Root of the remote (as passed into NewFs)
 func (f *Fs) Root() string {
-	if f.root == "" {
-		return f.bucket
-	}
-	return f.bucket + "/" + f.root
+	return f.root
 }
 
 // String converts this Fs to a string
 func (f *Fs) String() string {
-	if f.root == "" {
-		return fmt.Sprintf("Storage bucket %s", f.bucket)
+	if f.rootBucket == "" {
+		return fmt.Sprintf("GCS root")
 	}
-	return fmt.Sprintf("Storage bucket %s path %s", f.bucket, f.root)
+	if f.rootDirectory == "" {
+		return fmt.Sprintf("GCS bucket %s", f.rootBucket)
+	}
+	return fmt.Sprintf("GCS bucket %s path %s", f.rootBucket, f.rootDirectory)
 }
 
 // Features returns the optional features of this Fs
@@ -341,21 +343,24 @@ func shouldRetry(err error) (again bool, errOut error) {
 	return again, err
 }
 
-// Pattern to match a storage path
-var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
-
-// parseParse parses a storage 'url'
-func parsePath(path string) (bucket, directory string, err error) {
-	parts := matcher.FindStringSubmatch(path)
-	if parts == nil {
-		err = errors.Errorf("couldn't find bucket in storage path %q", path)
-	} else {
-		bucket, directory = parts[1], parts[2]
-		directory = strings.Trim(directory, "/")
-	}
+// parsePath parses a remote 'url'
+func parsePath(path string) (root string) {
+	root = strings.Trim(path, "/")
 	return
 }
 
+// split returns bucket and bucketPath from the rootRelativePath
+// relative to f.root
+func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
+	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
+	return enc.FromStandardName(bucketName), enc.FromStandardPath(bucketPath)
+}
+
+// split returns bucket and bucketPath from the object
+func (o *Object) split() (bucket, bucketPath string) {
+	return o.fs.split(o.remote)
+}
+
 func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
 	conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
 	if err != nil {
@@ -365,8 +370,15 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
 	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
 }
 
+// setRoot changes the root of the Fs
+func (f *Fs) setRoot(root string) {
+	f.root = parsePath(root)
+	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
+}
+
 // NewFs constructs an Fs from the path, bucket:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	ctx := context.TODO()
 	var oAuthClient *http.Client
 
 	// Parse config into Options struct
@@ -406,22 +418,19 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		}
 	}
 
-	bucket, directory, err := parsePath(root)
-	if err != nil {
-		return nil, err
-	}
-
 	f := &Fs{
-		name:   name,
-		bucket: bucket,
-		root:   directory,
-		opt:    *opt,
-		pacer:  fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
+		name:  name,
+		root:  root,
+		opt:   *opt,
+		pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
+		cache: bucket.NewCache(),
 	}
+	f.setRoot(root)
 	f.features = (&fs.Features{
-		ReadMimeType:  true,
-		WriteMimeType: true,
-		BucketBased:   true,
+		ReadMimeType:      true,
+		WriteMimeType:     true,
+		BucketBased:       true,
+		BucketBasedRootOK: true,
 	}).Fill(f)
 
 	// Create a new authorized Drive client.
@@ -431,20 +440,19 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
 	}
 
-	if f.root != "" {
-		f.root += "/"
+	if f.rootBucket != "" && f.rootDirectory != "" {
 		// Check to see if the object exists
+		encodedDirectory := enc.FromStandardPath(f.rootDirectory)
 		err = f.pacer.Call(func() (bool, error) {
-			_, err = f.svc.Objects.Get(bucket, directory).Do()
+			_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
 			return shouldRetry(err)
 		})
 		if err == nil {
-			f.root = path.Dir(directory)
-			if f.root == "." {
-				f.root = ""
-			} else {
-				f.root += "/"
+			newRoot := path.Dir(f.root)
+			if newRoot == "." {
+				newRoot = ""
 			}
+			f.setRoot(newRoot)
 			// return an error with an fs which points to the parent
 			return f, fs.ErrorIsFile
 		}
@@ -455,7 +463,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 // Return an Object from a path
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage.Object) (fs.Object, error) {
 	o := &Object{
 		fs:     f,
 		remote: remote,
@@ -463,7 +471,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object,
 	if info != nil {
 		o.setMetaData(info)
 	} else {
-		err := o.readMetaData() // reads info and meta, returning an error
+		err := o.readMetaData(ctx) // reads info and meta, returning an error
 		if err != nil {
 			return nil, err
 		}
@@ -474,7 +482,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object,
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-	return f.newObjectWithInfo(remote, nil)
+	return f.newObjectWithInfo(ctx, remote, nil)
 }
 
 // listFn is called from list to handle an object.
@@ -485,20 +493,24 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
 // dir is the starting directory, "" for root
 //
 // Set recurse to read sub directories
-func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) (err error) {
-	root := f.root
-	rootLength := len(root)
-	if dir != "" {
-		root += dir + "/"
+//
+// The remote has prefix removed from it and if addBucket is set
+// then it adds the bucket to the start.
+func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) (err error) {
+	if prefix != "" {
+		prefix += "/"
 	}
-	list := f.svc.Objects.List(f.bucket).Prefix(root).MaxResults(listChunks)
+	if directory != "" {
+		directory += "/"
+	}
+	list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
 	if !recurse {
 		list = list.Delimiter("/")
 	}
 	for {
 		var objects *storage.Objects
 		err = f.pacer.Call(func() (bool, error) {
-			objects, err = list.Do()
+			objects, err = list.Context(ctx).Do()
 			return shouldRetry(err)
 		})
 		if err != nil {
@@ -511,31 +523,38 @@ func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) (err
 		}
 		if !recurse {
 			var object storage.Object
-			for _, prefix := range objects.Prefixes {
-				if !strings.HasSuffix(prefix, "/") {
+			for _, remote := range objects.Prefixes {
+				if !strings.HasSuffix(remote, "/") {
 					continue
 				}
-				err = fn(prefix[rootLength:len(prefix)-1], &object, true)
+				remote = enc.ToStandardPath(remote)
+				if !strings.HasPrefix(remote, prefix) {
+					fs.Logf(f, "Odd name received %q", remote)
+					continue
+				}
+				remote = remote[len(prefix) : len(remote)-1]
+				if addBucket {
+					remote = path.Join(bucket, remote)
+				}
+				err = fn(remote, &object, true)
 				if err != nil {
 					return err
 				}
 			}
 		}
 		for _, object := range objects.Items {
-			if !strings.HasPrefix(object.Name, root) {
+			remote := enc.ToStandardPath(object.Name)
+			if !strings.HasPrefix(remote, prefix) {
 				fs.Logf(f, "Odd name received %q", object.Name)
 				continue
 			}
-			remote := object.Name[rootLength:]
+			remote = remote[len(prefix):]
+			isDirectory := strings.HasSuffix(remote, "/")
+			if addBucket {
+				remote = path.Join(bucket, remote)
+			}
 			// is this a directory marker?
-			if (strings.HasSuffix(remote, "/") || remote == "") && object.Size == 0 {
-				if recurse && remote != "" {
-					// add a directory in if --fast-list since will have no prefixes
-					err = fn(remote[:len(remote)-1], object, true)
-					if err != nil {
-						return err
-					}
-				}
+			if isDirectory && object.Size == 0 {
 				continue // skip directory marker
 			}
 			err = fn(remote, object, false)
@@ -552,32 +571,23 @@ func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) (err
 }
 
 // Convert a list item into a DirEntry
-func (f *Fs) itemToDirEntry(remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
+func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
 	if isDirectory {
 		d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size))
 		return d, nil
 	}
-	o, err := f.newObjectWithInfo(remote, object)
+	o, err := f.newObjectWithInfo(ctx, remote, object)
 	if err != nil {
 		return nil, err
 	}
 	return o, nil
 }
 
-// mark the bucket as being OK
-func (f *Fs) markBucketOK() {
-	if f.bucket != "" {
-		f.bucketOKMu.Lock()
-		f.bucketOK = true
-		f.bucketOKMu.Unlock()
-	}
-}
-
 // listDir lists a single directory
-func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
 	// List the objects
-	err = f.list(ctx, dir, false, func(remote string, object *storage.Object, isDirectory bool) error {
-		entry, err := f.itemToDirEntry(remote, object, isDirectory)
+	err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
+		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
 		if err != nil {
 			return err
 		}
@@ -590,15 +600,12 @@ func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, er
 		return nil, err
 	}
 	// bucket must be present if listing succeeded
-	f.markBucketOK()
+	f.cache.MarkOK(bucket)
 	return entries, err
 }
 
 // listBuckets lists the buckets
-func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
-	if dir != "" {
-		return nil, fs.ErrorListBucketRequired
-	}
+func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
 	if f.opt.ProjectNumber == "" {
 		return nil, errors.New("can't list buckets without project number")
 	}
@@ -606,14 +613,14 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
 	for {
 		var buckets *storage.Buckets
 		err = f.pacer.Call(func() (bool, error) {
-			buckets, err = listBuckets.Do()
+			buckets, err = listBuckets.Context(ctx).Do()
 			return shouldRetry(err)
 		})
 		if err != nil {
 			return nil, err
 		}
 		for _, bucket := range buckets.Items {
-			d := fs.NewDir(bucket.Name, time.Time{})
+			d := fs.NewDir(enc.ToStandardName(bucket.Name), time.Time{})
 			entries = append(entries, d)
 		}
 		if buckets.NextPageToken == "" {
@@ -634,10 +641,14 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	if f.bucket == "" {
-		return f.listBuckets(dir)
+	bucket, directory := f.split(dir)
+	if bucket == "" {
+		if directory != "" {
+			return nil, fs.ErrorListBucketRequired
+		}
+		return f.listBuckets(ctx)
 	}
-	return f.listDir(ctx, dir)
+	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
 }
 
 // ListR lists the objects and directories of the Fs starting
@@ -657,22 +668,43 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // Don't implement this unless you have a more efficient way
 // of listing recursively that doing a directory traversal.
 func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
-	if f.bucket == "" {
-		return fs.ErrorListBucketRequired
-	}
+	bucket, directory := f.split(dir)
 	list := walk.NewListRHelper(callback)
-	err = f.list(ctx, dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
-		entry, err := f.itemToDirEntry(remote, object, isDirectory)
-		if err != nil {
-			return err
+	listR := func(bucket, directory, prefix string, addBucket bool) error {
+		return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
+			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
+			if err != nil {
+				return err
+			}
+			return list.Add(entry)
+		})
+	}
+	if bucket == "" {
+		entries, err := f.listBuckets(ctx)
+		if err != nil {
+			return err
+		}
+		for _, entry := range entries {
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
+			bucket := entry.Remote()
+			err = listR(bucket, "", f.rootDirectory, true)
+			if err != nil {
+				return err
+			}
+			// bucket must be present if listing succeeded
+			f.cache.MarkOK(bucket)
+		}
+	} else {
+		err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
+		if err != nil {
+			return err
 		}
-		return list.Add(entry)
-	})
-	if err != nil {
-		return err
+		// bucket must be present if listing succeeded
+		f.cache.MarkOK(bucket)
 	}
-	// bucket must be present if listing succeeded
-	f.markBucketOK()
 	return list.Flush()
 }

@@ -697,58 +729,55 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
||||
|
||||
// Mkdir creates the bucket if it doesn't exist
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
||||
f.bucketOKMu.Lock()
|
||||
defer f.bucketOKMu.Unlock()
|
||||
if f.bucketOK {
|
||||
return nil
|
||||
}
|
||||
// List something from the bucket to see if it exists. Doing it like this enables the use of a
|
||||
// service account that only has the "Storage Object Admin" role. See #2193 for details.
|
||||
bucket, _ := f.split(dir)
|
||||
return f.makeBucket(ctx, bucket)
|
||||
}
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, err = f.svc.Objects.List(f.bucket).MaxResults(1).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
// Bucket already exists
|
||||
f.bucketOK = true
|
||||
return nil
|
||||
} else if gErr, ok := err.(*googleapi.Error); ok {
|
||||
if gErr.Code != http.StatusNotFound {
|
||||
// makeBucket creates the bucket if it doesn't exist
|
||||
func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
|
||||
return f.cache.Create(bucket, func() error {
|
||||
// List something from the bucket to see if it exists. Doing it like this enables the use of a
|
||||
// service account that only has the "Storage Object Admin" role. See #2193 for details.
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
// Bucket already exists
|
||||
return nil
|
||||
} else if gErr, ok := err.(*googleapi.Error); ok {
|
||||
if gErr.Code != http.StatusNotFound {
|
||||
return errors.Wrap(err, "failed to get bucket")
|
||||
}
|
||||
} else {
|
||||
return errors.Wrap(err, "failed to get bucket")
|
||||
}
|
||||
} else {
|
||||
return errors.Wrap(err, "failed to get bucket")
|
||||
}
|
||||
|
||||
if f.opt.ProjectNumber == "" {
|
||||
return errors.New("can't make bucket without project number")
|
||||
}
|
||||
if f.opt.ProjectNumber == "" {
|
||||
return errors.New("can't make bucket without project number")
|
||||
}
|
||||
|
||||
bucket := storage.Bucket{
|
||||
Name: f.bucket,
|
||||
Location: f.opt.Location,
|
||||
StorageClass: f.opt.StorageClass,
|
||||
}
|
||||
if f.opt.BucketPolicyOnly {
|
||||
bucket.IamConfiguration = &storage.BucketIamConfiguration{
|
||||
BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
|
||||
Enabled: true,
|
||||
},
|
||||
bucket := storage.Bucket{
|
||||
Name: bucket,
|
||||
Location: f.opt.Location,
|
||||
StorageClass: f.opt.StorageClass,
|
||||
}
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
|
||||
if !f.opt.BucketPolicyOnly {
|
||||
insertBucket.PredefinedAcl(f.opt.BucketACL)
|
||||
if f.opt.BucketPolicyOnly {
|
||||
bucket.IamConfiguration = &storage.BucketIamConfiguration{
|
||||
BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
|
||||
Enabled: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
_, err = insertBucket.Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
f.bucketOK = true
|
||||
}
|
||||
return err
|
||||
return f.pacer.Call(func() (bool, error) {
|
||||
insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
|
||||
if !f.opt.BucketPolicyOnly {
|
||||
insertBucket.PredefinedAcl(f.opt.BucketACL)
|
||||
}
|
||||
_, err = insertBucket.Context(ctx).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
}, nil)
|
||||
}
|
||||
|
||||
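The hunk above keeps the trick the old Mkdir used: to find out whether a bucket exists, it lists at most one object from it instead of reading the bucket's metadata, so a service account that only holds the "Storage Object Admin" role still works. A minimal standalone sketch of that probe, assuming the google.golang.org/api/storage/v1 client and default credentials (the function and bucket names here are illustrative, not rclone's):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

// bucketExists probes a bucket with a 1-object list rather than
// Buckets.Get, so credentials that cannot read bucket metadata
// (e.g. "Storage Object Admin" only) still get a usable answer.
func bucketExists(ctx context.Context, svc *storage.Service, bucket string) (bool, error) {
	_, err := svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
	if err == nil {
		return true, nil
	}
	if gErr, ok := err.(*googleapi.Error); ok && gErr.Code == http.StatusNotFound {
		return false, nil // bucket is missing, not an error
	}
	return false, err // some other failure - report it
}

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	ok, err := bucketExists(ctx, svc, "my-example-bucket") // hypothetical bucket
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("bucket exists:", ok)
}
```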
// Rmdir deletes the bucket if the fs is at the root
@@ -756,19 +785,16 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
bucket, directory := f.split(dir)
if bucket == "" || directory != "" {
return nil
}
err = f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(f.bucket).Do()
return shouldRetry(err)
return f.cache.Remove(bucket, func() error {
return f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
return shouldRetry(err)
})
})
if err == nil {
f.bucketOK = false
}
return err
}

// Precision returns the precision
@@ -786,7 +812,8 @@ func (f *Fs) Precision() time.Duration {
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
err := f.Mkdir(ctx, "")
dstBucket, dstPath := f.split(remote)
err := f.makeBucket(ctx, dstBucket)
if err != nil {
return nil, err
}
@@ -795,6 +822,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcBucket, srcPath := srcObj.split()

// Temporary Object under construction
dstObj := &Object{
@@ -802,13 +830,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
remote: remote,
}

srcBucket := srcObj.fs.bucket
srcObject := srcObj.fs.root + srcObj.remote
dstBucket := f.bucket
dstObject := f.root + remote
var newObject *storage.Object
err = f.pacer.Call(func() (bool, error) {
newObject, err = f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil)
if !f.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
}
newObject, err = copyObject.Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
@@ -891,24 +919,33 @@ func (o *Object) setMetaData(info *storage.Object) {
}
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
if !o.modTime.IsZero() {
return nil
}
var object *storage.Object
// readObjectInfo reads the definition for an object
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
object, err = o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code == http.StatusNotFound {
return fs.ErrorObjectNotFound
return nil, fs.ErrorObjectNotFound
}
}
return nil, err
}
return object, nil
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
if !o.modTime.IsZero() {
return nil
}
object, err := o.readObjectInfo(ctx)
if err != nil {
return err
}
o.setMetaData(object)
@@ -920,7 +957,7 @@ func (o *Object) readMetaData() (err error) {
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData()
err := o.readMetaData(ctx)
if err != nil {
// fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -937,15 +974,27 @@ func metadataFromModTime(modTime time.Time) map[string]string {

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
// This only adds metadata so will preserve other metadata
object := storage.Object{
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,
Metadata: metadataFromModTime(modTime),
// read the complete existing object first
object, err := o.readObjectInfo(ctx)
if err != nil {
return err
}
// Add the mtime to the existing metadata
mtime := modTime.Format(timeFormatOut)
if object.Metadata == nil {
object.Metadata = make(map[string]string, 1)
}
object.Metadata[metaMtime] = mtime
// Copy the object to itself to update the metadata
// Using PATCH requires too many permissions
bucket, bucketPath := o.split()
var newObject *storage.Object
err = o.fs.pacer.Call(func() (bool, error) {
newObject, err = o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
copyObject := o.fs.svc.Objects.Copy(bucket, bucketPath, bucket, bucketPath, object)
if !o.fs.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = copyObject.Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
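The SetModTime hunk above swaps Objects.Patch for a copy of the object onto itself: a metadata-only patch needs the storage.objects.update permission, which restricted service accounts often lack, while a self-copy only needs read and create. A hedged sketch of that pattern outside rclone's types, with an invented bucket, object name, and metadata key:

```go
package main

import (
	"context"
	"log"

	storage "google.golang.org/api/storage/v1"
)

// setMtime rewrites an object's custom metadata by copying the object
// onto itself. It reads the full object first so existing metadata is
// preserved, then issues a same-source-and-destination copy.
func setMtime(ctx context.Context, svc *storage.Service, bucket, name, mtime string) error {
	obj, err := svc.Objects.Get(bucket, name).Context(ctx).Do()
	if err != nil {
		return err
	}
	if obj.Metadata == nil {
		obj.Metadata = map[string]string{}
	}
	obj.Metadata["mtime"] = mtime // key name is illustrative, not rclone's constant
	_, err = svc.Objects.Copy(bucket, name, bucket, name, obj).Context(ctx).Do()
	return err
}

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if err := setMtime(ctx, svc, "my-example-bucket", "file.txt", "2019-01-01T00:00:00Z"); err != nil {
		log.Fatal(err)
	}
}
```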
@@ -966,6 +1015,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
fs.FixRangeOption(options, o.bytes)
fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
@@ -993,25 +1044,26 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
err := o.fs.Mkdir(ctx, "")
bucket, bucketPath := o.split()
err := o.fs.makeBucket(ctx, bucket)
if err != nil {
return err
}
modTime := src.ModTime(ctx)

object := storage.Object{
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,
Bucket: bucket,
Name: bucketPath,
ContentType: fs.MimeType(ctx, src),
Metadata: metadataFromModTime(modTime),
}
var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
insertObject := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
if !o.fs.opt.BucketPolicyOnly {
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = insertObject.Do()
newObject, err = insertObject.Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
@@ -1024,8 +1076,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
return shouldRetry(err)
})
return err

@@ -27,6 +27,7 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/oauthutil"
@@ -60,6 +61,8 @@ var (
// Description of how to auth for this app
oauthConfig = &oauth2.Config{
Scopes: []string{
"openid",
"profile",
scopeReadWrite,
},
Endpoint: google.Endpoint,
@@ -143,18 +146,20 @@ type Options struct {

// Fs represents a remote storage server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
pacer *fs.Pacer // To pace the API calls
startTime time.Time // time Fs was started - used for datestamps
albumsMu sync.Mutex // protect albums (but not contents)
albums map[bool]*albums // albums, shared or not
uploadedMu sync.Mutex // to protect the below
uploaded dirtree.DirTree // record of uploaded items
createMu sync.Mutex // held when creating albums to prevent dupes
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
unAuth *rest.Client // unauthenticated http client
srv *rest.Client // the connection to the one drive server
ts *oauthutil.TokenSource // token source for oauth2
pacer *fs.Pacer // To pace the API calls
startTime time.Time // time Fs was started - used for datestamps
albumsMu sync.Mutex // protect albums (but not contents)
albums map[bool]*albums // albums, shared or not
uploadedMu sync.Mutex // to protect the below
uploaded dirtree.DirTree // record of uploaded items
createMu sync.Mutex // held when creating albums to prevent dupes
}

// Object describes a storage object
@@ -241,7 +246,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}

oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
baseClient := fshttp.NewClient(fs.Config)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Box")
}
@@ -250,11 +256,14 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if root == "." || root == "/" {
root = ""
}

f := &Fs{
name: name,
root: root,
opt: *opt,
unAuth: rest.NewClient(baseClient),
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
ts: ts,
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
startTime: time.Now(),
albums: map[bool]*albums{},
@@ -280,6 +289,85 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return f, nil
}

// fetchEndpoint gets the openid endpoint named from the Google config
func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, err error) {
// Get openID config without auth
opts := rest.Opts{
Method: "GET",
RootURL: "https://accounts.google.com/.well-known/openid-configuration",
}
var openIDconfig map[string]interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig)
return shouldRetry(resp, err)
})
if err != nil {
return "", errors.Wrap(err, "couldn't read openID config")
}

// Find userinfo endpoint
endpoint, ok := openIDconfig[name].(string)
if !ok {
return "", errors.Errorf("couldn't find %q from openID config", name)
}

return endpoint, nil
}

// UserInfo fetches info about the current user with oauth2
func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err error) {
endpoint, err := f.fetchEndpoint(ctx, "userinfo_endpoint")
if err != nil {
return nil, err
}

// Fetch the user info with auth
opts := rest.Opts{
Method: "GET",
RootURL: endpoint,
}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &userInfo)
return shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't read user info")
}
return userInfo, nil
}

// Disconnect kills the token and refresh token
func (f *Fs) Disconnect(ctx context.Context) (err error) {
endpoint, err := f.fetchEndpoint(ctx, "revocation_endpoint")
if err != nil {
return err
}
token, err := f.ts.Token()
if err != nil {
return err
}

// Revoke the token and the refresh token
opts := rest.Opts{
Method: "POST",
RootURL: endpoint,
MultipartParams: url.Values{
"token": []string{token.AccessToken},
"token_type_hint": []string{"access_token"},
},
}
var res interface{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, nil, &res)
return shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "couldn't revoke token")
}
fs.Infof(f, "res = %+v", res)
return nil
}

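Disconnect above discovers the revocation endpoint from Google's public openid-configuration document and POSTs the access token to it. A simplified sketch of the same flow with the plain standard library (rclone itself routes these calls through its pacer and rest client, and the error handling here is deliberately minimal):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"net/url"
)

// revokeToken looks up the revocation_endpoint in the openid config
// and posts the token to it, as the Disconnect method above does.
func revokeToken(accessToken string) error {
	resp, err := http.Get("https://accounts.google.com/.well-known/openid-configuration")
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	var cfg map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&cfg); err != nil {
		return err
	}
	endpoint, ok := cfg["revocation_endpoint"].(string)
	if !ok {
		return fmt.Errorf("no revocation_endpoint in openid config")
	}
	res, err := http.PostForm(endpoint, url.Values{
		"token":           {accessToken},
		"token_type_hint": {"access_token"},
	})
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return fmt.Errorf("revocation failed: %s", res.Status)
	}
	return nil
}

func main() {
	// "ya29.example-token" is a placeholder, not a real credential.
	if err := revokeToken("ya29.example-token"); err != nil {
		log.Fatal(err)
	}
}
```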
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
@@ -335,7 +423,7 @@ func findID(name string) string {

// list the albums into an internal cache
// FIXME cache invalidation
func (f *Fs) listAlbums(shared bool) (all *albums, err error) {
func (f *Fs) listAlbums(ctx context.Context, shared bool) (all *albums, err error) {
f.albumsMu.Lock()
defer f.albumsMu.Unlock()
all, ok := f.albums[shared]
@@ -357,7 +445,7 @@ func (f *Fs) listAlbums(shared bool) (all *albums, err error) {
var result api.ListAlbums
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &result)
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -394,7 +482,7 @@ type listFn func(remote string, object *api.MediaItem, isDirectory bool) error
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(filter api.SearchFilter, fn listFn) (err error) {
func (f *Fs) list(ctx context.Context, filter api.SearchFilter, fn listFn) (err error) {
opts := rest.Opts{
Method: "POST",
Path: "/mediaItems:search",
@@ -406,7 +494,7 @@ func (f *Fs) list(filter api.SearchFilter, fn listFn) (err error) {
var result api.MediaItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &filter, &result)
resp, err = f.srv.CallJSON(ctx, &opts, &filter, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -455,7 +543,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *api.MediaI
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {
// List the objects
err = f.list(filter, func(remote string, item *api.MediaItem, isDirectory bool) error {
err = f.list(ctx, filter, func(remote string, item *api.MediaItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, prefix+remote, item, isDirectory)
if err != nil {
return err
@@ -550,7 +638,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
var result api.Album
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, request, &result)
resp, err = f.srv.CallJSON(ctx, &opts, request, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -566,7 +654,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *api.Album, err error) {
f.createMu.Lock()
defer f.createMu.Unlock()
albums, err := f.listAlbums(false)
albums, err := f.listAlbums(ctx, false)
if err != nil {
return nil, err
}
@@ -620,7 +708,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
return err
}
albumTitle := match[1]
allAlbums, err := f.listAlbums(false)
allAlbums, err := f.listAlbums(ctx, false)
if err != nil {
return err
}
@@ -685,7 +773,7 @@ func (o *Object) Size() int64 {
RootURL: o.downloadURL(),
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(&opts)
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -736,7 +824,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
var item api.MediaItem
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, nil, &item)
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &item)
return shouldRetry(resp, err)
})
if err != nil {
@@ -813,7 +901,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(&opts)
resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -866,9 +954,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var token []byte
var resp *http.Response
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err = o.fs.srv.Call(&opts)
resp, err = o.fs.srv.Call(ctx, &opts)
if err != nil {
_ = resp.Body.Close()
return shouldRetry(resp, err)
}
token, err = rest.ReadBody(resp)
@@ -899,7 +986,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
var result api.BatchCreateResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, request, &result)
resp, err = o.fs.srv.CallJSON(ctx, &opts, request, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -942,7 +1029,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
return shouldRetry(resp, err)
})
if err != nil {
@@ -963,8 +1050,10 @@ func (o *Object) ID() string {

// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
_ fs.Fs = &Fs{}
_ fs.UserInfoer = &Fs{}
_ fs.Disconnecter = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
)

@@ -20,7 +20,7 @@ import (
// file pattern parsing
type lister interface {
listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error)
listAlbums(shared bool) (all *albums, err error)
listAlbums(ctx context.Context, shared bool) (all *albums, err error)
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
dirTime() time.Time
}
@@ -296,7 +296,7 @@ func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.S
// is a prefix of another album, or actual files, or a combination of
// the two.
func albumsToEntries(ctx context.Context, f lister, shared bool, prefix string, albumPath string) (entries fs.DirEntries, err error) {
albums, err := f.listAlbums(shared)
albums, err := f.listAlbums(ctx, shared)
if err != nil {
return nil, err
}

@@ -44,7 +44,7 @@ func (f *testLister) listDir(ctx context.Context, prefix string, filter api.Sear
}

// mock listAlbums for testing
func (f *testLister) listAlbums(shared bool) (all *albums, err error) {
func (f *testLister) listAlbums(ctx context.Context, shared bool) (all *albums, err error) {
return f.albums, nil
}

@@ -13,6 +13,7 @@ import (
"path"
"strconv"
"strings"
"sync"
"time"

"github.com/pkg/errors"
@@ -46,6 +47,21 @@ func init() {
Value: "https://user:pass@example.com",
Help: "Connect to example.com using a username and password",
}},
}, {
Name: "headers",
Help: `Set HTTP headers for all transactions

Use this to set additional HTTP headers for all transactions

The input format is a comma separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used.

For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.

You can set multiple headers, eg '"Cookie","name=value","Authorization","xxx"'.
`,
Default: fs.CommaSepList{},
Advanced: true,
}, {
Name: "no_slash",
Help: `Set this if the site doesn't end directories with /
@@ -62,6 +78,26 @@ Note that this may cause rclone to confuse genuine HTML files with
directories.`,
Default: false,
Advanced: true,
}, {
Name: "no_head",
Help: `Don't use HEAD requests to find file sizes in dir listing

If your site is being very slow to load then you can try this option.
Normally rclone does a HEAD request for each potential file in a
directory listing to:

- find its size
- check it really exists
- check to see if it is a directory

If you set this option, rclone will not do the HEAD request. This will mean

- directory listings are much quicker
- rclone won't have the times or sizes of any files
- some files that don't exist may be in the listing
`,
Default: false,
Advanced: true,
}},
}
fs.Register(fsi)
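The new "headers" option above takes a flat key,value,key,value list. A small sketch of how such a list is validated and applied to a request; rclone parses the option with CSV rules via fs.CommaSepList, whereas the plain strings.Split used here for the demo ignores quoting:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// applyHeaders adds a flat key,value,key,value list to a request,
// rejecting lists with an odd number of entries as the backend does.
func applyHeaders(req *http.Request, pairs []string) error {
	if len(pairs)%2 != 0 {
		return fmt.Errorf("odd number of headers supplied")
	}
	for i := 0; i < len(pairs); i += 2 {
		req.Header.Add(pairs[i], pairs[i+1])
	}
	return nil
}

func main() {
	req, _ := http.NewRequest("GET", "https://example.com/", nil)
	pairs := strings.Split("Cookie,name=value,Authorization,xxx", ",")
	if err := applyHeaders(req, pairs); err != nil {
		panic(err)
	}
	fmt.Println(req.Header) // map[Authorization:[xxx] Cookie:[name=value]]
}
```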
@@ -69,8 +105,10 @@ directories.`,

// Options defines the configuration for this backend
type Options struct {
Endpoint string `config:"url"`
NoSlash bool `config:"no_slash"`
Endpoint string `config:"url"`
NoSlash bool `config:"no_slash"`
NoHead bool `config:"no_head"`
Headers fs.CommaSepList `config:"headers"`
}

// Fs stores the interface to the remote HTTP files
@@ -108,6 +146,7 @@ func statusError(res *http.Response, err error) error {
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -115,6 +154,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}

if len(opt.Headers)%2 != 0 {
return nil, errors.New("odd number of headers supplied")
}

if !strings.HasSuffix(opt.Endpoint, "/") {
opt.Endpoint += "/"
}
@@ -140,10 +183,15 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return http.ErrUseLastResponse
}
// check to see if it points to a file
res, err := noRedir.Head(u.String())
err = statusError(res, err)
req, err := http.NewRequest("HEAD", u.String(), nil)
if err == nil {
isFile = true
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
addHeaders(req, opt)
res, err := noRedir.Do(req)
err = statusError(res, err)
if err == nil {
isFile = true
}
}
}

@@ -213,7 +261,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
fs: f,
remote: remote,
}
err := o.stat()
err := o.stat(ctx)
if err != nil {
return nil, err
}
@@ -316,8 +364,22 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
return names, nil
}

// Adds the configured headers to the request if any
func addHeaders(req *http.Request, opt *Options) {
for i := 0; i < len(opt.Headers); i += 2 {
key := opt.Headers[i]
value := opt.Headers[i+1]
req.Header.Add(key, value)
}
}

// Adds the configured headers to the request if any
func (f *Fs) addHeaders(req *http.Request) {
addHeaders(req, &f.opt)
}

// Read the directory passed in
func (f *Fs) readDir(dir string) (names []string, err error) {
func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error) {
URL := f.url(dir)
u, err := url.Parse(URL)
if err != nil {
@@ -326,7 +388,14 @@ func (f *Fs) readDir(dir string) (names []string, err error) {
if !strings.HasSuffix(URL, "/") {
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
}
res, err := f.httpClient.Get(URL)
// Do the request
req, err := http.NewRequest("GET", URL, nil)
if err != nil {
return nil, errors.Wrap(err, "readDir failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
f.addHeaders(req)
res, err := f.httpClient.Do(req)
if err == nil {
defer fs.CheckClose(res.Body, &err)
if res.StatusCode == http.StatusNotFound {
@@ -364,34 +433,53 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if !strings.HasSuffix(dir, "/") && dir != "" {
dir += "/"
}
names, err := f.readDir(dir)
names, err := f.readDir(ctx, dir)
if err != nil {
return nil, errors.Wrapf(err, "error listing %q", dir)
}
var (
entriesMu sync.Mutex // to protect entries
wg sync.WaitGroup
in = make(chan string, fs.Config.Checkers)
)
add := func(entry fs.DirEntry) {
entriesMu.Lock()
entries = append(entries, entry)
entriesMu.Unlock()
}
for i := 0; i < fs.Config.Checkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for remote := range in {
file := &Object{
fs: f,
remote: remote,
}
switch err := file.stat(ctx); err {
case nil:
add(file)
case fs.ErrorNotAFile:
// ...found a directory not a file
add(fs.NewDir(remote, timeUnset))
default:
fs.Debugf(remote, "skipping because of error: %v", err)
}
}
}()
}
for _, name := range names {
isDir := name[len(name)-1] == '/'
name = strings.TrimRight(name, "/")
remote := path.Join(dir, name)
if isDir {
dir := fs.NewDir(remote, timeUnset)
entries = append(entries, dir)
add(fs.NewDir(remote, timeUnset))
} else {
file := &Object{
fs: f,
remote: remote,
}
switch err = file.stat(); err {
case nil:
entries = append(entries, file)
case fs.ErrorNotAFile:
// ...found a directory not a file
dir := fs.NewDir(remote, timeUnset)
entries = append(entries, dir)
default:
fs.Debugf(remote, "skipping because of error: %v", err)
}
in <- remote
}
}
close(in)
wg.Wait()
return entries, nil
}

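The List hunk above replaces serial per-file stats with a fixed pool of goroutines reading names off a channel, appending results under a mutex. The same worker-pool shape, reduced to a self-contained sketch where `checkers` stands in for fs.Config.Checkers and `stat` for the per-entry HEAD request:

```go
package main

import (
	"fmt"
	"sync"
)

// statAll runs stat over names with a bounded pool of workers and
// collects the results under a mutex, mirroring the listing code above.
func statAll(names []string, checkers int, stat func(string) string) []string {
	var (
		mu      sync.Mutex
		wg      sync.WaitGroup
		in      = make(chan string, checkers)
		results []string
	)
	for i := 0; i < checkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for name := range in {
				r := stat(name) // e.g. a HEAD request per entry
				mu.Lock()
				results = append(results, r)
				mu.Unlock()
			}
		}()
	}
	for _, name := range names {
		in <- name
	}
	close(in) // no more work; lets the workers drain and exit
	wg.Wait()
	return results
}

func main() {
	out := statAll([]string{"a", "b", "c"}, 4, func(s string) string {
		return s + ": ok"
	})
	fmt.Println(out) // order is nondeterministic
}
```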
@@ -448,9 +536,21 @@ func (o *Object) url() string {
}

// stat updates the info field in the Object
func (o *Object) stat() error {
func (o *Object) stat(ctx context.Context) error {
if o.fs.opt.NoHead {
o.size = -1
o.modTime = timeUnset
o.contentType = fs.MimeType(ctx, o)
return nil
}
url := o.url()
res, err := o.fs.httpClient.Head(url)
req, err := http.NewRequest("HEAD", url, nil)
if err != nil {
return errors.Wrap(err, "stat failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
o.fs.addHeaders(req)
res, err := o.fs.httpClient.Do(req)
if err == nil && res.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
@@ -497,11 +597,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return nil, errors.Wrap(err, "Open failed")
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext

// Add optional headers
for k, v := range fs.OpenOptionHeaders(options) {
req.Header.Add(k, v)
}
o.fs.addHeaders(req)

// Do the request
res, err := o.fs.httpClient.Do(req)

@@ -10,6 +10,7 @@ import (
"os"
"path/filepath"
"sort"
"strings"
"testing"
"time"

@@ -26,6 +27,7 @@ var (
remoteName = "TestHTTP"
testPath = "test"
filesPath = filepath.Join(testPath, "files")
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
)

// prepareServer prepares the test server and returns a function to tidy it up afterwards
@@ -33,8 +35,16 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
// file server for test/files
fileServer := http.FileServer(http.Dir(filesPath))

// test the headers are there then pass on to fileServer
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
fileServer.ServeHTTP(w, r)
})

// Make the test server
ts := httptest.NewServer(fileServer)
ts := httptest.NewServer(handler)

// Configure the remote
config.LoadConfig()
@@ -45,8 +55,9 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
// config.FileSet(remoteName, "url", ts.URL)

m := configmap.Simple{
"type": "http",
"url": ts.URL,
"type": "http",
"url": ts.URL,
"headers": strings.Join(headers, ","),
}

// return a function to tidy up

@@ -1,6 +1,7 @@
package hubic

import (
"context"
"net/http"
"time"

@@ -26,7 +27,7 @@ func newAuth(f *Fs) *auth {
func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials()
err = a.f.getCredentials(context.TODO())
if err == nil {
break
}

@@ -7,6 +7,7 @@ package hubic
// to be revisited after some actual experience.

import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
@@ -115,11 +116,12 @@ func (f *Fs) String() string {
// getCredentials reads the OpenStack Credentials using the Hubic API
//
// The credentials are read into the Fs
func (f *Fs) getCredentials() (err error) {
func (f *Fs) getCredentials(ctx context.Context) (err error) {
req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil)
if err != nil {
return err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
resp, err := f.client.Do(req)
if err != nil {
return err

@@ -46,6 +46,82 @@ func (t Time) String() string { return time.Time(t).Format(timeFormat) }
|
||||
// APIString returns Time string in Jottacloud API format
|
||||
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
|
||||
|
||||
// TokenJSON is the struct representing the HTTP response from OAuth2
|
||||
// providers returning a token in JSON form.
|
||||
type TokenJSON struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
TokenType string `json:"token_type"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
|
||||
}
|
||||
|
||||
// JSON structures returned by new API
|
||||
|
||||
// AllocateFileRequest to prepare an upload to Jottacloud
|
||||
type AllocateFileRequest struct {
|
||||
Bytes int64 `json:"bytes"`
|
||||
Created string `json:"created"`
|
||||
Md5 string `json:"md5"`
|
||||
Modified string `json:"modified"`
|
||||
Path string `json:"path"`
|
||||
}
|
||||
|
||||
// AllocateFileResponse for upload requests
|
||||
type AllocateFileResponse struct {
|
||||
Name string `json:"name"`
|
||||
Path string `json:"path"`
|
||||
State string `json:"state"`
|
||||
UploadID string `json:"upload_id"`
|
||||
UploadURL string `json:"upload_url"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
ResumePos int64 `json:"resume_pos"`
|
||||
}
|
||||
|
||||
// UploadResponse after an upload
|
||||
type UploadResponse struct {
|
||||
Name string `json:"name"`
|
||||
Path string `json:"path"`
|
||||
Kind string `json:"kind"`
|
||||
ContentID string `json:"content_id"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
Md5 string `json:"md5"`
|
||||
Created int64 `json:"created"`
|
||||
Modified int64 `json:"modified"`
|
||||
Deleted interface{} `json:"deleted"`
|
||||
Mime string `json:"mime"`
|
||||
}
|
||||
|
||||
// DeviceRegistrationResponse is the response to registering a device
|
||||
type DeviceRegistrationResponse struct {
|
||||
ClientID string `json:"client_id"`
|
||||
ClientSecret string `json:"client_secret"`
|
||||
}
|
||||
|
||||
// CustomerInfo provides general information about the account. Required for finding the correct internal username.
|
||||
type CustomerInfo struct {
|
||||
Username string `json:"username"`
|
||||
Email string `json:"email"`
|
||||
Name string `json:"name"`
|
||||
CountryCode string `json:"country_code"`
|
||||
LanguageCode string `json:"language_code"`
|
||||
CustomerGroupCode string `json:"customer_group_code"`
|
||||
BrandCode string `json:"brand_code"`
|
||||
AccountType string `json:"account_type"`
|
||||
SubscriptionType string `json:"subscription_type"`
|
||||
Usage int64 `json:"usage"`
|
||||
Qouta int64 `json:"quota"`
|
||||
BusinessUsage int64 `json:"business_usage"`
|
||||
BusinessQouta int64 `json:"business_quota"`
|
||||
WriteLocked bool `json:"write_locked"`
|
||||
ReadLocked bool `json:"read_locked"`
|
||||
LockedCause interface{} `json:"locked_cause"`
|
||||
WebHash string `json:"web_hash"`
|
||||
AndroidHash string `json:"android_hash"`
|
||||
IOSHash string `json:"ios_hash"`
|
||||
}
|
||||
|
||||
// XML structures returned by the old API
|
||||
|
||||
// Flag is a hacky type for checking if an attribute is present
|
||||
type Flag bool
|
||||
|
||||
@@ -64,15 +140,6 @@ func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
|
||||
return attr, errors.New("unimplemented")
|
||||
}
|
||||
|
||||
// TokenJSON is the struct representing the HTTP response from OAuth2
|
||||
// providers returning a token in JSON form.
|
||||
type TokenJSON struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
TokenType string `json:"token_type"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
|
||||
}
|
||||
|
||||
/*
|
||||
GET http://www.jottacloud.com/JFS/<account>
|
||||
|
||||
@@ -102,8 +169,8 @@ GET http://www.jottacloud.com/JFS/<account>
|
||||
</user>
|
||||
*/
|
||||
|
||||
// AccountInfo represents a Jottacloud account
|
||||
type AccountInfo struct {
|
||||
// DriveInfo represents a Jottacloud account
|
||||
type DriveInfo struct {
|
||||
Username string `xml:"username"`
|
||||
AccountType string `xml:"account-type"`
|
||||
Locked bool `xml:"locked"`
|
||||
@@ -280,43 +347,3 @@ func (e *Error) Error() string {
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// AllocateFileRequest to prepare an upload to Jottacloud
|
||||
type AllocateFileRequest struct {
|
||||
Bytes int64 `json:"bytes"`
|
||||
Created string `json:"created"`
|
||||
Md5 string `json:"md5"`
|
||||
Modified string `json:"modified"`
|
||||
Path string `json:"path"`
|
||||
}
|
||||
|
||||
// AllocateFileResponse for upload requests
|
||||
type AllocateFileResponse struct {
|
||||
Name string `json:"name"`
|
||||
Path string `json:"path"`
|
||||
State string `json:"state"`
|
||||
UploadID string `json:"upload_id"`
|
||||
UploadURL string `json:"upload_url"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
ResumePos int64 `json:"resume_pos"`
|
||||
}
|
||||
|
||||
// UploadResponse after an upload
|
||||
type UploadResponse struct {
|
||||
Name string `json:"name"`
|
||||
Path string `json:"path"`
|
||||
Kind string `json:"kind"`
|
||||
ContentID string `json:"content_id"`
|
||||
Bytes int64 `json:"bytes"`
|
||||
Md5 string `json:"md5"`
|
||||
Created int64 `json:"created"`
|
||||
Modified int64 `json:"modified"`
|
||||
Deleted interface{} `json:"deleted"`
|
||||
Mime string `json:"mime"`
|
||||
}
|
||||
|
||||
// DeviceRegistrationResponse is the response to registering a device
|
||||
type DeviceRegistrationResponse struct {
|
||||
ClientID string `json:"client_id"`
|
||||
ClientSecret string `json:"client_secret"`
|
||||
}
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
@@ -36,6 +37,8 @@ import (
"golang.org/x/oauth2"
)

const enc = encodings.JottaCloud

// Globals
const (
minSleep = 10 * time.Millisecond
@@ -44,14 +47,13 @@ const (
defaultDevice = "Jotta"
defaultMountpoint = "Archive"
rootURL = "https://www.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/files/v1/"
apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/"
tokenURL = "https://api.jottacloud.com/auth/v1/token"
registerURL = "https://api.jottacloud.com/auth/v1/register"
cachePrefix = "rclone-jcmd5-"
rcloneClientID = "nibfk8biu12ju7hpqomr8b1e40"
rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
configUsername = "user"
configClientID = "client_id"
configClientSecret = "client_secret"
configDevice = "device"
@@ -78,6 +80,7 @@ func init() {
Description: "JottaCloud",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
@@ -87,34 +90,9 @@
}

srv := rest.NewClient(fshttp.NewClient(fs.Config))

// ask if we should create a device specific token: https://github.com/rclone/rclone/issues/2995
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has its own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
if config.Confirm() {
// random generator to generate random device names
seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
randonDeviceNamePartLength := 21
randomDeviceNamePart := make([]byte, randonDeviceNamePartLength)
for i := range randomDeviceNamePart {
randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
}
randomDeviceName := "rclone-" + string(randomDeviceNamePart)
fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)

values := url.Values{}
values.Set("device_id", randomDeviceName)

// all information comes from https://github.com/ttyridal/aiojotta/wiki/Jotta-protocol-3.-Authentication#token-authentication
opts := rest.Opts{
Method: "POST",
RootURL: registerURL,
ContentType: "application/x-www-form-urlencoded",
ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
Parameters: values,
}

var deviceRegistration api.DeviceRegistrationResponse
_, err := srv.CallJSON(&opts, nil, &deviceRegistration)
deviceRegistration, err := registerDevice(ctx, srv)
if err != nil {
log.Fatalf("Failed to register device: %v", err)
}
@@ -135,53 +113,14 @@ func init() {
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)

username, ok := m.Get(configUsername)
if !ok {
log.Fatalf("No username defined")
}
fmt.Printf("Username> ")
username := config.ReadLine()
password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")

// prepare our token request with username and password
values := url.Values{}
values.Set("grant_type", "PASSWORD")
values.Set("password", password)
values.Set("username", username)
values.Set("client_id", oauthConfig.ClientID)
values.Set("client_secret", oauthConfig.ClientSecret)
opts := rest.Opts{
Method: "POST",
RootURL: oauthConfig.Endpoint.AuthURL,
ContentType: "application/x-www-form-urlencoded",
Parameters: values,
}

var jsonToken api.TokenJSON
resp, err := srv.CallJSON(&opts, nil, &jsonToken)
token, err := doAuth(ctx, srv, username, password)
if err != nil {
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
if resp != nil {
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
fmt.Printf("Enter verification code> ")
authCode := config.ReadLine()
authCode = strings.Replace(authCode, "-", "", -1) // the sms received contains a pair of 3 digit numbers separated by '-' but wants a single 6 digit number
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
resp, err = srv.CallJSON(&opts, nil, &jsonToken)
}
}
if err != nil {
log.Fatalf("Failed to get resource token: %v", err)
}
log.Fatalf("Failed to get oauth token: %s", err)
}

var token oauth2.Token
token.AccessToken = jsonToken.AccessToken
token.RefreshToken = jsonToken.RefreshToken
token.TokenType = jsonToken.TokenType
token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)

// finally save them in the config
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
log.Fatalf("Error while saving token: %s", err)
@@ -195,39 +134,17 @@ func init() {
}

srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)

acc, err := getAccountInfo(srv, username)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
log.Fatalf("Error getting devices: %s", err)
log.Fatalf("Failed to setup mountpoint: %s", err)
}
fmt.Printf("Please select the device to use. Normally this will be Jotta\n")
var deviceNames []string
for i := range acc.Devices {
deviceNames = append(deviceNames, acc.Devices[i].Name)
}
result := config.Choose("Devices", deviceNames, nil, false)
m.Set(configDevice, result)

dev, err := getDeviceInfo(srv, path.Join(username, result))
if err != nil {
log.Fatalf("Error getting Mountpoint: %s", err)
}
if len(dev.MountPoints) == 0 {
log.Fatalf("No Mountpoints found for this device.")
}
fmt.Printf("Please select the mountpoint to use. Normally this will be Archive\n")
var mountpointNames []string
for i := range dev.MountPoints {
mountpointNames = append(mountpointNames, dev.MountPoints[i].Name)
}
result = config.Choose("Mountpoints", mountpointNames, nil, false)
m.Set(configMountpoint, result)
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
},
Options: []fs.Option{{
Name: configUsername,
Help: "User Name:",
}, {
Name: "md5_memory_limit",
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
Default: fs.SizeSuffix(10 * 1024 * 1024),
@@ -253,7 +170,6 @@ func init() {

// Options defines the configuration for this backend
type Options struct {
User string `config:"user"`
Device string `config:"device"`
Mountpoint string `config:"mountpoint"`
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
@@ -333,8 +249,169 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// registerDevice registers a new device for use with the jottacloud API
func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
// random generator to generate random device names
seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
randonDeviceNamePartLength := 21
randomDeviceNamePart := make([]byte, randonDeviceNamePartLength)
for i := range randomDeviceNamePart {
randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
}
randomDeviceName := "rclone-" + string(randomDeviceNamePart)
fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)

values := url.Values{}
values.Set("device_id", randomDeviceName)

opts := rest.Opts{
Method: "POST",
RootURL: registerURL,
ContentType: "application/x-www-form-urlencoded",
ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
Parameters: values,
}

var deviceRegistration *api.DeviceRegistrationResponse
_, err = srv.CallJSON(ctx, &opts, nil, &deviceRegistration)
return deviceRegistration, err
}

// doAuth runs the actual token request
func doAuth(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
// prepare our token request with username and password
values := url.Values{}
values.Set("grant_type", "PASSWORD")
values.Set("password", password)
values.Set("username", username)
values.Set("client_id", oauthConfig.ClientID)
values.Set("client_secret", oauthConfig.ClientSecret)
opts := rest.Opts{
Method: "POST",
RootURL: oauthConfig.Endpoint.AuthURL,
ContentType: "application/x-www-form-urlencoded",
Parameters: values,
}

// do the first request
var jsonToken api.TokenJSON
resp, err := srv.CallJSON(ctx, &opts, nil, &jsonToken)
if err != nil {
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
if resp != nil {
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
fmt.Printf("Enter verification code> ")
authCode := config.ReadLine()

authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
resp, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
}
}
}

token.AccessToken = jsonToken.AccessToken
token.RefreshToken = jsonToken.RefreshToken
token.TokenType = jsonToken.TokenType
token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
return token, err
}

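doAuth above ends by converting the provider's JSON token response, which carries expires_in as a lifetime in seconds, into an oauth2.Token with an absolute Expiry. That conversion in isolation, as a sketch whose tokenJSON struct mirrors api.TokenJSON (the field values are made up):

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/oauth2"
)

// tokenJSON mirrors the shape of api.TokenJSON above.
type tokenJSON struct {
	AccessToken  string
	TokenType    string
	RefreshToken string
	ExpiresIn    int32 // lifetime in seconds
}

// tokenFromJSON turns a relative expires_in into an absolute Expiry,
// which is what oauth2.Token.Valid() checks against.
func tokenFromJSON(j tokenJSON) oauth2.Token {
	return oauth2.Token{
		AccessToken:  j.AccessToken,
		TokenType:    j.TokenType,
		RefreshToken: j.RefreshToken,
		Expiry:       time.Now().Add(time.Duration(j.ExpiresIn) * time.Second),
	}
}

func main() {
	t := tokenFromJSON(tokenJSON{AccessToken: "xxx", TokenType: "Bearer", ExpiresIn: 3600})
	fmt.Println(t.Valid()) // true until the hour is up
}
```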
// setupMountpoint sets up a custom device and mountpoint if desired by the user
func setupMountpoint(ctx context.Context, srv *rest.Client, apiSrv *rest.Client) (device, mountpoint string, err error) {
	cust, err := getCustomerInfo(ctx, apiSrv)
	if err != nil {
		return "", "", err
	}

	acc, err := getDriveInfo(ctx, srv, cust.Username)
	if err != nil {
		return "", "", err
	}
	var deviceNames []string
	for i := range acc.Devices {
		deviceNames = append(deviceNames, acc.Devices[i].Name)
	}
	fmt.Printf("Please select the device to use. Normally this will be Jotta\n")
	device = config.Choose("Devices", deviceNames, nil, false)

	dev, err := getDeviceInfo(ctx, srv, path.Join(cust.Username, device))
	if err != nil {
		return "", "", err
	}
	if len(dev.MountPoints) == 0 {
		return "", "", errors.New("no mountpoints for selected device")
	}
	var mountpointNames []string
	for i := range dev.MountPoints {
		mountpointNames = append(mountpointNames, dev.MountPoints[i].Name)
	}
	fmt.Printf("Please select the mountpoint to use. Normally this will be Archive\n")
	mountpoint = config.Choose("Mountpoints", mountpointNames, nil, false)

	return device, mountpoint, err
}

// getCustomerInfo queries general information about the account
func getCustomerInfo(ctx context.Context, srv *rest.Client) (info *api.CustomerInfo, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   "account/v1/customer",
	}

	_, err = srv.CallJSON(ctx, &opts, nil, &info)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't get customer info")
	}

	return info, nil
}

// getDriveInfo queries general information about the account and the available devices and mountpoints.
func getDriveInfo(ctx context.Context, srv *rest.Client, username string) (info *api.DriveInfo, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   username,
	}

	_, err = srv.CallXML(ctx, &opts, nil, &info)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't get drive info")
	}

	return info, nil
}

// getDeviceInfo queries information about a jottacloud device
func getDeviceInfo(ctx context.Context, srv *rest.Client, path string) (info *api.JottaDevice, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   urlPathEscape(path),
	}

	_, err = srv.CallXML(ctx, &opts, nil, &info)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't get device info")
	}

	return info, nil
}

// setEndpointURL generates the API endpoint URL
func (f *Fs) setEndpointURL() {
	if f.opt.Device == "" {
		f.opt.Device = defaultDevice
	}
	if f.opt.Mountpoint == "" {
		f.opt.Mountpoint = defaultMountpoint
	}
	f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
}

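The endpoint URL is just the joined user/device/mountpoint triple, percent-escaped per path segment. A standard-library sketch of that construction (the values are illustrative; urlPathEscape in the backend wraps similar logic):

package main

import (
	"fmt"
	"net/url"
	"path"
)

func main() {
	// Illustrative values; the backend fills these from config.
	user, device, mountpoint := "alice", "Jotta", "Archive folder"

	p := path.Join(user, device, mountpoint)
	// Escape each path segment but keep the "/" separators intact.
	u := url.URL{Path: p}
	fmt.Println(u.EscapedPath()) // alice/Jotta/Archive%20folder
}
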
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.JottaFile, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   f.filePath(path),
@@ -342,7 +419,7 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
	var result api.JottaFile
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(&opts, nil, &result)
		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
		return shouldRetry(resp, err)
	})

@@ -362,54 +439,6 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
	return &result, nil
}

// getAccountInfo queries general information about the account.
// Takes rest.Client and username as parameters to be easily usable
// during config
func getAccountInfo(srv *rest.Client, username string) (info *api.AccountInfo, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   urlPathEscape(username),
	}

	_, err = srv.CallXML(&opts, nil, &info)
	if err != nil {
		return nil, err
	}

	return info, nil
}

// getDeviceInfo queries information about a jottacloud device
func getDeviceInfo(srv *rest.Client, path string) (info *api.JottaDevice, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   urlPathEscape(path),
	}

	_, err = srv.CallXML(&opts, nil, &info)
	if err != nil {
		return nil, err
	}

	return info, nil
}

// setEndpointURL reads the account id and generates the API endpoint URL
func (f *Fs) setEndpointURL() (err error) {
	info, err := getAccountInfo(f.srv, f.user)
	if err != nil {
		return errors.Wrap(err, "failed to get endpoint url")
	}
	if f.opt.Device == "" {
		f.opt.Device = defaultDevice
	}
	if f.opt.Mountpoint == "" {
		f.opt.Mountpoint = defaultMountpoint
	}
	f.endpointURL = urlPathEscape(path.Join(info.Username, f.opt.Device, f.opt.Mountpoint))
	return nil
}

// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
	// Decode error response
@@ -434,7 +463,7 @@ func urlPathEscape(in string) string {

// filePathRaw returns an unescaped file path (f.root, file)
func (f *Fs) filePathRaw(file string) string {
	return path.Join(f.endpointURL, replaceReservedChars(path.Join(f.root, file)))
	return path.Join(f.endpointURL, enc.FromStandardPath(path.Join(f.root, file)))
}

// filePath returns an escaped file path (f.root, file)
@@ -442,11 +471,6 @@ func (f *Fs) filePath(file string) string {
	return urlPathEscape(f.filePathRaw(file))
}

// filePath returns an escaped file path (f.root, remote)
func (o *Object) filePath() string {
	return o.fs.filePath(o.remote)
}

// Jottacloud requires the grant_type 'refresh_token' string
// to be uppercase and throws a 400 Bad Request if we use the
// lower case used by the oauth2 module
@@ -472,6 +496,7 @@ func grantTypeFilter(req *http.Request) {

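The comment above is all the diff shows of grantTypeFilter's body. A hedged sketch of what such a request filter has to do: re-read the form body, uppercase the grant_type value, and restore the body and Content-Length. This is an illustration of the idea, not necessarily the backend's exact code:

package main

import (
	"bytes"
	"io/ioutil"
	"net/http"
	"strconv"
	"strings"
)

// uppercaseGrantType rewrites "grant_type=refresh_token" to
// "grant_type=REFRESH_TOKEN" in a form-encoded request body.
func uppercaseGrantType(req *http.Request) {
	if req.Body == nil {
		return
	}
	body, err := ioutil.ReadAll(req.Body)
	_ = req.Body.Close()
	if err != nil {
		return
	}
	newBody := strings.Replace(string(body), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1)
	// Put the rewritten body back and keep the length headers consistent.
	req.Body = ioutil.NopCloser(bytes.NewBufferString(newBody))
	req.ContentLength = int64(len(newBody))
	req.Header.Set("Content-Length", strconv.Itoa(len(newBody)))
}
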
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	ctx := context.TODO()
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
@@ -511,7 +536,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	f := &Fs{
		name: name,
		root: root,
		user: opt.User,
		opt:  *opt,
		srv:    rest.NewClient(oAuthClient).SetRoot(rootURL),
		apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
@@ -527,14 +551,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {

	// Renew the token in the background
	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
		_, err := f.readMetaDataForPath("")
		_, err := f.readMetaDataForPath(ctx, "")
		return err
	})

	err = f.setEndpointURL()
	cust, err := getCustomerInfo(ctx, f.apiSrv)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't get account info")
		return nil, err
	}
	f.user = cust.Username
	f.setEndpointURL()

	if root != "" && !rootIsDir {
		// Check to see if the root is actually an existing file
@@ -561,7 +587,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.JottaFile) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
@@ -571,7 +597,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, e
		// Set info
		err = o.setMetaData(info)
	} else {
		err = o.readMetaData(false) // reads info and meta, returning an error
		err = o.readMetaData(ctx, false) // reads info and meta, returning an error
	}
	if err != nil {
		return nil, err
@@ -582,11 +608,11 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, e
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(remote, nil)
	return f.newObjectWithInfo(ctx, remote, nil)
}

// CreateDir makes a directory
func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) {
func (f *Fs) CreateDir(ctx context.Context, path string) (jf *api.JottaFolder, err error) {
	// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
	var resp *http.Response
	opts := rest.Opts{
@@ -598,7 +624,7 @@ func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) {
	opts.Parameters.Set("mkDir", "true")

	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(&opts, nil, &jf)
		resp, err = f.srv.CallXML(ctx, &opts, nil, &jf)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -619,7 +645,6 @@ func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	//fmt.Printf("List: %s\n", f.filePath(dir))
	opts := rest.Opts{
		Method: "GET",
		Path:   f.filePath(dir),
@@ -628,7 +653,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	var resp *http.Response
	var result api.JottaFolder
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(&opts, nil, &result)
		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
		return shouldRetry(resp, err)
	})

@@ -651,7 +676,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
		if item.Deleted {
			continue
		}
		remote := path.Join(dir, restoreReservedChars(item.Name))
		remote := path.Join(dir, enc.ToStandardName(item.Name))
		d := fs.NewDir(remote, time.Time(item.ModifiedAt))
		entries = append(entries, d)
	}
@@ -661,14 +686,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
		if item.Deleted || item.State != "COMPLETED" {
			continue
		}
		remote := path.Join(dir, restoreReservedChars(item.Name))
		o, err := f.newObjectWithInfo(remote, item)
		remote := path.Join(dir, enc.ToStandardName(item.Name))
		o, err := f.newObjectWithInfo(ctx, remote, item)
		if err != nil {
			continue
		}
		entries = append(entries, o)
	}
	//fmt.Printf("Entries: %+v\n", entries)
	return entries, nil
}

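List now maps remote names through the new encodings layer rather than the old replaceReservedChars/restoreReservedChars pair. A sketch of the round trip; the JottaCloud encoder constant is an assumption here, analogous to the encodings.Koofr and encodings.Local* constants this change introduces elsewhere:

package main

import (
	"fmt"
	"path"

	"github.com/rclone/rclone/fs/encodings"
)

func main() {
	const enc = encodings.JottaCloud // assumed name; the diff only shows the Koofr and local constants

	// A name that is legal for rclone but reserved on the remote is
	// mapped to safe equivalents on the way out...
	wire := enc.FromStandardPath(path.Join("dir", "a:b?c"))
	fmt.Println(wire)

	// ...and mapped back when listing, so rclone always sees the
	// original name.
	fmt.Println(enc.ToStandardName(path.Base(wire)))
}
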
@@ -677,7 +701,7 @@ type listFileDirFn func(fs.DirEntry) error

// List the objects and directories into entries, from a
// special kind of JottaFolder representing a FileDirList
func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, fn listFileDirFn) error {
func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolder *api.JottaFolder, fn listFileDirFn) error {
	pathPrefix := "/" + f.filePathRaw("") // Non-escaped prefix of API paths to be cut off, to be left with the remote path including the remoteStartPath
	pathPrefixLength := len(pathPrefix)
	startPath := path.Join(pathPrefix, remoteStartPath) // Non-escaped API path up to and including remoteStartPath, to decide if it should be created as a new dir object
@@ -687,7 +711,7 @@ func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, f
		if folder.Deleted {
			return nil
		}
		folderPath := restoreReservedChars(path.Join(folder.Path, folder.Name))
		folderPath := enc.ToStandardPath(path.Join(folder.Path, folder.Name))
		folderPathLength := len(folderPath)
		var remoteDir string
		if folderPathLength > pathPrefixLength {
@@ -705,8 +729,8 @@ func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, f
			if file.Deleted || file.State != "COMPLETED" {
				continue
			}
			remoteFile := path.Join(remoteDir, restoreReservedChars(file.Name))
			o, err := f.newObjectWithInfo(remoteFile, file)
			remoteFile := path.Join(remoteDir, enc.ToStandardName(file.Name))
			o, err := f.newObjectWithInfo(ctx, remoteFile, file)
			if err != nil {
				return err
			}
@@ -724,17 +748,6 @@ func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, f
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	opts := rest.Opts{
		Method: "GET",
@@ -746,7 +759,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
	var resp *http.Response
	var result api.JottaFolder // Could be JottaFileDirList, but JottaFolder is close enough
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(&opts, nil, &result)
		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -759,7 +772,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
		return errors.Wrap(err, "couldn't list files")
	}
	list := walk.NewListRHelper(callback)
	err = f.listFileDir(dir, &result, func(entry fs.DirEntry) error {
	err = f.listFileDir(ctx, dir, &result, func(entry fs.DirEntry) error {
		return list.Add(entry)
	})
	if err != nil {
@@ -813,7 +826,7 @@ func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	_, err := f.CreateDir(dir)
	_, err := f.CreateDir(ctx, dir)
	return err
}

@@ -852,14 +865,13 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)

	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(&opts)
		resp, err = f.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "couldn't purge directory")
	}

	// TODO: Parse response?
	return nil
}

@@ -876,27 +888,23 @@ func (f *Fs) Precision() time.Duration {
}

// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
	return f.purgeCheck(ctx, "", false)
}

// copyOrMove copies or moves directories or files depending on the method parameter
func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err error) {
func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *api.JottaFile, err error) {
	opts := rest.Opts{
		Method:     "POST",
		Path:       src,
		Parameters: url.Values{},
	}

	opts.Parameters.Set(method, "/"+path.Join(f.endpointURL, replaceReservedChars(path.Join(f.root, dest))))
	opts.Parameters.Set(method, "/"+path.Join(f.endpointURL, enc.FromStandardPath(path.Join(f.root, dest))))

	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(&opts, nil, &info)
		resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -925,13 +933,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
	if err != nil {
		return nil, err
	}
	info, err := f.copyOrMove("cp", srcObj.filePath(), remote)
	info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)

	if err != nil {
		return nil, errors.Wrap(err, "couldn't copy file")
	}

	return f.newObjectWithInfo(remote, info)
	return f.newObjectWithInfo(ctx, remote, info)
	//return f.newObjectWithInfo(remote, &result)
}

@@ -955,13 +963,13 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
	if err != nil {
		return nil, err
	}
	info, err := f.copyOrMove("mv", srcObj.filePath(), remote)
	info, err := f.copyOrMove(ctx, "mv", srcObj.filePath(), remote)

	if err != nil {
		return nil, errors.Wrap(err, "couldn't move file")
	}

	return f.newObjectWithInfo(remote, info)
	return f.newObjectWithInfo(ctx, remote, info)
	//return f.newObjectWithInfo(remote, result)
}

@@ -999,7 +1007,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
		return fs.ErrorDirExists
	}

	_, err = f.copyOrMove("mvDir", path.Join(f.endpointURL, replaceReservedChars(srcPath))+"/", dstRemote)
	_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, enc.FromStandardPath(srcPath))+"/", dstRemote)

	if err != nil {
		return errors.Wrap(err, "couldn't move directory")
@@ -1024,7 +1032,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
	var resp *http.Response
	var result api.JottaFile
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallXML(&opts, nil, &result)
		resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
		return shouldRetry(resp, err)
	})

@@ -1055,7 +1063,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er

// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	info, err := getAccountInfo(f.srv, f.user)
	info, err := getDriveInfo(ctx, f.srv, f.user)
	if err != nil {
		return nil, err
	}
@@ -1095,6 +1103,11 @@ func (o *Object) Remote() string {
	return o.remote
}

// filePath returns an escaped file path (f.root, remote)
func (o *Object) filePath() string {
	return o.fs.filePath(o.remote)
}

// Hash returns the MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
@@ -1105,7 +1118,8 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	err := o.readMetaData(false)
	ctx := context.TODO()
	err := o.readMetaData(ctx, false)
	if err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return 0
@@ -1128,11 +1142,12 @@ func (o *Object) setMetaData(info *api.JottaFile) (err error) {
	return nil
}

func (o *Object) readMetaData(force bool) (err error) {
// readMetaData reads and updates the metadata for an object
func (o *Object) readMetaData(ctx context.Context, force bool) (err error) {
	if o.hasMetaData && !force {
		return nil
	}
	info, err := o.fs.readMetaDataForPath(o.remote)
	info, err := o.fs.readMetaDataForPath(ctx, o.remote)
	if err != nil {
		return err
	}
@@ -1147,7 +1162,7 @@ func (o *Object) readMetaData(force bool) (err error) {
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	err := o.readMetaData(false)
	err := o.readMetaData(ctx, false)
	if err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return time.Now()
@@ -1179,7 +1194,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	opts.Parameters.Set("mode", "bin")

	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(&opts)
		resp, err = o.fs.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -1272,7 +1287,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	var resp *http.Response
	opts := rest.Opts{
		Method:       "POST",
		Path:         "allocate",
		Path:         "files/v1/allocate",
		ExtraHeaders: make(map[string]string),
	}
	fileDate := api.Time(src.ModTime(ctx)).APIString()
@@ -1283,13 +1298,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		Created:  fileDate,
		Modified: fileDate,
		Md5:      md5String,
		Path:     path.Join(o.fs.opt.Mountpoint, replaceReservedChars(path.Join(o.fs.root, o.remote))),
		Path:     path.Join(o.fs.opt.Mountpoint, enc.FromStandardPath(path.Join(o.fs.root, o.remote))),
	}

	// send it
	var response api.AllocateFileResponse
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err = o.fs.apiSrv.CallJSON(&opts, &request, &response)
		resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, &request, &response)
		return shouldRetry(resp, err)
	})
	if err != nil {
@@ -1320,7 +1335,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	}

	// send the remaining bytes
	resp, err = o.fs.apiSrv.CallJSON(&opts, nil, &result)
	resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, nil, &result)
	if err != nil {
		return err
	}
@@ -1331,8 +1346,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		o.md5 = result.Md5
		o.modTime = time.Unix(result.Modified/1000, 0)
	} else {
		// If the file state is COMPLETE we don't need to upload it because the file was allready found but we still ned to update our metadata
		return o.readMetaData(true)
		// If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata
		return o.readMetaData(ctx, true)
	}

	return nil
@@ -1354,7 +1369,7 @@ func (o *Object) Remove(ctx context.Context) error {
	}

	return o.fs.pacer.Call(func() (bool, error) {
		resp, err := o.fs.srv.CallXML(&opts, nil, nil)
		resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
		return shouldRetry(resp, err)
	})
}

@@ -1,77 +0,0 @@
/*
Translate file names for JottaCloud adapted from OneDrive


The following characters are JottaCloud reserved characters, and can't
be used in JottaCloud folder and file names.

jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~"


*/

package jottacloud

import (
	"regexp"
	"strings"
)

// charMap holds replacements for characters
//
// Onedrive has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
	charMap = map[rune]rune{
		'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
		'*':  '＊', // FULLWIDTH ASTERISK
		'<':  '＜', // FULLWIDTH LESS-THAN SIGN
		'>':  '＞', // FULLWIDTH GREATER-THAN SIGN
		'?':  '？', // FULLWIDTH QUESTION MARK
		':':  '：', // FULLWIDTH COLON
		';':  '；', // FULLWIDTH SEMICOLON
		'|':  '｜', // FULLWIDTH VERTICAL LINE
		'"':  '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
		' ':  '␠', // SYMBOL FOR SPACE
	}
	invCharMap           map[rune]rune
	fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
	fixEndingWithSpace   = regexp.MustCompile(` (/|$)`)
)

func init() {
	// Create inverse charMap
	invCharMap = make(map[rune]rune, len(charMap))
	for k, v := range charMap {
		invCharMap[v] = k
	}
}

// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
	// Filenames can't start with space
	in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
	// Filenames can't end with space
	in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
	return strings.Map(func(c rune) rune {
		if replacement, ok := charMap[c]; ok && c != ' ' {
			return replacement
		}
		return c
	}, in)
}

// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
	return strings.Map(func(c rune) rune {
		if replacement, ok := invCharMap[c]; ok {
			return replacement
		}
		return c
	}, in)
}

@@ -1,28 +0,0 @@
package jottacloud

import "testing"

func TestReplace(t *testing.T) {
	for _, test := range []struct {
		in  string
		out string
	}{
		{"", ""},
		{"abc 123", "abc 123"},
		{`\*<>?:;|"`, `＼＊＜＞？：；｜＂`},
		{`\*<>?:;|"\*<>?:;|"`, `＼＊＜＞？：；｜＂＼＊＜＞？：；｜＂`},
		{" leading space", "␠leading space"},
		{"trailing space ", "trailing space␠"},
		{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
		{"trailing space /trailing space /trailing space ", "trailing space␠/trailing space␠/trailing space␠"},
	} {
		got := replaceReservedChars(test.in)
		if got != test.out {
			t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
		}
		got2 := restoreReservedChars(got)
		if got2 != test.in {
			t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
		}
	}
}

@@ -15,12 +15,15 @@ import (
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/encodings"
	"github.com/rclone/rclone/fs/hash"

	httpclient "github.com/koofr/go-httpclient"
	koofrclient "github.com/koofr/go-koofrclient"
)

const enc = encodings.Koofr

// Register Fs with rclone
func init() {
	fs.Register(&fs.RegInfo{
@@ -154,6 +157,7 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	var sOff, eOff int64 = 0, -1

	fs.FixRangeOption(options, o.Size())
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
@@ -170,13 +174,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
	if sOff == 0 && eOff < 0 {
		return o.fs.client.FilesGet(o.fs.mountID, o.fullPath())
	}
	if sOff < 0 {
		sOff = o.Size() - eOff
		eOff = o.Size()
	}
	if eOff > o.Size() {
		eOff = o.Size()
	}
	span := &koofrclient.FileSpan{
		Start: sOff,
		End:   eOff,
@@ -248,7 +245,7 @@ func (f *Fs) Hashes() hash.Set {

// fullPath constructs a full, absolute path from a path relative to the Fs root
func (f *Fs) fullPath(part string) string {
	return path.Join("/", f.root, part)
	return enc.FromStandardPath(path.Join("/", f.root, part))
}

// NewFs constructs a new filesystem given a root path and configuration options
@@ -299,7 +296,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
		}
		return nil, errors.New("Failed to find mount " + opt.MountID)
	}
	rootFile, err := f.client.FilesInfo(f.mountID, "/"+f.root)
	rootFile, err := f.client.FilesInfo(f.mountID, enc.FromStandardPath("/"+f.root))
	if err == nil && rootFile.Type != "dir" {
		f.root = dir(f.root)
		err = fs.ErrorIsFile
@@ -317,13 +314,14 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	}
	entries = make([]fs.DirEntry, len(files))
	for i, file := range files {
		remote := path.Join(dir, enc.ToStandardName(file.Name))
		if file.Type == "dir" {
			entries[i] = fs.NewDir(path.Join(dir, file.Name), time.Unix(0, 0))
			entries[i] = fs.NewDir(remote, time.Unix(0, 0))
		} else {
			entries[i] = &Object{
				fs:     f,
				info:   file,
				remote: path.Join(dir, file.Name),
				remote: remote,
			}
		}
	}

backend/local/aaaa (new file, 0 lines)

backend/local/encode_darwin.go (new file, 9 lines)
@@ -0,0 +1,9 @@
//+build darwin

package local

import (
	"github.com/rclone/rclone/fs/encodings"
)

const enc = encodings.LocalMacOS

backend/local/encode_other.go (new file, 9 lines)
@@ -0,0 +1,9 @@
//+build !windows,!darwin

package local

import (
	"github.com/rclone/rclone/fs/encodings"
)

const enc = encodings.LocalUnix

backend/local/encode_windows.go (new file, 9 lines)
@@ -0,0 +1,9 @@
//+build windows

package local

import (
	"github.com/rclone/rclone/fs/encodings"
)

const enc = encodings.LocalWindows

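Each of these files carries a build constraint, so exactly one definition of enc is compiled into the package per platform and call sites never branch on the OS. The same pattern in miniature, with hypothetical file, package and constant names:

// sep_windows.go (hypothetical)
//+build windows

package example

// Separators lists the path separators the platform accepts.
const Separators = `/\`

// sep_other.go (hypothetical)
//+build !windows

package example

// Separators lists the path separators the platform accepts.
const Separators = "/"
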
backend/local/fadvise_other.go (new file, 12 lines)
@@ -0,0 +1,12 @@
//+build !linux

package local

import (
	"io"
	"os"
)

func newFadviseReadCloser(o *Object, f *os.File, offset, limit int64) io.ReadCloser {
	return f
}

backend/local/fadvise_unix.go (new file, 165 lines)
@@ -0,0 +1,165 @@
//+build linux

package local

import (
	"io"
	"os"

	"github.com/rclone/rclone/fs"
	"golang.org/x/sys/unix"
)

// fadvise provides means to automate freeing pages in the kernel page cache
// for a given file descriptor as the file is sequentially processed (read or
// written).
//
// When copying a file to a remote backend all the file content is read by
// the kernel and put into the page cache to make future reads faster.
// This causes memory pressure visible in both memory usage and CPU consumption
// and can even cause OOM errors in applications consuming large amounts of memory.
//
// In case of an upload to a remote backend, there is no benefit from caching.
//
// fadvise orchestrates calling POSIX_FADV_DONTNEED:
//
// POSIX_FADV_DONTNEED attempts to free cached pages associated
// with the specified region. This is useful, for example, while
// streaming large files. A program may periodically request the
// kernel to free cached data that has already been used, so that
// more useful cached pages are not discarded instead.
//
// Requests to discard partial pages are ignored. It is
// preferable to preserve needed data than discard unneeded data.
// If the application requires that data be considered for
// discarding, then offset and len must be page-aligned.
//
// The implementation may attempt to write back dirty pages in
// the specified region, but this is not guaranteed. Any
// unwritten dirty pages will not be freed. If the application
// wishes to ensure that dirty pages will be released, it should
// call fsync(2) or fdatasync(2) first.
type fadvise struct {
	o          *Object
	fd         int
	lastPos    int64
	curPos     int64
	windowSize int64

	freePagesCh chan offsetLength
	doneCh      chan struct{}
}

type offsetLength struct {
	offset int64
	length int64
}

const (
	defaultAllowPages      = 32
	defaultWorkerQueueSize = 64
)

func newFadvise(o *Object, fd int, offset int64) *fadvise {
	f := &fadvise{
		o:          o,
		fd:         fd,
		lastPos:    offset,
		curPos:     offset,
		windowSize: int64(os.Getpagesize()) * defaultAllowPages,

		freePagesCh: make(chan offsetLength, defaultWorkerQueueSize),
		doneCh:      make(chan struct{}),
	}
	go f.worker()

	return f
}

// sequential configures the readahead strategy in the Linux kernel.
//
// Under Linux, POSIX_FADV_NORMAL sets the readahead window to the
// default size for the backing device; POSIX_FADV_SEQUENTIAL doubles
// this size, and POSIX_FADV_RANDOM disables file readahead entirely.
func (f *fadvise) sequential(limit int64) bool {
	l := int64(0)
	if limit > 0 {
		l = limit
	}
	if err := unix.Fadvise(f.fd, f.curPos, l, unix.FADV_SEQUENTIAL); err != nil {
		fs.Debugf(f.o, "fadvise sequential failed on file descriptor %d: %s", f.fd, err)
		return false
	}

	return true
}

func (f *fadvise) next(n int) {
	f.curPos += int64(n)
	f.freePagesIfNeeded()
}

func (f *fadvise) freePagesIfNeeded() {
	if f.curPos >= f.lastPos+f.windowSize {
		f.freePages()
	}
}

func (f *fadvise) freePages() {
	f.freePagesCh <- offsetLength{f.lastPos, f.curPos - f.lastPos}
	f.lastPos = f.curPos
}

func (f *fadvise) worker() {
	for p := range f.freePagesCh {
		if err := unix.Fadvise(f.fd, p.offset, p.length, unix.FADV_DONTNEED); err != nil {
			fs.Debugf(f.o, "fadvise dontneed failed on file descriptor %d: %s", f.fd, err)
		}
	}

	close(f.doneCh)
}

func (f *fadvise) wait() {
	close(f.freePagesCh)
	<-f.doneCh
}

type fadviseReadCloser struct {
	*fadvise
	inner io.ReadCloser
}

// newFadviseReadCloser wraps os.File so that reading from that file
// removes already consumed pages from the kernel page cache.
// In addition it instructs the kernel to double the readahead window to
// make sequential reads faster.
// See also fadvise.
func newFadviseReadCloser(o *Object, f *os.File, offset, limit int64) io.ReadCloser {
	r := fadviseReadCloser{
		fadvise: newFadvise(o, int(f.Fd()), offset),
		inner:   f,
	}

	// If the syscall failed, it's likely that subsequent syscalls on that
	// file descriptor would also fail. In that case return the provided
	// os.File pointer instead.
	if !r.sequential(limit) {
		r.wait()
		return f
	}

	return r
}

func (f fadviseReadCloser) Read(p []byte) (n int, err error) {
	n, err = f.inner.Read(p)
	f.next(n)
	return
}

func (f fadviseReadCloser) Close() error {
	f.freePages()
	f.wait()
	return f.inner.Close()
}

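A sketch of how the wrapper above would be used when streaming a large file; this helper is illustrative, not part of the commit:

package local // assumes the definitions above are in scope

import (
	"io"
	"io/ioutil"
	"os"
)

func streamWithoutCachePressure(o *Object, name string) error {
	f, err := os.Open(name)
	if err != nil {
		return err
	}
	// Read the whole file; consumed pages are dropped from the
	// kernel page cache in windowSize-sized chunks as we go.
	rc := newFadviseReadCloser(o, f, 0, 0)
	defer rc.Close()
	_, err = io.Copy(ioutil.Discard, rc)
	return err
}
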
@@ -142,19 +142,19 @@ type Fs struct {
	dev         uint64        // device number of root node
	precisionOk sync.Once     // Whether we need to read the precision
	precision   time.Duration // precision of local filesystem
	wmu         sync.Mutex    // used for locking access to 'warned'.
	warnedMu    sync.Mutex    // used for locking access to 'warned'.
	warned      map[string]struct{} // whether we have warned about this string

	// do os.Lstat or os.Stat
	lstat          func(name string) (os.FileInfo, error)
	dirNames       *mapper    // directory name mapping
	objectHashesMu sync.Mutex // global lock for Object.hashes
}

// Object represents a local filesystem object
type Object struct {
	fs     *Fs    // The Fs this object is part of
	remote string // The remote path - properly UTF-8 encoded - for rclone
	path   string // The local path - may not be properly UTF-8 encoded - for OS
	remote string // The remote path (encoded path)
	path   string // The local path (OS path)
	size    int64 // file metadata - always present
	mode    os.FileMode
	modTime time.Time
@@ -183,17 +183,17 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	}

	f := &Fs{
		name:     name,
		opt:      *opt,
		warned:   make(map[string]struct{}),
		dev:      devUnset,
		lstat:    os.Lstat,
		dirNames: newMapper(),
		name:   name,
		opt:    *opt,
		warned: make(map[string]struct{}),
		dev:    devUnset,
		lstat:  os.Lstat,
	}
	f.root = f.cleanPath(root)
	f.root = cleanRootPath(root, f.opt.NoUNC)
	f.features = (&fs.Features{
		CaseInsensitive:         f.caseInsensitive(),
		CanHaveEmptyDirectories: true,
		IsLocal:                 true,
	}).Fill(f)
	if opt.FollowSymlinks {
		f.lstat = os.Stat
@@ -234,12 +234,12 @@ func (f *Fs) Name() string {

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
	return enc.ToStandardPath(filepath.ToSlash(f.root))
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("Local file system at %s", f.root)
	return fmt.Sprintf("Local file system at %s", f.Root())
}

// Features returns the optional features of this Fs
@@ -267,33 +267,27 @@ func (f *Fs) caseInsensitive() bool {
// and returns a new path, removing the suffix as needed.
// It also returns whether this is a translated link at all
//
// for regular files, dstPath is returned unchanged
func translateLink(remote, dstPath string) (newDstPath string, isTranslatedLink bool) {
// for regular files, localPath is returned unchanged
func translateLink(remote, localPath string) (newLocalPath string, isTranslatedLink bool) {
	isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
	newDstPath = strings.TrimSuffix(dstPath, linkSuffix)
	return newDstPath, isTranslatedLink
	newLocalPath = strings.TrimSuffix(localPath, linkSuffix)
	return newLocalPath, isTranslatedLink
}

// newObject makes a half completed Object
//
// if dstPath is empty then it is made from remote
func (f *Fs) newObject(remote, dstPath string) *Object {
func (f *Fs) newObject(remote string) *Object {
	translatedLink := false

	if dstPath == "" {
		dstPath = f.cleanPath(filepath.Join(f.root, remote))
	}
	remote = f.cleanRemote(remote)
	localPath := f.localPath(remote)

	if f.opt.TranslateSymlinks {
		// Possibly receive a new name for dstPath
		dstPath, translatedLink = translateLink(remote, dstPath)
		// Possibly receive a new name for localPath
		localPath, translatedLink = translateLink(remote, localPath)
	}

	return &Object{
		fs:     f,
		remote: remote,
		path:   dstPath,
		path:   localPath,
		translatedLink: translatedLink,
	}
}
@@ -301,8 +295,8 @@ func (f *Fs) newObject(remote, dstPath string) *Object {
// Return an Object from a path
//
// May return nil if an error occurred
func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Object, error) {
	o := f.newObject(remote, dstPath)
func (f *Fs) newObjectWithInfo(remote string, info os.FileInfo) (fs.Object, error) {
	o := f.newObject(remote)
	if info != nil {
		o.setMetadata(info)
	} else {
@@ -331,7 +325,7 @@ func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Obj
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(remote, "", nil)
	return f.newObjectWithInfo(remote, nil)
}

// List the objects and directories in dir into entries. The
@@ -344,10 +338,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {

	dir = f.dirNames.Load(dir)
	fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
	remote := f.cleanRemote(dir)
	fsDirPath := f.localPath(dir)
	_, err = os.Stat(fsDirPath)
	if err != nil {
		return nil, fs.ErrorDirNotFound
@@ -409,11 +400,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	for _, fi := range fis {
		name := fi.Name()
		mode := fi.Mode()
		newRemote := path.Join(remote, name)
		newPath := filepath.Join(fsDirPath, name)
		newRemote := f.cleanRemote(dir, name)
		// Follow symlinks if required
		if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
			fi, err = os.Stat(newPath)
			localPath := filepath.Join(fsDirPath, name)
			fi, err = os.Stat(localPath)
			if os.IsNotExist(err) {
				// Skip bad symlinks
				err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
@@ -430,7 +421,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
			// Ignore directories which are symlinks. These are junction points under Windows which
			// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
			if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
				d := fs.NewDir(f.dirNames.Save(newRemote, f.cleanRemote(newRemote)), fi.ModTime())
				d := fs.NewDir(newRemote, fi.ModTime())
				entries = append(entries, d)
			}
		} else {
@@ -438,7 +429,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
			if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
				newRemote += linkSuffix
			}
			fso, err := f.newObjectWithInfo(newRemote, newPath, fi)
			fso, err := f.newObjectWithInfo(newRemote, fi)
			if err != nil {
				return nil, err
			}
@@ -451,67 +442,28 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	return entries, nil
}

// cleanRemote makes a string valid UTF-8 for remote strings.
//
// Any invalid UTF-8 characters will be replaced with utf8.RuneError.
// It also normalises the UTF-8 and converts the slashes if necessary.
func (f *Fs) cleanRemote(name string) string {
	if !utf8.ValidString(name) {
		f.wmu.Lock()
		if _, ok := f.warned[name]; !ok {
			fs.Logf(f, "Replacing invalid UTF-8 characters in %q", name)
			f.warned[name] = struct{}{}
func (f *Fs) cleanRemote(dir, filename string) (remote string) {
	remote = path.Join(dir, enc.ToStandardName(filename))

	if !utf8.ValidString(filename) {
		f.warnedMu.Lock()
		if _, ok := f.warned[remote]; !ok {
			fs.Logf(f, "Replacing invalid UTF-8 characters in %q", remote)
			f.warned[remote] = struct{}{}
		}
		f.wmu.Unlock()
		name = string([]rune(name))
		f.warnedMu.Unlock()
	}
	name = filepath.ToSlash(name)
	return name
	return
}

// mapper maps raw to cleaned directory names
type mapper struct {
	mu sync.RWMutex      // mutex to protect the below
	m  map[string]string // map of un-normalised directory names
}

func newMapper() *mapper {
	return &mapper{
		m: make(map[string]string),
	}
}

// Lookup a directory name to make a local name (reverses
// cleanDirName)
//
// FIXME this is temporary before we make a proper Directory object
func (m *mapper) Load(in string) string {
	m.mu.RLock()
	out, ok := m.m[in]
	m.mu.RUnlock()
	if ok {
		return out
	}
	return in
}

// Cleans a directory name, recording if it needed to be altered
//
// FIXME this is temporary before we make a proper Directory object
func (m *mapper) Save(in, out string) string {
	if in != out {
		m.mu.Lock()
		m.m[out] = in
		m.mu.Unlock()
	}
	return out
func (f *Fs) localPath(name string) string {
	return filepath.Join(f.root, filepath.FromSlash(enc.FromStandardPath(name)))
}

// Put the Object to the local filesystem
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	remote := src.Remote()
	// Temporary Object under construction - info filled in by Update()
	o := f.newObject(remote, "")
	o := f.newObject(src.Remote())
	err := o.Update(ctx, in, src, options...)
	if err != nil {
		return nil, err
@@ -527,13 +479,13 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
	root := f.cleanPath(filepath.Join(f.root, dir))
	err := os.MkdirAll(root, 0777)
	localPath := f.localPath(dir)
	err := os.MkdirAll(localPath, 0777)
	if err != nil {
		return err
	}
	if dir == "" {
		fi, err := f.lstat(root)
		fi, err := f.lstat(localPath)
		if err != nil {
			return err
		}
@@ -546,8 +498,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
//
// If it isn't empty it will return an error
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	root := f.cleanPath(filepath.Join(f.root, dir))
	return os.Remove(root)
	return os.Remove(f.localPath(dir))
}

// Precision of the file system
@@ -643,7 +594,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
	}

	// Temporary Object under construction
	dstObj := f.newObject(remote, "")
	dstObj := f.newObject(remote)

	// Check it is a file if it exists
	err := dstObj.lstat()
@@ -700,8 +651,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	srcPath := f.cleanPath(filepath.Join(srcFs.root, srcRemote))
	dstPath := f.cleanPath(filepath.Join(f.root, dstRemote))
	srcPath := srcFs.localPath(srcRemote)
	dstPath := f.localPath(dstRemote)

	// Check if destination exists
	_, err := os.Lstat(dstPath)
@@ -735,7 +686,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Supported
	return hash.Supported()
}

// ------------------------------------------------------------
@@ -777,7 +728,11 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
	var in io.ReadCloser

	if !o.translatedLink {
		in, err = file.Open(o.path)
		var fd *os.File
		fd, err = file.Open(o.path)
		if fd != nil {
			in = newFadviseReadCloser(o, fd, 0, 0)
		}
	} else {
		in, err = o.openTranslatedLink(0, -1)
	}
@@ -831,13 +786,6 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {

// Storable returns a boolean showing if this object is storable
func (o *Object) Storable() bool {
	// Check for control characters in the remote name and show non storable
	for _, c := range o.Remote() {
		if c >= 0x00 && c < 0x20 || c == 0x7F {
			fs.Logf(o.fs, "Can't store file with control characters: %q", o.Remote())
			return false
		}
	}
	mode := o.mode
	if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks {
		if !o.fs.opt.SkipSymlinks {
@@ -913,7 +861,7 @@ func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	var offset, limit int64 = 0, -1
	hashes := hash.Supported
	var hasher *hash.MultiHasher
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
@@ -921,7 +869,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
		case *fs.RangeOption:
			offset, limit = x.Decode(o.size)
		case *fs.HashesOption:
			hashes = x.Hashes
			if x.Hashes.Count() > 0 {
				hasher, err = hash.NewMultiHasherTypes(x.Hashes)
				if err != nil {
					return nil, err
				}
			}
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
@@ -938,22 +891,22 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	if err != nil {
		return
	}
	wrappedFd := readers.NewLimitedReadCloser(fd, limit)
	wrappedFd := readers.NewLimitedReadCloser(newFadviseReadCloser(o, fd, offset, limit), limit)
	if offset != 0 {
		// seek the object
		_, err = fd.Seek(offset, io.SeekStart)
		// don't attempt to make checksums
		return wrappedFd, err
	}
	hash, err := hash.NewMultiHasherTypes(hashes)
	if err != nil {
		return nil, err
	if hasher == nil {
		// no need to wrap since we don't need checksums
		return wrappedFd, nil
	}
	// Update the md5sum as we go along
	// Update the hashes as we go along
	in = &localOpenFile{
		o:    o,
		in:   wrappedFd,
		hash: hash,
		hash: hasher,
		fd:   fd,
	}
	return in, nil
@@ -975,18 +928,23 @@ func (nwc nopWriterCloser) Close() error {
}

// Update the object from in with modTime and size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	var out io.WriteCloser
	var hasher *hash.MultiHasher

	hashes := hash.Supported
	for _, option := range options {
		switch x := option.(type) {
		case *fs.HashesOption:
			hashes = x.Hashes
			if x.Hashes.Count() > 0 {
				hasher, err = hash.NewMultiHasherTypes(x.Hashes)
				if err != nil {
					return err
				}
			}
		}
	}

	err := o.mkdirAll()
	err = o.mkdirAll()
	if err != nil {
		return err
	}
@@ -1011,11 +969,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	}

	// Calculate the hash of the object we are reading as we go along
	hash, err := hash.NewMultiHasherTypes(hashes)
	if err != nil {
		return err
	if hasher != nil {
		in = io.TeeReader(in, hasher)
	}
	in = io.TeeReader(in, hash)

	_, err = io.Copy(out, in)
	closeErr := out.Close()
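Update hashes the data in flight by teeing the input reader through the hasher, so no second pass over the file is needed. The same pattern with the standard library alone:

package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	src := strings.NewReader("file contents")
	h := md5.New()

	// Every byte that io.Copy pulls from src is also written to h,
	// so the checksum is ready as soon as the copy finishes.
	in := io.TeeReader(src, h)
	if _, err := io.Copy(ioutil.Discard, in); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", h.Sum(nil))
}
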
@@ -1051,9 +1007,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
|
||||
// All successful so update the hashes
|
||||
o.fs.objectHashesMu.Lock()
|
||||
o.hashes = hash.Sums()
|
||||
o.fs.objectHashesMu.Unlock()
|
||||
if hasher != nil {
|
||||
o.fs.objectHashesMu.Lock()
|
||||
o.hashes = hasher.Sums()
|
||||
o.fs.objectHashesMu.Unlock()
|
||||
}
|
||||
|
||||
// Set the mtime
|
||||
err = o.SetModTime(ctx, src.ModTime(ctx))
|
||||
@@ -1072,7 +1030,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
// It truncates any existing object
|
||||
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
|
||||
// Temporary Object under construction
|
||||
o := f.newObject(remote, "")
|
||||
o := f.newObject(remote)
|
||||
|
||||
err := o.mkdirAll()
|
||||
if err != nil {
|
||||
@@ -1124,49 +1082,32 @@ func (o *Object) Remove(ctx context.Context) error {
|
||||
return remove(o.path)
|
||||
}
|
||||
|
||||
// cleanPathFragment cleans an OS path fragment which is part of a
|
||||
// bigger path and not necessarily absolute
|
||||
func cleanPathFragment(s string) string {
|
||||
if s == "" {
|
||||
return s
|
||||
}
|
||||
s = filepath.Clean(s)
|
||||
func cleanRootPath(s string, noUNC bool) string {
|
||||
if runtime.GOOS == "windows" {
|
||||
s = strings.Replace(s, `/`, `\`, -1)
|
||||
}
|
||||
return s
|
||||
}
|
||||
s = filepath.ToSlash(s)
|
||||
vol := filepath.VolumeName(s)
|
||||
s = vol + enc.FromStandardPath(s[len(vol):])
|
||||
s = filepath.FromSlash(s)
|
||||
|
||||
// cleanPath cleans and makes absolute the path passed in and returns
|
||||
// an OS path.
|
||||
//
|
||||
// The input might be in OS form or rclone form or a mixture, but the
|
||||
// output is in OS form.
|
||||
//
|
||||
// On windows it makes the path UNC also and replaces any characters
|
||||
// Windows can't deal with with their replacements.
|
||||
func (f *Fs) cleanPath(s string) string {
|
||||
s = cleanPathFragment(s)
|
||||
if runtime.GOOS == "windows" {
|
||||
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
|
||||
s2, err := filepath.Abs(s)
|
||||
if err == nil {
|
||||
s = s2
|
||||
}
|
||||
}
|
||||
if !f.opt.NoUNC {
|
||||
if !noUNC {
|
||||
// Convert to UNC
|
||||
s = uncPath(s)
|
||||
}
|
||||
s = cleanWindowsName(f, s)
|
||||
} else {
|
||||
if !filepath.IsAbs(s) {
|
||||
s2, err := filepath.Abs(s)
|
||||
if err == nil {
|
||||
s = s2
|
||||
}
|
||||
return s
|
||||
}
|
||||
if !filepath.IsAbs(s) {
|
||||
s2, err := filepath.Abs(s)
|
||||
if err == nil {
|
||||
s = s2
|
||||
}
|
||||
}
|
||||
s = enc.FromStandardPath(s)
|
||||
return s
|
||||
}
|
||||
|
||||
@@ -1175,63 +1116,21 @@ var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)
|
||||
|
||||
// uncPath converts an absolute Windows path
|
||||
// to a UNC long path.
|
||||
func uncPath(s string) string {
|
||||
// UNC can NOT use "/", so convert all to "\"
|
||||
s = strings.Replace(s, `/`, `\`, -1)
|
||||
|
||||
func uncPath(l string) string {
|
||||
// If prefix is "\\", we already have a UNC path or server.
|
||||
if strings.HasPrefix(s, `\\`) {
|
||||
if strings.HasPrefix(l, `\\`) {
|
||||
// If already long path, just keep it
|
||||
if strings.HasPrefix(s, `\\?\`) {
|
||||
return s
|
||||
if strings.HasPrefix(l, `\\?\`) {
|
||||
return l
|
||||
}
|
||||
|
||||
// Trim "\\" from path and add UNC prefix.
|
||||
return `\\?\UNC\` + strings.TrimPrefix(s, `\\`)
|
||||
return `\\?\UNC\` + strings.TrimPrefix(l, `\\`)
|
||||
}
|
||||
if isAbsWinDrive.MatchString(s) {
|
||||
return `\\?\` + s
|
||||
if isAbsWinDrive.MatchString(l) {
|
||||
return `\\?\` + l
|
||||
}
|
||||
return s
|
||||
}
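
A quick way to sanity-check the conversion above outside the test suite is a standalone sketch (editorial example, not part of this changeset) that copies the prefix and drive-letter logic from the new uncPath:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// copied from the new implementation above
var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)

func uncPath(l string) string {
	if strings.HasPrefix(l, `\\`) {
		if strings.HasPrefix(l, `\\?\`) {
			return l // already a long path
		}
		// server/share path: trim "\\" and add the UNC long-path prefix
		return `\\?\UNC\` + strings.TrimPrefix(l, `\\`)
	}
	if isAbsWinDrive.MatchString(l) {
		return `\\?\` + l // absolute drive path
	}
	return l // anything else passes through unchanged
}

func main() {
	fmt.Println(uncPath(`C:\Windows\Folder`))  // \\?\C:\Windows\Folder
	fmt.Println(uncPath(`\\server\share`))     // \\?\UNC\server\share
	fmt.Println(uncPath(`\\?\C:\Windows`))     // unchanged
}
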

// cleanWindowsName will clean invalid Windows characters replacing them with _
func cleanWindowsName(f *Fs, name string) string {
original := name
var name2 string
if strings.HasPrefix(name, `\\?\`) {
name2 = `\\?\`
name = strings.TrimPrefix(name, `\\?\`)
}
if strings.HasPrefix(name, `//?/`) {
name2 = `//?/`
name = strings.TrimPrefix(name, `//?/`)
}
// Colon is allowed as part of a drive name X:\
colonAt := strings.Index(name, ":")
if colonAt > 0 && colonAt < 3 && len(name) > colonAt+1 {
// Copy to name2, which is unfiltered
name2 += name[0 : colonAt+1]
name = name[colonAt+1:]
}

name2 += strings.Map(func(r rune) rune {
switch r {
case '<', '>', '"', '|', '?', '*', ':':
return '_'
}
return r
}, name)

if name2 != original && f != nil {
f.wmu.Lock()
if _, ok := f.warned[name]; !ok {
fs.Logf(f, "Replacing invalid characters in %q to %q", name, name2)
f.warned[name] = struct{}{}
}
f.wmu.Unlock()
}
return name2
return l
}

// Check the interfaces are satisfied

@@ -25,19 +25,6 @@ func TestMain(m *testing.M) {
fstest.TestMain(m)
}

func TestMapper(t *testing.T) {
m := newMapper()
assert.Equal(t, m.m, map[string]string{})
assert.Equal(t, "potato", m.Save("potato", "potato"))
assert.Equal(t, m.m, map[string]string{})
assert.Equal(t, "-r'áö", m.Save("-r?'a´o¨", "-r'áö"))
assert.Equal(t, m.m, map[string]string{
"-r'áö": "-r?'a´o¨",
})
assert.Equal(t, "potato", m.Load("potato"))
assert.Equal(t, "-r?'a´o¨", m.Load("-r'áö"))
}

// Test copy with source file that's updating
func TestUpdatingCheck(t *testing.T) {
r := fstest.NewRun(t)
@@ -57,7 +44,7 @@ func TestUpdatingCheck(t *testing.T) {
require.NoError(t, err)
o := &Object{size: fi.Size(), modTime: fi.ModTime(), fs: &Fs{}}
wrappedFd := readers.NewLimitedReadCloser(fd, -1)
hash, err := hash.NewMultiHasherTypes(hash.Supported)
hash, err := hash.NewMultiHasherTypes(hash.Supported())
require.NoError(t, err)
in := localOpenFile{
o: o,

@@ -1,29 +1,26 @@
package local

import (
"runtime"
"testing"
)

var uncTestPaths = []string{
"C:\\Ba*d\\P|a?t<h>\\Windows\\Folder",
"C:/Ba*d/P|a?t<h>/Windows\\Folder",
"C:\\Windows\\Folder",
"\\\\?\\C:\\Windows\\Folder",
"//?/C:/Windows/Folder",
"\\\\?\\UNC\\server\\share\\Desktop",
"\\\\?\\unC\\server\\share\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"\\\\server\\share\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"C:\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"C:\\AbsoluteToRoot\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"\\\\server\\share\\Desktop",
"\\\\?\\UNC\\\\share\\folder\\Desktop",
"\\\\server\\share",
`C:\Ba*d\P|a?t<h>\Windows\Folder`,
`C:\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\UNC\server\share\Desktop`,
`\\?\unC\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`C:\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`C:\AbsoluteToRoot\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\server\share\Desktop`,
`\\?\UNC\\share\folder\Desktop`,
`\\server\share`,
}

var uncTestPathsResults = []string{
`\\?\C:\Ba*d\P|a?t<h>\Windows\Folder`,
`\\?\C:\Ba*d\P|a?t<h>\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\UNC\server\share\Desktop`,
@@ -51,38 +48,23 @@ func TestUncPaths(t *testing.T) {
}
}

var utf8Tests = [][2]string{
{"ABC", "ABC"},
{string([]byte{0x80}), "�"},
{string([]byte{'a', 0x80, 'b'}), "a�b"},
}

func TestCleanRemote(t *testing.T) {
f := &Fs{}
f.warned = make(map[string]struct{})
for _, test := range utf8Tests {
got := f.cleanRemote(test[0])
expect := test[1]
if got != expect {
t.Fatalf("got %q, expected %q", got, expect)
}
}
}

// Test Windows character replacements
var testsWindows = [][2]string{
{`c:\temp`, `c:\temp`},
{`\\?\UNC\theserver\dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
{`//?/UNC/theserver/dir\file.txt`, `//?/UNC/theserver/dir\file.txt`},
{"c:/temp", "c:/temp"},
{"/temp/file.txt", "/temp/file.txt"},
{`!\"#¤%&/()=;:*^?+-`, "!\\_#¤%&/()=;__^_+-"},
{`<>"|?*:&\<>"|?*:&\<>"|?*:&`, "_______&\\_______&\\_______&"},
{`//?/UNC/theserver/dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
{`c:/temp`, `c:\temp`},
{`/temp/file.txt`, `\temp\file.txt`},
{`c:\!\"#¤%&/()=;:*^?+-`, `c:\!\"#¤%&\()=;:*^?+-`},
{`c:\<>"|?*:&\<>"|?*:&\<>"|?*:&`, `c:\<>"|?*:&\<>"|?*:&\<>"|?*:&`},
}

func TestCleanWindows(t *testing.T) {
if runtime.GOOS != "windows" {
t.Skipf("windows only")
}
for _, test := range testsWindows {
got := cleanWindowsName(nil, test[0])
got := cleanRootPath(test[0], true)
expect := test[1]
if got != expect {
t.Fatalf("got %q, expected %q", got, expect)
backend/mailru/api/bin.go (new file, 107 lines)
@@ -0,0 +1,107 @@
package api

// BIN protocol constants
const (
BinContentType = "application/x-www-form-urlencoded"
TreeIDLength = 12
DunnoNodeIDLength = 16
)

// Operations in binary protocol
const (
OperationAddFile = 103 // 0x67
OperationRename = 105 // 0x69
OperationCreateFolder = 106 // 0x6A
OperationFolderList = 117 // 0x75
OperationSharedFoldersList = 121 // 0x79
// TODO investigate opcodes below
Operation154MaybeItemInfo = 154 // 0x9A
Operation102MaybeAbout = 102 // 0x66
Operation104MaybeDelete = 104 // 0x68
)

// CreateDir protocol constants
const (
MkdirResultOK = 0
MkdirResultSourceNotExists = 1
MkdirResultAlreadyExists = 4
MkdirResultExistsDifferentCase = 9
MkdirResultInvalidName = 10
MkdirResultFailed254 = 254
)

// Move result codes
const (
MoveResultOK = 0
MoveResultSourceNotExists = 1
MoveResultFailed002 = 2
MoveResultAlreadyExists = 4
MoveResultFailed005 = 5
MoveResultFailed254 = 254
)

// AddFile result codes
const (
AddResultOK = 0
AddResultError01 = 1
AddResultDunno04 = 4
AddResultWrongPath = 5
AddResultNoFreeSpace = 7
AddResultDunno09 = 9
AddResultInvalidName = 10
AddResultNotModified = 12
AddResultFailedA = 253
AddResultFailedB = 254
)

// List request options
const (
ListOptTotalSpace = 1
ListOptDelete = 2
ListOptFingerprint = 4
ListOptUnknown8 = 8
ListOptUnknown16 = 16
ListOptFolderSize = 32
ListOptUsedSpace = 64
ListOptUnknown128 = 128
ListOptUnknown256 = 256
)

// ListOptDefaults ...
const ListOptDefaults = ListOptUnknown128 | ListOptUnknown256 | ListOptFolderSize | ListOptTotalSpace | ListOptUsedSpace

// List parse flags
const (
ListParseDone = 0
ListParseReadItem = 1
ListParsePin = 2
ListParsePinUpper = 3
ListParseUnknown15 = 15
)

// List operation results
const (
ListResultOK = 0
ListResultNotExists = 1
ListResultDunno02 = 2
ListResultDunno03 = 3
ListResultAlreadyExists04 = 4
ListResultDunno05 = 5
ListResultDunno06 = 6
ListResultDunno07 = 7
ListResultDunno08 = 8
ListResultAlreadyExists09 = 9
ListResultDunno10 = 10
ListResultDunno11 = 11
ListResultDunno12 = 12
ListResultFailedB = 253
ListResultFailedA = 254
)

// Directory item types
const (
ListItemMountPoint = 0
ListItemFile = 1
ListItemFolder = 2
ListItemSharedFolder = 3
)
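
The ListOpt* values are bit flags, which is why ListOptDefaults is built with bitwise OR. A minimal sketch of composing and testing such a mask (editorial example; the constants are copied from the block above):

package main

import "fmt"

const (
	ListOptTotalSpace = 1
	ListOptFolderSize = 32
	ListOptUsedSpace  = 64
)

func main() {
	// compose a request options mask from individual flags
	opts := ListOptTotalSpace | ListOptFolderSize | ListOptUsedSpace
	// query individual flags with bitwise AND
	fmt.Println(opts&ListOptFolderSize != 0) // true: folder sizes requested
	fmt.Println(opts&ListOptUsedSpace != 0)  // true: used space requested
}
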
backend/mailru/api/helpers.go (new file, 225 lines)
@@ -0,0 +1,225 @@
package api

// BIN protocol helpers

import (
"bufio"
"bytes"
"encoding/binary"
"io"
"log"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/lib/readers"
)

// protocol errors
var (
ErrorPrematureEOF = errors.New("Premature EOF")
ErrorInvalidLength = errors.New("Invalid length")
ErrorZeroTerminate = errors.New("String must end with zero")
)

// BinWriter is a binary protocol writer
type BinWriter struct {
b *bytes.Buffer // growing byte buffer
a []byte // temporary buffer for next varint
}

// NewBinWriter creates a binary protocol helper
func NewBinWriter() *BinWriter {
return &BinWriter{
b: new(bytes.Buffer),
a: make([]byte, binary.MaxVarintLen64),
}
}

// Bytes returns binary data
func (w *BinWriter) Bytes() []byte {
return w.b.Bytes()
}

// Reader returns io.Reader with binary data
func (w *BinWriter) Reader() io.Reader {
return bytes.NewReader(w.b.Bytes())
}

// WritePu16 writes a short as unsigned varint
func (w *BinWriter) WritePu16(val int) {
if val < 0 || val > 65535 {
log.Fatalf("Invalid UInt16 %v", val)
}
w.WritePu64(int64(val))
}

// WritePu32 writes a signed long as unsigned varint
func (w *BinWriter) WritePu32(val int64) {
if val < 0 || val > 4294967295 {
log.Fatalf("Invalid UInt32 %v", val)
}
w.WritePu64(val)
}

// WritePu64 writes an unsigned (actually, signed) long as unsigned varint
func (w *BinWriter) WritePu64(val int64) {
if val < 0 {
log.Fatalf("Invalid UInt64 %v", val)
}
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}

// WriteString writes a zero-terminated string
func (w *BinWriter) WriteString(str string) {
buf := []byte(str)
w.WritePu64(int64(len(buf) + 1))
w.b.Write(buf)
w.b.WriteByte(0)
}

// Write writes a byte buffer
func (w *BinWriter) Write(buf []byte) {
w.b.Write(buf)
}

// WriteWithLength writes a byte buffer prepended with its length as varint
func (w *BinWriter) WriteWithLength(buf []byte) {
w.WritePu64(int64(len(buf)))
w.b.Write(buf)
}

// BinReader is a binary protocol reader helper
type BinReader struct {
b *bufio.Reader
count *readers.CountingReader
err error // keeps the first error encountered
}

// NewBinReader creates a binary protocol reader helper
func NewBinReader(reader io.Reader) *BinReader {
r := &BinReader{}
r.count = readers.NewCountingReader(reader)
r.b = bufio.NewReader(r.count)
return r
}

// Count returns number of bytes read
func (r *BinReader) Count() uint64 {
return r.count.BytesRead()
}

// Error returns first encountered error or nil
func (r *BinReader) Error() error {
return r.err
}

// check() keeps the first error encountered in a stream
func (r *BinReader) check(err error) bool {
if err == nil {
return true
}
if r.err == nil {
// keep the first error
r.err = err
}
if err != io.EOF {
log.Fatalf("Error parsing response: %v", err)
}
return false
}

// ReadByteAsInt reads a single byte as uint32, returns -1 for EOF or errors
func (r *BinReader) ReadByteAsInt() int {
if octet, err := r.b.ReadByte(); r.check(err) {
return int(octet)
}
return -1
}

// ReadByteAsShort reads a single byte as uint16, returns -1 for EOF or errors
func (r *BinReader) ReadByteAsShort() int16 {
if octet, err := r.b.ReadByte(); r.check(err) {
return int16(octet)
}
return -1
}

// ReadIntSpl reads two bytes as little-endian uint16, returns -1 for EOF or errors
func (r *BinReader) ReadIntSpl() int {
var val uint16
if r.check(binary.Read(r.b, binary.LittleEndian, &val)) {
return int(val)
}
return -1
}

// ReadULong returns uint64 equivalent of -1 for EOF or errors
func (r *BinReader) ReadULong() uint64 {
if val, err := binary.ReadUvarint(r.b); r.check(err) {
return val
}
return 0xffffffffffffffff
}

// ReadPu32 returns -1 for EOF or errors
func (r *BinReader) ReadPu32() int64 {
if val, err := binary.ReadUvarint(r.b); r.check(err) {
return int64(val)
}
return -1
}

// ReadNBytes reads given number of bytes, returns invalid data for EOF or errors
func (r *BinReader) ReadNBytes(len int) []byte {
buf := make([]byte, len)
n, err := r.b.Read(buf)
if r.check(err) {
return buf
}
if n != len {
r.check(ErrorPrematureEOF)
}
return buf
}

// ReadBytesByLength reads buffer length and its bytes
func (r *BinReader) ReadBytesByLength() []byte {
len := r.ReadPu32()
if len < 0 {
r.check(ErrorInvalidLength)
return []byte{}
}
return r.ReadNBytes(int(len))
}

// ReadString reads a zero-terminated string with length
func (r *BinReader) ReadString() string {
len := int(r.ReadPu32())
if len < 1 {
r.check(ErrorInvalidLength)
return ""
}
buf := make([]byte, len-1)
n, err := r.b.Read(buf)
if !r.check(err) {
return ""
}
if n != len-1 {
r.check(ErrorPrematureEOF)
return ""
}
zeroByte, err := r.b.ReadByte()
if !r.check(err) {
return ""
}
if zeroByte != 0 {
r.check(ErrorZeroTerminate)
return ""
}
return string(buf)
}

// ReadDate reads a Unix encoded time
func (r *BinReader) ReadDate() time.Time {
return time.Unix(r.ReadPu32(), 0)
}
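
A hedged usage sketch of the writer/reader pair above (editorial example; it assumes the api package exactly as introduced in this diff): values written as varints and zero-terminated strings read back in the same order.

package main

import (
	"fmt"

	"github.com/rclone/rclone/backend/mailru/api"
)

func main() {
	w := api.NewBinWriter()
	w.WritePu32(1024)         // unsigned varint
	w.WriteString("home/dir") // length-prefixed, zero-terminated

	r := api.NewBinReader(w.Reader())
	fmt.Println(r.ReadPu32())   // 1024
	fmt.Println(r.ReadString()) // home/dir
	fmt.Println(r.Error())      // nil if the stream parsed cleanly
}
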
backend/mailru/api/m1.go (new file, 248 lines)
@@ -0,0 +1,248 @@
package api

import (
"fmt"
)

// M1 protocol constants and structures
const (
APIServerURL = "https://cloud.mail.ru"
PublicLinkURL = "https://cloud.mail.ru/public/"
DispatchServerURL = "https://dispatcher.cloud.mail.ru"
OAuthURL = "https://o2.mail.ru/token"
OAuthClientID = "cloud-win"
)

// ServerErrorResponse represents erroneous API response.
type ServerErrorResponse struct {
Message string `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}

func (e *ServerErrorResponse) Error() string {
return fmt.Sprintf("server error %d (%s)", e.Status, e.Message)
}

// FileErrorResponse represents erroneous API response for a file
type FileErrorResponse struct {
Body struct {
Home struct {
Value string `json:"value"`
Error string `json:"error"`
} `json:"home"`
} `json:"body"`
Status int `json:"status"`
Account string `json:"email,omitempty"`
Time int64 `json:"time,omitempty"`
Message string // non-json, calculated field
}

func (e *FileErrorResponse) Error() string {
return fmt.Sprintf("file error %d (%s)", e.Status, e.Body.Home.Error)
}

// UserInfoResponse contains account metadata
type UserInfoResponse struct {
Body struct {
AccountType string `json:"account_type"`
AccountVerified bool `json:"account_verified"`
Cloud struct {
Beta struct {
Allowed bool `json:"allowed"`
Asked bool `json:"asked"`
} `json:"beta"`
Billing struct {
ActiveCostID string `json:"active_cost_id"`
ActiveRateID string `json:"active_rate_id"`
AutoProlong bool `json:"auto_prolong"`
Basequota int64 `json:"basequota"`
Enabled bool `json:"enabled"`
Expires int `json:"expires"`
Prolong bool `json:"prolong"`
Promocodes struct {
} `json:"promocodes"`
Subscription []interface{} `json:"subscription"`
Version string `json:"version"`
} `json:"billing"`
Bonuses struct {
CameraUpload bool `json:"camera_upload"`
Complete bool `json:"complete"`
Desktop bool `json:"desktop"`
Feedback bool `json:"feedback"`
Links bool `json:"links"`
Mobile bool `json:"mobile"`
Registration bool `json:"registration"`
} `json:"bonuses"`
Enable struct {
Sharing bool `json:"sharing"`
} `json:"enable"`
FileSizeLimit int64 `json:"file_size_limit"`
Space struct {
BytesTotal int64 `json:"bytes_total"`
BytesUsed int `json:"bytes_used"`
Overquota bool `json:"overquota"`
} `json:"space"`
} `json:"cloud"`
Cloudflags struct {
Exists bool `json:"exists"`
} `json:"cloudflags"`
Domain string `json:"domain"`
Login string `json:"login"`
Newbie bool `json:"newbie"`
UI struct {
ExpandLoader bool `json:"expand_loader"`
Kind string `json:"kind"`
Sidebar bool `json:"sidebar"`
Sort struct {
Order string `json:"order"`
Type string `json:"type"`
} `json:"sort"`
Thumbs bool `json:"thumbs"`
} `json:"ui"`
} `json:"body"`
Email string `json:"email"`
Status int `json:"status"`
Time int64 `json:"time"`
}

// ListItem ...
type ListItem struct {
Count struct {
Folders int `json:"folders"`
Files int `json:"files"`
} `json:"count,omitempty"`
Kind string `json:"kind"`
Type string `json:"type"`
Name string `json:"name"`
Home string `json:"home"`
Size int64 `json:"size"`
Mtime int64 `json:"mtime,omitempty"`
Hash string `json:"hash,omitempty"`
VirusScan string `json:"virus_scan,omitempty"`
Tree string `json:"tree,omitempty"`
Grev int `json:"grev,omitempty"`
Rev int `json:"rev,omitempty"`
}

// ItemInfoResponse ...
type ItemInfoResponse struct {
Email string `json:"email"`
Body ListItem `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}

// FolderInfoResponse ...
type FolderInfoResponse struct {
Body struct {
Count struct {
Folders int `json:"folders"`
Files int `json:"files"`
} `json:"count"`
Tree string `json:"tree"`
Name string `json:"name"`
Grev int `json:"grev"`
Size int64 `json:"size"`
Sort struct {
Order string `json:"order"`
Type string `json:"type"`
} `json:"sort"`
Kind string `json:"kind"`
Rev int `json:"rev"`
Type string `json:"type"`
Home string `json:"home"`
List []ListItem `json:"list"`
} `json:"body,omitempty"`
Time int64 `json:"time"`
Status int `json:"status"`
Email string `json:"email"`
}

// ShardInfoResponse ...
type ShardInfoResponse struct {
Email string `json:"email"`
Body struct {
Video []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"video"`
ViewDirect []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view_direct"`
WeblinkView []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_view"`
WeblinkVideo []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_video"`
WeblinkGet []struct {
Count int `json:"count"`
URL string `json:"url"`
} `json:"weblink_get"`
Stock []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"stock"`
WeblinkThumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"weblink_thumbnails"`
PublicUpload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"public_upload"`
Auth []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"auth"`
Web []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"web"`
View []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"view"`
Upload []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"upload"`
Get []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"get"`
Thumbnails []struct {
Count string `json:"count"`
URL string `json:"url"`
} `json:"thumbnails"`
} `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}

// CleanupResponse ...
type CleanupResponse struct {
Email string `json:"email"`
Time int64 `json:"time"`
StatusStr string `json:"status"`
}

// GenericResponse ...
type GenericResponse struct {
Email string `json:"email"`
Time int64 `json:"time"`
Status int `json:"status"`
// ignore other fields
}

// GenericBodyResponse ...
type GenericBodyResponse struct {
Email string `json:"email"`
Body string `json:"body"`
Time int64 `json:"time"`
Status int `json:"status"`
}
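
Since ServerErrorResponse implements the error interface via its Error() method, an erroneous M1 reply can be unmarshalled and surfaced directly as a Go error. A short sketch (editorial example; the JSON payload is invented):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/rclone/rclone/backend/mailru/api"
)

func main() {
	// a made-up error reply in the shape the struct tags above expect
	payload := []byte(`{"body":"user","time":1568000000,"status":403}`)
	var apiErr api.ServerErrorResponse
	if err := json.Unmarshal(payload, &apiErr); err != nil {
		panic(err)
	}
	var err error = &apiErr // *ServerErrorResponse satisfies error
	fmt.Println(err)        // server error 403 (user)
}
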
backend/mailru/mailru.go (new file, 2390 lines)
File diff suppressed because it is too large

backend/mailru/mailru_test.go (new file, 18 lines)
@@ -0,0 +1,18 @@
// Test Mailru filesystem interface
package mailru_test

import (
"testing"

"github.com/rclone/rclone/backend/mailru"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestMailru:",
NilObject: (*mailru.Object)(nil),
SkipBadWindowsCharacters: true,
})
}
backend/mailru/mrhash/mrhash.go (new file, 134 lines)
@@ -0,0 +1,134 @@
// Package mrhash implements the mailru hash, which is a modified SHA1.
// If the file size is less than or equal to the SHA1 digest size (20 bytes),
// its hash is simply its data right-padded with zero bytes.
// The hash sum of a larger file is computed as a SHA1 sum of the constant
// seed "mrCloud", the file data bytes and a decimal representation of the
// data length, concatenated in that order.
package mrhash

import (
"crypto/sha1"
"encoding"
"encoding/hex"
"errors"
"hash"
"strconv"
)

const (
// BlockSize of the checksum in bytes.
BlockSize = sha1.BlockSize
// Size of the checksum in bytes.
Size = sha1.Size
startString = "mrCloud"
hashError = "hash function returned error"
)

// Global errors
var (
ErrorInvalidHash = errors.New("invalid hash")
)

type digest struct {
total int // bytes written into hash so far
sha hash.Hash // underlying SHA1
small []byte // small content
}

// New returns a new hash.Hash computing the Mailru checksum.
func New() hash.Hash {
d := &digest{}
d.Reset()
return d
}

// Write writes len(p) bytes from p to the underlying data stream. It returns
// the number of bytes written from p (0 <= n <= len(p)) and any error
// encountered that caused the write to stop early. Write must return a non-nil
// error if it returns n < len(p). Write must not modify the slice data, even
// temporarily.
//
// Implementations must not retain p.
func (d *digest) Write(p []byte) (n int, err error) {
n, err = d.sha.Write(p)
if err != nil {
panic(hashError)
}
d.total += n
if d.total <= Size {
d.small = append(d.small, p...)
}
return n, nil
}

// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (d *digest) Sum(b []byte) []byte {
// If content is small, return it padded to Size
if d.total <= Size {
padded := make([]byte, Size)
copy(padded, d.small)
return append(b, padded...)
}
endString := strconv.Itoa(d.total)
copy, err := cloneSHA1(d.sha)
if err == nil {
_, err = copy.Write([]byte(endString))
}
if err != nil {
panic(hashError)
}
return copy.Sum(b)
}

// cloneSHA1 clones state of SHA1 hash
func cloneSHA1(orig hash.Hash) (clone hash.Hash, err error) {
state, err := orig.(encoding.BinaryMarshaler).MarshalBinary()
if err != nil {
return nil, err
}
clone = sha1.New()
err = clone.(encoding.BinaryUnmarshaler).UnmarshalBinary(state)
return
}

// Reset resets the Hash to its initial state.
func (d *digest) Reset() {
d.sha = sha1.New()
_, _ = d.sha.Write([]byte(startString))
d.total = 0
}

// Size returns the number of bytes Sum will return.
func (d *digest) Size() int {
return Size
}

// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (d *digest) BlockSize() int {
return BlockSize
}

// Sum returns the Mailru checksum of the data.
func Sum(data []byte) []byte {
var d digest
d.Reset()
_, _ = d.Write(data)
return d.Sum(nil)
}

// DecodeString converts a string to the Mailru hash
func DecodeString(s string) ([]byte, error) {
b, err := hex.DecodeString(s)
if err != nil || len(b) != Size {
return nil, ErrorInvalidHash
}
return b, nil
}

// must implement this interface
var (
_ hash.Hash = (*digest)(nil)
)
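
A brief sketch of the two regimes described in the package comment (editorial example, not part of the diff): inputs of 20 bytes or fewer come back zero-padded verbatim, while longer inputs go through the seeded SHA1 construction.

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/rclone/rclone/backend/mailru/mrhash"
)

func main() {
	// small input: the data itself, right-padded with zeros to 20 bytes
	small := mrhash.Sum([]byte("AB"))
	fmt.Println(hex.EncodeToString(small)) // 4142 followed by 36 zero digits

	// large input (>20 bytes): a SHA1-based digest, still 20 bytes long
	big := mrhash.Sum(make([]byte, 1024))
	fmt.Println(len(big)) // 20
}
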
backend/mailru/mrhash/mrhash_test.go (new file, 81 lines)
@@ -0,0 +1,81 @@
package mrhash_test

import (
"encoding/hex"
"fmt"
"testing"

"github.com/rclone/rclone/backend/mailru/mrhash"
"github.com/stretchr/testify/assert"
)

func testChunk(t *testing.T, chunk int) {
data := make([]byte, chunk)
for i := 0; i < chunk; i++ {
data[i] = 'A'
}
for _, test := range []struct {
n int
want string
}{
{0, "0000000000000000000000000000000000000000"},
{1, "4100000000000000000000000000000000000000"},
{2, "4141000000000000000000000000000000000000"},
{19, "4141414141414141414141414141414141414100"},
{20, "4141414141414141414141414141414141414141"},
{21, "eb1d05e78a18691a5aa196a6c2b60cd40b5faafb"},
{22, "037e6d960601118a0639afbeff30fe716c66ed2d"},
{4096, "45a16aa192502b010280fb5b44274c601a91fd9f"},
{4194303, "fa019d5bd26498cf6abe35e0d61801bf19bf704b"},
{4194304, "5ed0e07aa6ea5c1beb9402b4d807258f27d40773"},
{4194305, "67bd0b9247db92e0e7d7e29a0947a50fedcb5452"},
{8388607, "41a8e2eb044c2e242971b5445d7be2a13fc0dd84"},
{8388608, "267a970917c624c11fe624276ec60233a66dc2c0"},
{8388609, "37b60b308d553d2732aefb62b3ea88f74acfa13f"},
} {
d := mrhash.New()
var toWrite int
for toWrite = test.n; toWrite >= chunk; toWrite -= chunk {
n, err := d.Write(data)
assert.Nil(t, err)
assert.Equal(t, chunk, n)
}
n, err := d.Write(data[:toWrite])
assert.Nil(t, err)
assert.Equal(t, toWrite, n)
got1 := hex.EncodeToString(d.Sum(nil))
assert.Equal(t, test.want, got1, fmt.Sprintf("when testing length %d", n))
got2 := hex.EncodeToString(d.Sum(nil))
assert.Equal(t, test.want, got2, fmt.Sprintf("when testing length %d (2nd sum)", n))
}
}

func TestHashChunk16M(t *testing.T) { testChunk(t, 16*1024*1024) }
func TestHashChunk8M(t *testing.T) { testChunk(t, 8*1024*1024) }
func TestHashChunk4M(t *testing.T) { testChunk(t, 4*1024*1024) }
func TestHashChunk2M(t *testing.T) { testChunk(t, 2*1024*1024) }
func TestHashChunk1M(t *testing.T) { testChunk(t, 1*1024*1024) }
func TestHashChunk64k(t *testing.T) { testChunk(t, 64*1024) }
func TestHashChunk32k(t *testing.T) { testChunk(t, 32*1024) }
func TestHashChunk2048(t *testing.T) { testChunk(t, 2048) }
func TestHashChunk2047(t *testing.T) { testChunk(t, 2047) }

func TestSumCalledTwice(t *testing.T) {
d := mrhash.New()
assert.NotPanics(t, func() { d.Sum(nil) })
d.Reset()
assert.NotPanics(t, func() { d.Sum(nil) })
assert.NotPanics(t, func() { d.Sum(nil) })
_, _ = d.Write([]byte{1})
assert.NotPanics(t, func() { d.Sum(nil) })
}

func TestSize(t *testing.T) {
d := mrhash.New()
assert.Equal(t, 20, d.Size())
}

func TestBlockSize(t *testing.T) {
d := mrhash.New()
assert.Equal(t, 64, d.BlockSize())
}
@@ -29,6 +29,7 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/pacer"
@@ -36,6 +37,8 @@ import (
mega "github.com/t3rm1n4l/go-mega"
)

const enc = encodings.Mega

const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
@@ -245,14 +248,15 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}

// splitNodePath splits nodePath into / separated parts, returning nil if it
// should refer to the root
// should refer to the root.
// It also encodes the parts into backend specific encoding
func splitNodePath(nodePath string) (parts []string) {
nodePath = path.Clean(nodePath)
parts = strings.Split(nodePath, "/")
if len(parts) == 1 && (parts[0] == "." || parts[0] == "/") {
if nodePath == "." || nodePath == "/" {
return nil
}
return parts
nodePath = enc.FromStandardPath(nodePath)
return strings.Split(nodePath, "/")
}
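
Because splitNodePath is unexported, the sketch below (editorial example; the enc.FromStandardPath encoding step is omitted) reimplements just the root-versus-parts behaviour for illustration:

package main

import (
	"fmt"
	"path"
	"strings"
)

func splitNodePath(nodePath string) []string {
	nodePath = path.Clean(nodePath)
	if nodePath == "." || nodePath == "/" {
		return nil // path refers to the root
	}
	return strings.Split(nodePath, "/")
}

func main() {
	fmt.Println(splitNodePath("/"))       // []
	fmt.Println(splitNodePath("a/b/c"))   // [a b c]
	fmt.Println(splitNodePath("./a//b/")) // [a b] after path.Clean
}
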

// findNode looks up the node for the path of the name given from the root given
@@ -418,7 +422,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
errors := 0
// similar to f.deleteNode(trash) but with HardDelete as true
for _, item := range items {
fs.Debugf(f, "Deleting trash %q", item.GetName())
fs.Debugf(f, "Deleting trash %q", enc.ToStandardName(item.GetName()))
deleteErr := f.pacer.Call(func() (bool, error) {
err := f.srv.Delete(item, true)
return shouldRetry(err)
@@ -500,7 +504,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
var iErr error
_, err = f.list(ctx, dirNode, func(info *mega.Node) bool {
remote := path.Join(dir, info.GetName())
remote := path.Join(dir, enc.ToStandardName(info.GetName()))
switch info.GetType() {
case mega.FOLDER, mega.ROOT, mega.INBOX, mega.TRASH:
d := fs.NewDir(remote, info.GetTimeStamp()).SetID(info.GetHash())
@@ -722,7 +726,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
if srcLeaf != dstLeaf {
//log.Printf("rename %q to %q", srcLeaf, dstLeaf)
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Rename(info, dstLeaf)
err = f.srv.Rename(info, enc.FromStandardName(dstLeaf))
return shouldRetry(err)
})
if err != nil {
@@ -871,13 +875,13 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
}
// move them into place
for _, info := range infos {
fs.Infof(srcDir, "merging %q", info.GetName())
fs.Infof(srcDir, "merging %q", enc.ToStandardName(info.GetName()))
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Move(info, dstDirNode)
return shouldRetry(err)
})
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.GetName(), srcDir)
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", enc.ToStandardName(info.GetName()), srcDir)
}
}
// rmdir (into trash) the now empty source directory
@@ -1120,7 +1124,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

var u *mega.Upload
err = o.fs.pacer.Call(func() (bool, error) {
u, err = o.fs.srv.NewUpload(dirNode, leaf, size)
u, err = o.fs.srv.NewUpload(dirNode, enc.FromStandardName(leaf), size)
return shouldRetry(err)
})
if err != nil {

@@ -15,17 +15,18 @@ import (
"strings"
"time"

"github.com/rclone/rclone/lib/atexit"

"github.com/pkg/errors"
"github.com/rclone/rclone/backend/onedrive/api"
"github.com/rclone/rclone/backend/onedrive/quickxorhash"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
@@ -34,6 +35,8 @@ import (
"golang.org/x/oauth2"
)

const enc = encodings.OneDrive

const (
rcloneClientID = "b15665d9-eda6-4092-8539-0eec376afd59"
rcloneEncryptedClientSecret = "_JUdzh3LnKNqSPcf4Wu5fgMFIQOI8glZu_akYgR8yf6egowNBg-R"
@@ -63,15 +66,20 @@ var (
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}

// QuickXorHashType is the hash.Type for OneDrive
QuickXorHashType hash.Type
)

// Register with Fs
func init() {
QuickXorHashType = hash.RegisterHash("QuickXorHash", 40, quickxorhash.New)
fs.Register(&fs.RegInfo{
Name: "onedrive",
Description: "Microsoft OneDrive",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
err := oauthutil.Config("onedrive", name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
@@ -143,7 +151,7 @@ func init() {
}

sites := siteResponse{}
_, err := srv.CallJSON(&opts, nil, &sites)
_, err := srv.CallJSON(ctx, &opts, nil, &sites)
if err != nil {
log.Fatalf("Failed to query available sites: %v", err)
}
@@ -172,7 +180,7 @@ func init() {
// query Microsoft Graph
if finalDriveID == "" {
drives := drivesResponse{}
_, err := srv.CallJSON(&opts, nil, &drives)
_, err := srv.CallJSON(ctx, &opts, nil, &drives)
if err != nil {
log.Fatalf("Failed to query available drives: %v", err)
}
@@ -194,7 +202,7 @@ func init() {
RootURL: graphURL,
Path: "/drives/" + finalDriveID + "/root"}
var rootItem api.Item
_, err = srv.CallJSON(&opts, nil, &rootItem)
_, err = srv.CallJSON(ctx, &opts, nil, &rootItem)
if err != nil {
log.Fatalf("Failed to query root for drive %s: %v", finalDriveID, err)
}
@@ -217,9 +225,9 @@ func init() {
Help: "Microsoft App Client Secret\nLeave blank normally.",
}, {
Name: "chunk_size",
Help: `Chunk size to upload files with - must be multiple of 320k.
Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).

Above this size files will be chunked - must be multiple of 320k. Note
Above this size files will be chunked - must be multiple of 320k (327,680 bytes). Note
that the chunks will be buffered into memory.`,
Default: defaultChunkSize,
Advanced: true,
@@ -343,10 +351,10 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// instead of simply using `drives/driveID/root:/itemPath` because it works for
// "shared with me" folders in OneDrive Personal (See #2536, #2778)
// This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
func (f *Fs) readMetaDataForPathRelativeToID(normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
opts := newOptsCall(normalizedID, "GET", ":/"+withTrailingColon(rest.URLPathEscape(replaceReservedChars(relPath))))
func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
opts := newOptsCall(normalizedID, "GET", ":/"+withTrailingColon(rest.URLPathEscape(enc.FromStandardPath(relPath))))
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &info)
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(resp, err)
})

@@ -367,11 +375,11 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
} else {
opts = rest.Opts{
Method: "GET",
Path: "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
Path: "/root:/" + rest.URLPathEscape(enc.FromStandardPath(path)),
}
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &info)
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(resp, err)
})
return info, resp, err
@@ -426,7 +434,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
}
}

return f.readMetaDataForPathRelativeToID(baseNormalizedID, relPath)
return f.readMetaDataForPathRelativeToID(ctx, baseNormalizedID, relPath)
}

// errorHandler parses a non 2xx error response into an error
@@ -592,7 +600,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
if !ok {
return "", false, errors.New("couldn't find parent ID")
}
info, resp, err := f.readMetaDataForPathRelativeToID(pathID, leaf)
info, resp, err := f.readMetaDataForPathRelativeToID(ctx, pathID, leaf)
if err != nil {
if resp != nil && resp.StatusCode == http.StatusNotFound {
return "", false, nil
@@ -615,11 +623,11 @@ func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, e
var info *api.Item
opts := newOptsCall(dirID, "POST", "/children")
mkdir := api.CreateItemRequest{
Name: replaceReservedChars(leaf),
Name: enc.FromStandardName(leaf),
ConflictBehavior: "fail",
}
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -642,7 +650,7 @@ type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
// Top parameter asks for bigger pages of data
// https://dev.onedrive.com/odata/optional-query-parameters.htm
opts := newOptsCall(dirID, "GET", "/children?$top=1000")
@@ -651,7 +659,7 @@ OUTER:
var result api.ListChildrenResponse
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &result)
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -675,7 +683,7 @@ OUTER:
if item.Deleted != nil {
continue
}
item.Name = restoreReservedChars(item.GetName())
item.Name = enc.ToStandardName(item.GetName())
if fn(item) {
found = true
break OUTER
@@ -709,7 +717,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, err
}
var iErr error
_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
if !f.opt.ExposeOneNoteFiles && info.GetPackageType() == api.PackageTypeOneNote {
fs.Debugf(info.Name, "OneNote file not shown in directory listing")
return false
@@ -793,12 +801,12 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
}

// deleteObject removes an object by ID
func (f *Fs) deleteObject(id string) error {
func (f *Fs) deleteObject(ctx context.Context, id string) error {
opts := newOptsCall(id, "DELETE", "")
opts.NoResponse = true

return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(&opts)
resp, err := f.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
})
}
@@ -821,7 +829,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
}
if check {
// check to see if there are any items
found, err := f.listAll(rootID, false, false, func(item *api.Item) bool {
found, err := f.listAll(ctx, rootID, false, false, func(item *api.Item) bool {
return true
})
if err != nil {
@@ -831,7 +839,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
return fs.ErrorDirectoryNotEmpty
}
}
err = f.deleteObject(rootID)
err = f.deleteObject(ctx, rootID)
if err != nil {
return err
}
@@ -912,8 +920,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}

srcPath := srcObj.fs.rootSlash() + srcObj.remote
dstPath := f.rootSlash() + remote
srcPath := srcObj.rootPath()
dstPath := f.rootPath(remote)
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
@@ -931,7 +939,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

id, dstDriveID, _ := parseNormalizedID(directoryID)

replacedLeaf := replaceReservedChars(leaf)
replacedLeaf := enc.FromStandardName(leaf)
copyReq := api.CopyItemRequest{
Name: &replacedLeaf,
ParentReference: api.ItemReference{
@@ -941,7 +949,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &copyReq, nil)
resp, err = f.srv.CallJSON(ctx, &opts, &copyReq, nil)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1015,7 +1023,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
opts := newOptsCall(srcObj.id, "PATCH", "")

move := api.MoveItemRequest{
Name: replaceReservedChars(leaf),
Name: enc.FromStandardName(leaf),
ParentReference: &api.ItemReference{
DriveID: dstDriveID,
ID: id,
@@ -1029,7 +1037,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
var resp *http.Response
var info api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &move, &info)
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1122,7 +1130,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}

// Get timestamps of src so they can be preserved
srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(srcID, "")
srcInfo, _, err := srcFs.readMetaDataForPathRelativeToID(ctx, srcID, "")
if err != nil {
return err
}
@@ -1130,7 +1138,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Do the move
opts := newOptsCall(srcID, "PATCH", "")
move := api.MoveItemRequest{
Name: replaceReservedChars(leaf),
Name: enc.FromStandardName(leaf),
ParentReference: &api.ItemReference{
DriveID: dstDriveID,
ID: parsedDstDirID,
@@ -1144,7 +1152,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
var resp *http.Response
var info api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, &move, &info)
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1170,7 +1178,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &drive)
resp, err = f.srv.CallJSON(ctx, &opts, nil, &drive)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1191,12 +1199,12 @@ func (f *Fs) Hashes() hash.Set {
if f.driveType == driveTypePersonal {
return hash.Set(hash.SHA1)
}
return hash.Set(hash.QuickXorHash)
return hash.Set(QuickXorHashType)
}

// PublicLink returns a link for downloading without accout.
|
||||
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
|
||||
info, _, err := f.readMetaDataForPath(ctx, f.srvPath(remote))
|
||||
info, _, err := f.readMetaDataForPath(ctx, f.rootPath(remote))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -1210,7 +1218,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
|
||||
var resp *http.Response
|
||||
var result api.CreateShareLinkResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(&opts, &share, &result)
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, &share, &result)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1240,9 +1248,19 @@ func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// rootPath returns a path for use in server given a remote
|
||||
func (f *Fs) rootPath(remote string) string {
|
||||
return f.rootSlash() + remote
|
||||
}
|
||||
|
||||
// rootPath returns a path for use in local functions
|
||||
func (o *Object) rootPath() string {
|
||||
return o.fs.rootPath(o.remote)
|
||||
}
|
||||
|
||||
// srvPath returns a path for use in server given a remote
|
||||
func (f *Fs) srvPath(remote string) string {
|
||||
return replaceReservedChars(f.rootSlash() + remote)
|
||||
return enc.FromStandardPath(f.rootSlash() + remote)
|
||||
}
|
||||
|
||||
// srvPath returns a path for use in server
|
||||
@@ -1257,7 +1275,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
return o.sha1, nil
|
||||
}
|
||||
} else {
|
||||
if t == hash.QuickXorHash {
|
||||
if t == QuickXorHashType {
|
||||
return o.quickxorhash, nil
|
||||
}
|
||||
}
|
||||
@@ -1319,7 +1337,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||
if o.hasMetaData {
|
||||
return nil
|
||||
}
|
||||
info, _, err := o.fs.readMetaDataForPath(ctx, o.srvPath())
|
||||
info, _, err := o.fs.readMetaDataForPath(ctx, o.rootPath())
|
||||
if err != nil {
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
if apiErr.ErrorInfo.Code == "itemNotFound" {
|
||||
@@ -1354,7 +1372,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
|
||||
opts = rest.Opts{
|
||||
Method: "PATCH",
|
||||
RootURL: rootURL,
|
||||
Path: "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(leaf)),
|
||||
Path: "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(enc.FromStandardName(leaf))),
|
||||
}
|
||||
} else {
|
||||
opts = rest.Opts{
|
||||
@@ -1370,7 +1388,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
|
||||
}
|
||||
var info *api.Item
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
|
||||
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
return info, err
|
||||
@@ -1405,7 +1423,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
opts.Options = options
|
||||
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(&opts)
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1428,7 +1446,8 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
|
||||
opts = rest.Opts{
|
||||
Method: "POST",
|
||||
RootURL: rootURL,
|
||||
Path: "/" + drive + "/items/" + id + ":/" + rest.URLPathEscape(replaceReservedChars(leaf)) + ":/createUploadSession",
|
||||
Path: fmt.Sprintf("/%s/items/%s:/%s:/createUploadSession",
|
||||
drive, id, rest.URLPathEscape(enc.FromStandardName(leaf))),
|
||||
}
|
||||
} else {
|
||||
opts = rest.Opts{
|
||||
@@ -1441,7 +1460,7 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
|
||||
createRequest.Item.FileSystemInfo.LastModifiedDateTime = api.Timestamp(modTime)
|
||||
var resp *http.Response
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.CallJSON(&opts, &createRequest, &response)
|
||||
resp, err = o.fs.srv.CallJSON(ctx, &opts, &createRequest, &response)
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
|
||||
// Make the error more user-friendly
|
||||
@@ -1454,7 +1473,7 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
|
||||
}
|
||||
|
||||
// uploadFragment uploads a part
|
||||
func (o *Object) uploadFragment(url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64) (info *api.Item, err error) {
|
||||
func (o *Object) uploadFragment(ctx context.Context, url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64) (info *api.Item, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
RootURL: url,
|
||||
@@ -1464,28 +1483,30 @@ func (o *Object) uploadFragment(url string, start int64, totalSize int64, chunk
|
||||
}
|
||||
// var response api.UploadFragmentResponse
|
||||
var resp *http.Response
|
||||
var body []byte
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
_, _ = chunk.Seek(0, io.SeekStart)
|
||||
resp, err = o.fs.srv.Call(&opts)
|
||||
if resp != nil {
|
||||
defer fs.CheckClose(resp.Body, &err)
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
if err != nil {
|
||||
return shouldRetry(resp, err)
|
||||
}
|
||||
retry, err := shouldRetry(resp, err)
|
||||
if !retry && resp != nil {
|
||||
if resp.StatusCode == 200 || resp.StatusCode == 201 {
|
||||
// we are done :)
|
||||
// read the item
|
||||
info = &api.Item{}
|
||||
return false, json.NewDecoder(resp.Body).Decode(info)
|
||||
}
|
||||
body, err = rest.ReadBody(resp)
|
||||
if err != nil {
|
||||
return shouldRetry(resp, err)
|
||||
}
|
||||
return retry, err
|
||||
if resp.StatusCode == 200 || resp.StatusCode == 201 {
|
||||
// we are done :)
|
||||
// read the item
|
||||
info = &api.Item{}
|
||||
return false, json.Unmarshal(body, info)
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
return info, err
|
||||
}
|
||||
|
||||
// cancelUploadSession cancels an upload session
|
||||
func (o *Object) cancelUploadSession(url string) (err error) {
|
||||
func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "DELETE",
|
||||
RootURL: url,
|
||||
@@ -1493,7 +1514,7 @@ func (o *Object) cancelUploadSession(url string) (err error) {
|
||||
}
|
||||
var resp *http.Response
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(&opts)
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
return
|
||||
@@ -1515,7 +1536,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
|
||||
}
|
||||
|
||||
fs.Debugf(o, "Cancelling multipart upload")
|
||||
cancelErr := o.cancelUploadSession(uploadURL)
|
||||
cancelErr := o.cancelUploadSession(ctx, uploadURL)
|
||||
if cancelErr != nil {
|
||||
fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
|
||||
}
|
||||
@@ -1551,7 +1572,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
|
||||
}
|
||||
seg := readers.NewRepeatableReader(io.LimitReader(in, n))
|
||||
fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
|
||||
info, err = o.uploadFragment(uploadURL, position, size, seg, n)
|
||||
info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1578,7 +1599,7 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
|
||||
opts = rest.Opts{
|
||||
Method: "PUT",
|
||||
RootURL: rootURL,
|
||||
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(leaf) + ":/content",
|
||||
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(enc.FromStandardName(leaf)) + ":/content",
|
||||
ContentLength: &size,
|
||||
Body: in,
|
||||
}
|
||||
@@ -1592,7 +1613,7 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
|
||||
}
|
||||
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.CallJSON(&opts, nil, &info)
|
||||
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
if apiErr.ErrorInfo.Code == "nameAlreadyExists" {
|
||||
// Make the error more user-friendly
|
||||
@@ -1644,7 +1665,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
return o.fs.deleteObject(o.id)
|
||||
return o.fs.deleteObject(ctx, o.id)
|
||||
}
|
||||
|
||||
// MimeType of an Object if known, "" otherwise
|
||||
|
||||
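The change running through almost every hunk above is the same one: the context from the fs.* entry points is now passed down into rest.Call/CallJSON, so the HTTP request inside the pacer's retry loop is cancellable. A minimal sketch of the pattern, with a hypothetical fetchItem method standing in for the real backend calls:

func (f *Fs) fetchItem(ctx context.Context, id string) (item *api.Item, err error) {
	opts := newOptsCall(id, "GET", "")
	err = f.pacer.Call(func() (bool, error) {
		// ctx reaches the HTTP transport, so a cancelled sync
		// aborts both the in-flight request and further retries
		resp, err := f.srv.CallJSON(ctx, &opts, nil, &item)
		return shouldRetry(resp, err)
	})
	return item, err
}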
@@ -1,91 +0,0 @@
-/*
-Translate file names for one drive
-
-OneDrive reserved characters
-
-The following characters are OneDrive reserved characters, and can't
-be used in OneDrive folder and file names.
-
-	onedrive-reserved = "/" / "\" / "*" / "<" / ">" / "?" / ":" / "|"
-	onedrive-business-reserved
-		= "/" / "\" / "*" / "<" / ">" / "?" / ":" / "|" / "#" / "%"
-
-Note: Folder names can't end with a period (.).
-
-Note: OneDrive for Business file or folder names cannot begin with a
-tilde ('~').
-
-*/
-
-package onedrive
-
-import (
-	"regexp"
-	"strings"
-)
-
-// charMap holds replacements for characters
-//
-// Onedrive has a restricted set of characters compared to other cloud
-// storage systems, so we have to map these to the FULLWIDTH unicode
-// equivalents
-//
-// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
-var (
-	charMap = map[rune]rune{
-		'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
-		'*':  '＊', // FULLWIDTH ASTERISK
-		'<':  '＜', // FULLWIDTH LESS-THAN SIGN
-		'>':  '＞', // FULLWIDTH GREATER-THAN SIGN
-		'?':  '？', // FULLWIDTH QUESTION MARK
-		':':  '：', // FULLWIDTH COLON
-		'|':  '｜', // FULLWIDTH VERTICAL LINE
-		'#':  '＃', // FULLWIDTH NUMBER SIGN
-		'%':  '％', // FULLWIDTH PERCENT SIGN
-		'"':  '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
-		'.':  '．', // FULLWIDTH FULL STOP
-		'~':  '～', // FULLWIDTH TILDE
-		' ':  '␠', // SYMBOL FOR SPACE
-	}
-	invCharMap           map[rune]rune
-	fixEndingInPeriod    = regexp.MustCompile(`\.(/|$)`)
-	fixStartingWithTilde = regexp.MustCompile(`(/|^)~`)
-	fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
-)
-
-func init() {
-	// Create inverse charMap
-	invCharMap = make(map[rune]rune, len(charMap))
-	for k, v := range charMap {
-		invCharMap[v] = k
-	}
-}
-
-// replaceReservedChars takes a path and substitutes any reserved
-// characters in it
-func replaceReservedChars(in string) string {
-	// Folder names can't end with a period '.'
-	in = fixEndingInPeriod.ReplaceAllString(in, string(charMap['.'])+"$1")
-	// OneDrive for Business file or folder names cannot begin with a tilde '~'
-	in = fixStartingWithTilde.ReplaceAllString(in, "$1"+string(charMap['~']))
-	// Apparently file names can't start with space either
-	in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
-	// Replace reserved characters
-	return strings.Map(func(c rune) rune {
-		if replacement, ok := charMap[c]; ok && c != '.' && c != '~' && c != ' ' {
-			return replacement
-		}
-		return c
-	}, in)
-}
-
-// restoreReservedChars takes a path and undoes any substitutions
-// made by replaceReservedChars
-func restoreReservedChars(in string) string {
-	return strings.Map(func(c rune) rune {
-		if replacement, ok := invCharMap[c]; ok {
-			return replacement
-		}
-		return c
-	}, in)
-}
@@ -1,30 +0,0 @@
-package onedrive
-
-import "testing"
-
-func TestReplace(t *testing.T) {
-	for _, test := range []struct {
-		in  string
-		out string
-	}{
-		{"", ""},
-		{"abc 123", "abc 123"},
-		{`\*<>?:|#%".~`, `＼＊＜＞？：｜＃％＂.~`},
-		{`\*<>?:|#%".~/\*<>?:|#%".~`, `＼＊＜＞？：｜＃％＂.~/＼＊＜＞？：｜＃％＂.~`},
-		{" leading space", "␠leading space"},
-		{"~leading tilde", "～leading tilde"},
-		{"trailing dot.", "trailing dot．"},
-		{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
-		{"~leading tilde/~leading tilde/~leading tilde", "～leading tilde/～leading tilde/～leading tilde"},
-		{"trailing dot./trailing dot./trailing dot.", "trailing dot．/trailing dot．/trailing dot．"},
-	} {
-		got := replaceReservedChars(test.in)
-		if got != test.out {
-			t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
-		}
-		got2 := restoreReservedChars(got)
-		if got2 != test.in {
-			t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
-		}
-	}
-}
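The deleted tests above pinned down one invariant: the substitution must round-trip, so restoreReservedChars(replaceReservedChars(x)) == x for every path. The shared encoder that replaces these per-backend helpers carries the same contract. A rough sketch of how that property can be checked against the encoder (assuming the encodings constant exposes the From/To pair used throughout this diff):

package onedrive

import "testing"

func TestEncodingRoundTrip(t *testing.T) {
	for _, in := range []string{"", "abc 123", `*<>?:|#%"`, "trailing dot."} {
		encoded := enc.FromStandardName(in)
		if got := enc.ToStandardName(encoded); got != in {
			t.Errorf("round trip of %q failed: got %q via %q", in, got, encoded)
		}
	}
}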
@@ -16,6 +16,7 @@ import (
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/config/obscure"
+	"github.com/rclone/rclone/fs/encodings"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
@@ -25,6 +26,8 @@ import (
 	"github.com/rclone/rclone/lib/rest"
 )

+const enc = encodings.OpenDrive
+
 const (
 	defaultEndpoint = "https://dev.opendrive.com/api/v1"
 	minSleep        = 10 * time.Millisecond
@@ -161,7 +164,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 			Method: "POST",
 			Path:   "/session/login.json",
 		}
-		resp, err = f.srv.CallJSON(&opts, &account, &f.session)
+		resp, err = f.srv.CallJSON(ctx, &opts, &account, &f.session)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -246,7 +249,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 }

 // deleteObject removes an object by ID
-func (f *Fs) deleteObject(id string) error {
+func (f *Fs) deleteObject(ctx context.Context, id string) error {
 	return f.pacer.Call(func() (bool, error) {
 		removeDirData := removeFolder{SessionID: f.session.SessionID, FolderID: id}
 		opts := rest.Opts{
@@ -254,7 +257,7 @@ func (f *Fs) deleteObject(id string) error {
 			NoResponse: true,
 			Path:       "/folder/remove.json",
 		}
-		resp, err := f.srv.CallJSON(&opts, &removeDirData, nil)
+		resp, err := f.srv.CallJSON(ctx, &opts, &removeDirData, nil)
 		return f.shouldRetry(resp, err)
 	})
 }
@@ -275,14 +278,14 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 	if err != nil {
 		return err
 	}
-	item, err := f.readMetaDataForFolderID(rootID)
+	item, err := f.readMetaDataForFolderID(ctx, rootID)
 	if err != nil {
 		return err
 	}
 	if check && len(item.Files) != 0 {
 		return errors.New("folder not empty")
 	}
-	err = f.deleteObject(rootID)
+	err = f.deleteObject(ctx, rootID)
 	if err != nil {
 		return err
 	}
@@ -353,7 +356,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 			Method: "POST",
 			Path:   "/file/move_copy.json",
 		}
-		resp, err = f.srv.CallJSON(&opts, &copyFileData, &response)
+		resp, err = f.srv.CallJSON(ctx, &opts, &copyFileData, &response)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -410,7 +413,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 			Method: "POST",
 			Path:   "/file/move_copy.json",
 		}
-		resp, err = f.srv.CallJSON(&opts, &copyFileData, &response)
+		resp, err = f.srv.CallJSON(ctx, &opts, &copyFileData, &response)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -509,7 +512,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 			Method: "POST",
 			Path:   "/folder/move_copy.json",
 		}
-		resp, err = f.srv.CallJSON(&opts, &moveFolderData, &response)
+		resp, err = f.srv.CallJSON(ctx, &opts, &moveFolderData, &response)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -585,18 +588,18 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
 		fs:     f,
 		remote: remote,
 	}
-	return o, leaf, directoryID, nil
+	return o, enc.FromStandardName(leaf), directoryID, nil
 }

 // readMetaDataForFolderID reads the metadata for the folder with the given ID
-func (f *Fs) readMetaDataForFolderID(id string) (info *FolderList, err error) {
+func (f *Fs) readMetaDataForFolderID(ctx context.Context, id string) (info *FolderList, err error) {
 	var resp *http.Response
 	opts := rest.Opts{
 		Method: "GET",
 		Path:   "/folder/list.json/" + f.session.SessionID + "/" + id,
 	}
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &info)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -636,12 +639,16 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 	var resp *http.Response
 	response := createFileResponse{}
 	err := o.fs.pacer.Call(func() (bool, error) {
-		createFileData := createFile{SessionID: o.fs.session.SessionID, FolderID: directoryID, Name: replaceReservedChars(leaf)}
+		createFileData := createFile{
+			SessionID: o.fs.session.SessionID,
+			FolderID:  directoryID,
+			Name:      leaf,
+		}
 		opts := rest.Opts{
 			Method: "POST",
 			Path:   "/upload/create_file.json",
 		}
-		resp, err = o.fs.srv.CallJSON(&opts, &createFileData, &response)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, &createFileData, &response)
 		return o.fs.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -683,7 +690,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 	err = f.pacer.Call(func() (bool, error) {
 		createDirData := createFolder{
 			SessionID:       f.session.SessionID,
-			FolderName:      replaceReservedChars(leaf),
+			FolderName:      enc.FromStandardName(leaf),
 			FolderSubParent: pathID,
 			FolderIsPublic:  0,
 			FolderPublicUpl: 0,
@@ -694,7 +701,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 			Method: "POST",
 			Path:   "/folder.json",
 		}
-		resp, err = f.srv.CallJSON(&opts, &createDirData, &response)
+		resp, err = f.srv.CallJSON(ctx, &opts, &createDirData, &response)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -722,15 +729,15 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
 			Method: "GET",
 			Path:   "/folder/list.json/" + f.session.SessionID + "/" + pathID,
 		}
-		resp, err = f.srv.CallJSON(&opts, nil, &folderList)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &folderList)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
 		return "", false, errors.Wrap(err, "failed to get folder list")
 	}

+	leaf = enc.FromStandardName(leaf)
 	for _, folder := range folderList.Folders {
-		folder.Name = restoreReservedChars(folder.Name)
 		// fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID)

 		if leaf == folder.Name {
@@ -769,7 +776,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	}
 	folderList := FolderList{}
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &folderList)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &folderList)
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -777,7 +784,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	}

 	for _, folder := range folderList.Folders {
-		folder.Name = restoreReservedChars(folder.Name)
+		folder.Name = enc.ToStandardName(folder.Name)
 		// fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID)
 		remote := path.Join(dir, folder.Name)
 		// cache the directory ID for later lookups
@@ -788,7 +795,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	}

 	for _, file := range folderList.Files {
-		file.Name = restoreReservedChars(file.Name)
+		file.Name = enc.ToStandardName(file.Name)
 		// fs.Debugf(nil, "File: %s (%s)", file.Name, file.FileID)
 		remote := path.Join(dir, file.Name)
 		o, err := f.newObjectWithInfo(ctx, remote, &file)
@@ -851,9 +858,13 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 		NoResponse: true,
 		Path:       "/file/filesettings.json",
 	}
-	update := modTimeFile{SessionID: o.fs.session.SessionID, FileID: o.id, FileModificationTime: strconv.FormatInt(modTime.Unix(), 10)}
+	update := modTimeFile{
+		SessionID:            o.fs.session.SessionID,
+		FileID:               o.id,
+		FileModificationTime: strconv.FormatInt(modTime.Unix(), 10),
+	}
 	err := o.fs.pacer.Call(func() (bool, error) {
-		resp, err := o.fs.srv.CallJSON(&opts, &update, nil)
+		resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, nil)
 		return o.fs.shouldRetry(resp, err)
 	})

@@ -873,7 +884,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	}
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.Call(&opts)
+		resp, err = o.fs.srv.Call(ctx, &opts)
 		return o.fs.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -892,7 +903,7 @@ func (o *Object) Remove(ctx context.Context) error {
 			NoResponse: true,
 			Path:       "/file.json/" + o.fs.session.SessionID + "/" + o.id,
 		}
-		resp, err := o.fs.srv.Call(&opts)
+		resp, err := o.fs.srv.Call(ctx, &opts)
 		return o.fs.shouldRetry(resp, err)
 	})
 }
@@ -920,7 +931,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			Method: "POST",
 			Path:   "/upload/open_file_upload.json",
 		}
-		resp, err := o.fs.srv.CallJSON(&opts, &openUploadData, &openResponse)
+		resp, err := o.fs.srv.CallJSON(ctx, &opts, &openUploadData, &openResponse)
 		return o.fs.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -966,7 +977,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			MultipartFileName: o.remote, // ..name of the file for the attached file

 		}
-		resp, err = o.fs.srv.CallJSON(&opts, nil, &reply)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &reply)
 		return o.fs.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -989,7 +1000,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			Method: "POST",
 			Path:   "/upload/close_file_upload.json",
 		}
-		resp, err = o.fs.srv.CallJSON(&opts, &closeUploadData, &closeResponse)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, &closeUploadData, &closeResponse)
 		return o.fs.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1015,7 +1026,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			NoResponse: true,
 			Path:       "/file/access.json",
 		}
-		resp, err = o.fs.srv.CallJSON(&opts, &update, nil)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, &update, nil)
 		return o.fs.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1038,9 +1049,10 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 	err = o.fs.pacer.Call(func() (bool, error) {
 		opts := rest.Opts{
 			Method: "GET",
-			Path:   "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + url.QueryEscape(replaceReservedChars(leaf)),
+			Path: fmt.Sprintf("/folder/itembyname.json/%s/%s?name=%s",
+				o.fs.session.SessionID, directoryID, url.QueryEscape(enc.FromStandardName(leaf))),
 		}
-		resp, err = o.fs.srv.CallJSON(&opts, nil, &folderList)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &folderList)
 		return o.fs.shouldRetry(resp, err)
 	})
 	if err != nil {
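The List and FindLeaf hunks above show the boundary convention the encoder enforces: names are encoded on the way to the API and decoded on the way back, so the rest of rclone only ever sees standard names. A minimal sketch with a hypothetical lookup helper (listFolder is assumed, not a real OpenDrive call):

func (f *Fs) lookup(ctx context.Context, folderID, name string) (id string, found bool, err error) {
	wire := enc.FromStandardName(name) // encode on the way out
	items, err := f.listFolder(ctx, folderID)
	if err != nil {
		return "", false, err
	}
	for _, item := range items {
		if item.Name == wire { // compare in the API's (encoded) namespace
			return item.ID, true, nil
		}
	}
	return "", false, nil
}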
@@ -1,78 +0,0 @@
-/*
-Translate file names for OpenDrive
-
-OpenDrive reserved characters
-
-The following characters are OpenDrive reserved characters, and can't
-be used in OpenDrive folder and file names.
-
-	\ / : * ? " < > |
-
-OpenDrive files and folders can't have leading or trailing spaces also.
-
-*/
-
-package opendrive
-
-import (
-	"regexp"
-	"strings"
-)
-
-// charMap holds replacements for characters
-//
-// OpenDrive has a restricted set of characters compared to other cloud
-// storage systems, so we have to map these to the FULLWIDTH unicode
-// equivalents
-//
-// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
-var (
-	charMap = map[rune]rune{
-		'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
-		':':  '：', // FULLWIDTH COLON
-		'*':  '＊', // FULLWIDTH ASTERISK
-		'?':  '？', // FULLWIDTH QUESTION MARK
-		'"':  '＂', // FULLWIDTH QUOTATION MARK
-		'<':  '＜', // FULLWIDTH LESS-THAN SIGN
-		'>':  '＞', // FULLWIDTH GREATER-THAN SIGN
-		'|':  '｜', // FULLWIDTH VERTICAL LINE
-		' ':  '␠', // SYMBOL FOR SPACE
-	}
-	fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
-	fixEndingWithSpace   = regexp.MustCompile(` (/|$)`)
-	invCharMap           map[rune]rune
-)
-
-func init() {
-	// Create inverse charMap
-	invCharMap = make(map[rune]rune, len(charMap))
-	for k, v := range charMap {
-		invCharMap[v] = k
-	}
-}
-
-// replaceReservedChars takes a path and substitutes any reserved
-// characters in it
-func replaceReservedChars(in string) string {
-	// Filenames can't start with space
-	in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
-	// Filenames can't end with space
-	in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
-	return strings.Map(func(c rune) rune {
-		if replacement, ok := charMap[c]; ok && c != ' ' {
-			return replacement
-		}
-		return c
-	}, in)
-}
-
-// restoreReservedChars takes a path and undoes any substitutions
-// made by replaceReservedChars
-func restoreReservedChars(in string) string {
-	return strings.Map(func(c rune) rune {
-		if replacement, ok := invCharMap[c]; ok {
-			return replacement
-		}
-		return c
-	}, in)
-}
@@ -1,28 +0,0 @@
-package opendrive
-
-import "testing"
-
-func TestReplace(t *testing.T) {
-	for _, test := range []struct {
-		in  string
-		out string
-	}{
-		{"", ""},
-		{"abc 123", "abc 123"},
-		{`\*<>?:|#%".~`, `＼＊＜＞？：｜#%＂.~`},
-		{`\*<>?:|#%".~/\*<>?:|#%".~`, `＼＊＜＞？：｜#%＂.~/＼＊＜＞？：｜#%＂.~`},
-		{" leading space", "␠leading space"},
-		{" path/ leading spaces", "␠path/␠ leading spaces"},
-		{"trailing space ", "trailing space␠"},
-		{"trailing spaces /path ", "trailing spaces ␠/path␠"},
-	} {
-		got := replaceReservedChars(test.in)
-		if got != test.out {
-			t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
-		}
-		got2 := restoreReservedChars(got)
-		if got2 != test.in {
-			t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
-		}
-	}
-}
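With a second replace.go/replace_test.go pair deleted, the pattern across this commit series is clear: each backend's hand-rolled substitution table is retired in favour of a single encoder constant from fs/encodings, used at every API boundary. A sketch of the method set these call sites rely on (signatures inferred from their use in this diff, not from the package's documentation):

const enc = encodings.OpenDrive

// roundTrip demonstrates the encoder contract the deleted tests used
// to verify by hand: encoding then decoding a leaf is the identity.
func roundTrip(leaf string) bool {
	wire := enc.FromStandardName(leaf)      // leaf name going to the API
	return enc.ToStandardName(wire) == leaf // and coming back unchanged
}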
@@ -26,6 +26,7 @@ import (
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/config/obscure"
+	"github.com/rclone/rclone/fs/encodings"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/dircache"
@@ -35,6 +36,8 @@ import (
 	"golang.org/x/oauth2"
 )

+const enc = encodings.Pcloud
+
 const (
 	rcloneClientID              = "DnONSzyJXpm"
 	rcloneEncryptedClientSecret = "ej1OIF39VOQQ0PXaSdK9ztkLw3tdLNscW2157TKNQdQKkICR4uU7aFg4eFM"
@@ -175,21 +178,6 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
 	return doRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }

-// substitute reserved characters for pcloud
-//
-// Generally all characters are allowed in filenames, except the NULL
-// byte, forward and backslash (/,\ and \0)
-func replaceReservedChars(x string) string {
-	// Backslash for FULLWIDTH REVERSE SOLIDUS
-	return strings.Replace(x, "\\", "＼", -1)
-}
-
-// restore reserved characters for pcloud
-func restoreReservedChars(x string) string {
-	// FULLWIDTH REVERSE SOLIDUS for Backslash
-	return strings.Replace(x, "＼", "\\", -1)
-}
-
 // readMetaDataForPath reads the metadata from the path
 func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
 	// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
@@ -201,7 +189,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
 		return nil, err
 	}

-	found, err := f.listAll(directoryID, false, true, func(item *api.Item) bool {
+	found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
 		if item.Name == leaf {
 			info = item
 			return true
@@ -334,7 +322,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 // FindLeaf finds a directory of name leaf in the folder with ID pathID
 func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
 	// Find the leaf in pathID
-	found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
+	found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
 		if item.Name == leaf {
 			pathIDOut = item.ID
 			return true
@@ -354,10 +342,10 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 		Path:       "/createfolder",
 		Parameters: url.Values{},
 	}
-	opts.Parameters.Set("name", replaceReservedChars(leaf))
+	opts.Parameters.Set("name", enc.FromStandardName(leaf))
 	opts.Parameters.Set("folderid", dirIDtoNumber(pathID))
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &result)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
 		err = result.Error.Update(err)
 		return shouldRetry(resp, err)
 	})
@@ -400,7 +388,7 @@ type listAllFn func(*api.Item) bool
 // Lists the directory required calling the user function on each item found
 //
 // If the user fn ever returns true then it early exits with found = true
-func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
+func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
 	opts := rest.Opts{
 		Method: "GET",
 		Path:   "/listfolder",
@@ -412,7 +400,7 @@ func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn list
 	var result api.ItemResult
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &result)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
 		err = result.Error.Update(err)
 		return shouldRetry(resp, err)
 	})
@@ -430,7 +418,7 @@ func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn list
 				continue
 			}
 		}
-		item.Name = restoreReservedChars(item.Name)
+		item.Name = enc.ToStandardName(item.Name)
 		if fn(item) {
 			found = true
 			break
@@ -458,7 +446,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		return nil, err
 	}
 	var iErr error
-	_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
+	_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
 		remote := path.Join(dir, info.Name)
 		if info.IsFolder {
 			// cache the directory ID for later lookups
@@ -563,7 +551,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 	var resp *http.Response
 	var result api.ItemResult
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &result)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
 		err = result.Error.Update(err)
 		return shouldRetry(resp, err)
 	})
@@ -622,13 +610,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		Parameters: url.Values{},
 	}
 	opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
-	opts.Parameters.Set("toname", replaceReservedChars(leaf))
+	opts.Parameters.Set("toname", enc.FromStandardName(leaf))
 	opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
 	opts.Parameters.Set("mtime", fmt.Sprintf("%d", srcObj.modTime.Unix()))
 	var resp *http.Response
 	var result api.ItemResult
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &result)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
 		err = result.Error.Update(err)
 		return shouldRetry(resp, err)
 	})
@@ -666,7 +654,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 	var resp *http.Response
 	var result api.Error
 	return f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &result)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
 		err = result.Update(err)
 		return shouldRetry(resp, err)
 	})
@@ -701,12 +689,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		Parameters: url.Values{},
 	}
 	opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
-	opts.Parameters.Set("toname", replaceReservedChars(leaf))
+	opts.Parameters.Set("toname", enc.FromStandardName(leaf))
 	opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
 	var resp *http.Response
 	var result api.ItemResult
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &result)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
 		err = result.Error.Update(err)
 		return shouldRetry(resp, err)
 	})
@@ -798,12 +786,12 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 		Parameters: url.Values{},
 	}
 	opts.Parameters.Set("folderid", dirIDtoNumber(srcID))
-	opts.Parameters.Set("toname", replaceReservedChars(leaf))
+	opts.Parameters.Set("toname", enc.FromStandardName(leaf))
 	opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
 	var resp *http.Response
 	var result api.ItemResult
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &result)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
 		err = result.Error.Update(err)
 		return shouldRetry(resp, err)
 	})
@@ -830,7 +818,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 	var resp *http.Response
 	var q api.UserInfo
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.srv.CallJSON(&opts, nil, &q)
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &q)
 		err = q.Error.Update(err)
 		return shouldRetry(resp, err)
 	})
@@ -881,7 +869,7 @@ func (o *Object) getHashes(ctx context.Context) (err error) {
 	}
 	opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.CallJSON(&opts, nil, &result)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
 		err = result.Error.Update(err)
 		return shouldRetry(resp, err)
 	})
@@ -984,7 +972,7 @@ func (o *Object) Storable() bool {
 }

 // downloadURL fetches the download link
-func (o *Object) downloadURL() (URL string, err error) {
+func (o *Object) downloadURL(ctx context.Context) (URL string, err error) {
 	if o.id == "" {
 		return "", errors.New("can't download - no id")
 	}
@@ -1000,7 +988,7 @@ func (o *Object) downloadURL() (URL string, err error) {
 	}
 	opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.CallJSON(&opts, nil, &result)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
 		err = result.Error.Update(err)
 		return shouldRetry(resp, err)
 	})
@@ -1016,7 +1004,7 @@ func (o *Object) downloadURL() (URL string, err error) {

 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
-	url, err := o.downloadURL()
+	url, err := o.downloadURL(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -1027,7 +1015,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		Options: options,
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.Call(&opts)
+		resp, err = o.fs.srv.Call(ctx, &opts)
 		return shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1078,7 +1066,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		Parameters:       url.Values{},
 		TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
 	}
-	leaf = replaceReservedChars(leaf)
+	leaf = enc.FromStandardName(leaf)
 	opts.Parameters.Set("filename", leaf)
 	opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
 	opts.Parameters.Set("nopartial", "1")
@@ -1104,7 +1092,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}

 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-		resp, err = o.fs.srv.CallJSON(&opts, nil, &result)
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
 		err = result.Error.Update(err)
 		return shouldRetry(resp, err)
 	})
@@ -1134,7 +1122,7 @@ func (o *Object) Remove(ctx context.Context) error {
 	var result api.ItemResult
 	opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
 	return o.fs.pacer.Call(func() (bool, error) {
-		resp, err := o.fs.srv.CallJSON(&opts, nil, &result)
+		resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &result)
 		err = result.Error.Update(err)
 		return shouldRetry(resp, err)
 	})
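pcloud reports failures inside an HTTP 200 JSON body, which is why every call above runs result.Error.Update(err) before shouldRetry: the in-body API error is merged with the transport error so the retry decision sees both failure modes. A rough sketch of the shape of that pattern (the error type below is illustrative, not the real api.Error):

type apiError struct {
	Result int    `json:"result"` // non-zero means the call failed
	Msg    string `json:"error"`
}

// Update returns err if set, otherwise promotes a non-zero API result
// code into a Go error, so callers need only one error check.
func (e *apiError) Update(err error) error {
	if err != nil {
		return err
	}
	if e.Result != 0 {
		return fmt.Errorf("pcloud error: %s (%d)", e.Msg, e.Result)
	}
	return nil
}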
backend/premiumizeme/api/types.go (new file, 83 lines)
@@ -0,0 +1,83 @@
+// Package api contains definitions for using the premiumize.me API
+package api
+
+import "fmt"
+
+// Response is returned by all messages and embedded in the
+// structures below
+type Response struct {
+	Message string `json:"message,omitempty"`
+	Status  string `json:"status"`
+}
+
+// Error satisfies the error interface
+func (e *Response) Error() string {
+	return fmt.Sprintf("%s: %s", e.Status, e.Message)
+}
+
+// AsErr checks the status and returns an err if bad or nil if good
+func (e *Response) AsErr() error {
+	if e.Status != "success" {
+		return e
+	}
+	return nil
+}
+
+// Item Types
+const (
+	ItemTypeFolder = "folder"
+	ItemTypeFile   = "file"
+)
+
+// Item refers to a file or folder
+type Item struct {
+	Breadcrumbs     []Breadcrumb `json:"breadcrumbs"`
+	CreatedAt       int64        `json:"created_at,omitempty"`
+	ID              string       `json:"id"`
+	Link            string       `json:"link,omitempty"`
+	Name            string       `json:"name"`
+	Size            int64        `json:"size,omitempty"`
+	StreamLink      string       `json:"stream_link,omitempty"`
+	Type            string       `json:"type"`
+	TranscodeStatus string       `json:"transcode_status"`
+	IP              string       `json:"ip"`
+	MimeType        string       `json:"mime_type"`
+}
+
+// Breadcrumb is part of the breadcrumb trail for a file or folder. It
+// is returned as part of folder/list if required
+type Breadcrumb struct {
+	ID       string `json:"id,omitempty"`
+	Name     string `json:"name,omitempty"`
+	ParentID string `json:"parent_id,omitempty"`
+}
+
+// FolderListResponse is the response to folder/list
+type FolderListResponse struct {
+	Response
+	Content  []Item `json:"content"`
+	Name     string `json:"name,omitempty"`
+	ParentID string `json:"parent_id,omitempty"`
+}
+
+// FolderCreateResponse is the response to folder/create
+type FolderCreateResponse struct {
+	Response
+	ID string `json:"id,omitempty"`
+}
+
+// FolderUploadinfoResponse is the response to folder/uploadinfo
+type FolderUploadinfoResponse struct {
+	Response
+	Token string `json:"token,omitempty"`
+	URL   string `json:"url,omitempty"`
+}
+
+// AccountInfoResponse is the response to account/info
+type AccountInfoResponse struct {
+	Response
+	CustomerID   string  `json:"customer_id,omitempty"`
+	LimitUsed    float64 `json:"limit_used,omitempty"` // fraction 0..1 of download traffic limit
+	PremiumUntil int64   `json:"premium_until,omitempty"`
+	SpaceUsed    float64 `json:"space_used,omitempty"`
+}
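Embedding Response in every reply struct means a single error check covers the whole API surface: decode the reply, then ask the embedded Response whether the call succeeded. A hypothetical call site (the getJSON helper is assumed, not part of the file above):

func listFolder(id string) ([]api.Item, error) {
	var result api.FolderListResponse
	if err := getJSON("/folder/list?id="+id, &result); err != nil { // hypothetical decode helper
		return nil, err
	}
	// status != "success" becomes an error via the embedded Response
	if err := result.AsErr(); err != nil {
		return nil, err
	}
	return result.Content, nil
}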
backend/premiumizeme/premiumizeme.go (new file, 1192 lines; diff suppressed because it is too large)
backend/premiumizeme/premiumizeme_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
+// Test filesystem interface
+package premiumizeme_test
+
+import (
+	"testing"
+
+	"github.com/rclone/rclone/backend/premiumizeme"
+	"github.com/rclone/rclone/fstest/fstests"
+)
+
+// TestIntegration runs integration tests against the remote
+func TestIntegration(t *testing.T) {
+	fstests.Run(t, &fstests.Opt{
+		RemoteName: "TestPremiumizeMe:",
+		NilObject:  (*premiumizeme.Object)(nil),
+	})
+}
backend/putio/fs.go (new file, 693 lines)
@@ -0,0 +1,693 @@
package putio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/putdotio/go-putio/putio"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/dircache"
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
)
|
||||
|
||||
// Fs represents a remote Putio server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
features *fs.Features // optional features
|
||||
client *putio.Client // client for making API calls to Put.io
|
||||
pacer *fs.Pacer // To pace the API calls
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
oAuthClient *http.Client
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("Putio root '%s'", f.root)
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// shouldRetry returns a boolean as to whether this err deserves to be
|
||||
// retried. It returns the err as a convenience
|
||||
func shouldRetry(err error) (bool, error) {
|
||||
if err == nil {
|
||||
return false, nil
|
||||
}
|
||||
if fserrors.ShouldRetry(err) {
|
||||
return true, err
|
||||
}
|
||||
if perr, ok := err.(*putio.ErrorResponse); ok {
|
||||
if perr.Response.StatusCode == 429 || perr.Response.StatusCode >= 500 {
|
||||
return true, err
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
|
||||
// defer log.Trace(name, "root=%v", root)("f=%+v, err=%v", &f, &err)
|
||||
oAuthClient, _, err := oauthutil.NewClient(name, m, putioConfig)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to configure putio")
|
||||
}
|
||||
p := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
client: putio.NewClient(oAuthClient),
|
||||
oAuthClient: oAuthClient,
|
||||
}
|
||||
p.features = (&fs.Features{
|
||||
DuplicateFiles: true,
|
||||
ReadMimeType: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(p)
|
||||
p.dirCache = dircache.New(root, "0", p)
|
||||
ctx := context.Background()
|
||||
// Find the current root
|
||||
err = p.dirCache.FindRoot(ctx, false)
|
||||
if err != nil {
|
||||
// Assume it is a file
|
||||
newRoot, remote := dircache.SplitPath(root)
|
||||
tempF := *p
|
||||
tempF.dirCache = dircache.New(newRoot, "0", &tempF)
|
||||
tempF.root = newRoot
|
||||
// Make new Fs which is the parent
|
||||
err = tempF.dirCache.FindRoot(ctx, false)
|
||||
if err != nil {
|
||||
// No root so return old f
|
||||
return p, nil
|
||||
}
|
||||
_, err := tempF.NewObject(ctx, remote)
|
||||
if err != nil {
|
||||
// unable to list folder so return old f
|
||||
return p, nil
|
||||
}
|
||||
// XXX: update the old f here instead of returning tempF, since
|
||||
// `features` were already filled with functions having *f as a receiver.
|
||||
// See https://github.com/rclone/rclone/issues/2182
|
||||
p.dirCache = tempF.dirCache
|
||||
p.root = tempF.root
|
||||
return p, fs.ErrorIsFile
|
||||
}
|
||||
// fs.Debugf(p, "Root id: %s", p.dirCache.RootID())
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func itoa(i int64) string {
|
||||
return strconv.FormatInt(i, 10)
|
||||
}
|
||||
|
||||
func atoi(a string) int64 {
|
||||
i, err := strconv.ParseInt(a, 10, 64)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
// CreateDir makes a directory with pathID as parent and name leaf
|
||||
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
|
||||
// defer log.Trace(f, "pathID=%v, leaf=%v", pathID, leaf)("newID=%v, err=%v", newID, &err)
|
||||
parentID := atoi(pathID)
|
||||
var entry putio.File
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
// fs.Debugf(f, "creating folder. part: %s, parentID: %d", leaf, parentID)
|
||||
entry, err = f.client.Files.CreateFolder(ctx, enc.FromStandardName(leaf), parentID)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
return itoa(entry.ID), err
|
||||
}
|
||||
|
||||
// FindLeaf finds a directory of name leaf in the folder with ID pathID
|
||||
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
|
||||
// defer log.Trace(f, "pathID=%v, leaf=%v", pathID, leaf)("pathIDOut=%v, found=%v, err=%v", pathIDOut, found, &err)
|
||||
if pathID == "0" && leaf == "" {
|
||||
// that's the root directory
|
||||
return pathID, true, nil
|
||||
}
|
||||
fileID := atoi(pathID)
|
||||
var children []putio.File
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
// fs.Debugf(f, "listing file: %d", fileID)
|
||||
children, _, err = f.client.Files.List(ctx, fileID)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode == 404 {
|
||||
err = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
for _, child := range children {
|
||||
if enc.ToStandardName(child.Name) == leaf {
|
||||
found = true
|
||||
pathIDOut = itoa(child.ID)
|
||||
if !child.IsDir() {
|
||||
err = fs.ErrorIsFile
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
// defer log.Trace(f, "dir=%v", dir)("err=%v", &err)
|
||||
err = f.dirCache.FindRoot(ctx, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
parentID := atoi(directoryID)
|
||||
var children []putio.File
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
// fs.Debugf(f, "listing files inside List: %d", parentID)
|
||||
children, _, err = f.client.Files.List(ctx, parentID)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, child := range children {
|
||||
remote := path.Join(dir, enc.ToStandardName(child.Name))
|
||||
// fs.Debugf(f, "child: %s", remote)
|
||||
if child.IsDir() {
|
||||
f.dirCache.Put(remote, itoa(child.ID))
|
||||
d := fs.NewDir(remote, child.UpdatedAt.Time)
|
||||
entries = append(entries, d)
|
||||
} else {
|
||||
o, err := f.newObjectWithInfo(ctx, remote, child)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entries = append(entries, o)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Put the object
|
||||
//
|
||||
// Copy the reader in to the new object which is returned
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
|
||||
// defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err)
|
||||
exisitingObj, err := f.NewObject(ctx, src.Remote())
|
||||
switch err {
|
||||
case nil:
|
||||
return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
// Not found so create it
|
||||
return f.PutUnchecked(ctx, in, src, options...)
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// PutUnchecked uploads the object
|
||||
//
|
||||
// This will create a duplicate if we upload a new file without
|
||||
// checking to see if there is one already - use Put() for that.
|
||||
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
|
||||
// defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err)
|
||||
size := src.Size()
|
||||
remote := src.Remote()
|
||||
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
loc, err := f.createUpload(ctx, leaf, size, directoryID, src.ModTime(ctx))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fileID, err := f.sendUpload(ctx, loc, size, in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var entry putio.File
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
// fs.Debugf(f, "getting file: %d", fileID)
|
||||
entry, err = f.client.Files.Get(ctx, fileID)
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.newObjectWithInfo(ctx, remote, entry)
|
||||
}
|
||||
|
||||
func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID string, modTime time.Time) (location string, err error) {
|
||||
// defer log.Trace(f, "name=%v, size=%v, parentID=%v, modTime=%v", name, size, parentID, modTime.String())("location=%v, err=%v", location, &err)
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
req, err := http.NewRequest("POST", "https://upload.put.io/files/", nil)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
|
||||
req.Header.Set("tus-resumable", "1.0.0")
|
||||
req.Header.Set("upload-length", strconv.FormatInt(size, 10))
|
||||
b64name := base64.StdEncoding.EncodeToString([]byte(enc.FromStandardName(name)))
|
||||
b64true := base64.StdEncoding.EncodeToString([]byte("true"))
|
||||
b64parentID := base64.StdEncoding.EncodeToString([]byte(parentID))
|
||||
b64modifiedAt := base64.StdEncoding.EncodeToString([]byte(modTime.Format(time.RFC3339)))
|
||||
req.Header.Set("upload-metadata", fmt.Sprintf("name %s,no-torrent %s,parent_id %s,updated-at %s", b64name, b64true, b64parentID, b64modifiedAt))
|
||||
resp, err := f.oAuthClient.Do(req)
|
||||
retry, err := shouldRetry(err)
|
||||
if retry {
|
||||
return true, err
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if resp.StatusCode != 201 {
|
||||
return false, fmt.Errorf("unexpected status code from upload create: %d", resp.StatusCode)
|
||||
}
|
||||
location = resp.Header.Get("location")
|
||||
if location == "" {
|
||||
return false, errors.New("empty location header from upload create")
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
return
|
||||
}

func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.Reader) (fileID int64, err error) {
	// defer log.Trace(f, "location=%v, size=%v", location, size)("fileID=%v, err=%v", fileID, &err)
	if size == 0 {
		err = f.pacer.Call(func() (bool, error) {
			fs.Debugf(f, "Sending zero length chunk")
			fileID, err = f.transferChunk(ctx, location, 0, bytes.NewReader([]byte{}), 0)
			return shouldRetry(err)
		})
		return
	}
	var start int64
	buf := make([]byte, defaultChunkSize)
	for start < size {
		reqSize := size - start
		if reqSize >= int64(defaultChunkSize) {
			reqSize = int64(defaultChunkSize)
		}
		chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, reqSize)

		// Transfer the chunk
		err = f.pacer.Call(func() (bool, error) {
			fs.Debugf(f, "Sending chunk. start: %d length: %d", start, reqSize)
			// TODO get file offset and seek to the position
			fileID, err = f.transferChunk(ctx, location, start, chunk, reqSize)
			return shouldRetry(err)
		})
		if err != nil {
			return
		}

		start += reqSize
	}
	return
}
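
// Aside: the chunk retry above only works because each chunk can be
// re-read after a failed PATCH. Below is a minimal, self-contained sketch
// of that idea (illustrative only, not rclone's actual
// readers.NewRepeatableLimitReaderBuffer implementation):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// repeatableChunk buffers whatever it reads from the source so that a
// failed upload attempt can Seek back to 0 and replay the same bytes.
type repeatableChunk struct {
	src io.Reader     // remaining unread source data
	buf *bytes.Buffer // bytes already read, kept for replay
	pos int           // read position within buf
}

func (r *repeatableChunk) Read(p []byte) (int, error) {
	if r.pos < r.buf.Len() {
		n := copy(p, r.buf.Bytes()[r.pos:])
		r.pos += n
		return n, nil
	}
	n, err := r.src.Read(p)
	r.buf.Write(p[:n])
	r.pos += n
	return n, err
}

// Seek supports only a rewind to the start, which is all a retry needs.
func (r *repeatableChunk) Seek(offset int64, whence int) (int64, error) {
	if offset != 0 || whence != io.SeekStart {
		return 0, fmt.Errorf("only Seek(0, io.SeekStart) is supported")
	}
	r.pos = 0
	return 0, nil
}

func main() {
	chunk := &repeatableChunk{src: strings.NewReader("chunk data"), buf: &bytes.Buffer{}}
	first, _ := io.ReadAll(chunk) // first attempt drains the source
	_, _ = chunk.Seek(0, io.SeekStart)
	second, _ := io.ReadAll(chunk)               // the retry replays from the buffer
	fmt.Println(string(first) == string(second)) // true
}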

func (f *Fs) transferChunk(ctx context.Context, location string, start int64, chunk io.ReadSeeker, chunkSize int64) (fileID int64, err error) {
	// defer log.Trace(f, "location=%v, start=%v, chunkSize=%v", location, start, chunkSize)("fileID=%v, err=%v", fileID, &err)
	_, _ = chunk.Seek(0, io.SeekStart)
	req, err := f.makeUploadPatchRequest(ctx, location, chunk, start, chunkSize)
	if err != nil {
		return 0, err
	}
	req = req.WithContext(ctx)
	res, err := f.oAuthClient.Do(req)
	if err != nil {
		return 0, err
	}
	defer func() {
		_ = res.Body.Close()
	}()
	if res.StatusCode != 204 {
		return 0, fmt.Errorf("unexpected status code while transferring chunk: %d", res.StatusCode)
	}
	sfid := res.Header.Get("putio-file-id")
	if sfid != "" {
		fileID, err = strconv.ParseInt(sfid, 10, 64)
		if err != nil {
			return 0, err
		}
	}
	return fileID, nil
}

func (f *Fs) makeUploadPatchRequest(ctx context.Context, location string, in io.Reader, offset, length int64) (*http.Request, error) {
	req, err := http.NewRequest("PATCH", location, in)
	if err != nil {
		return nil, err
	}
	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
	req.Header.Set("tus-resumable", "1.0.0")
	req.Header.Set("upload-offset", strconv.FormatInt(offset, 10))
	req.Header.Set("content-length", strconv.FormatInt(length, 10))
	req.Header.Set("content-type", "application/offset+octet-stream")
	return req, nil
}
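
// Aside: createUpload renders the tus "upload-metadata" header as
// comma-separated "key base64(value)" pairs. A small, hedged sketch of
// that encoding as a standalone helper (the backend inlines it instead;
// buildTusMetadata is a hypothetical name):

package main

import (
	"encoding/base64"
	"fmt"
	"sort"
	"strings"
)

// buildTusMetadata encodes metadata the way the tus creation extension
// expects: "key base64(value)" pairs joined with commas.
func buildTusMetadata(meta map[string]string) string {
	pairs := make([]string, 0, len(meta))
	for k, v := range meta {
		pairs = append(pairs, k+" "+base64.StdEncoding.EncodeToString([]byte(v)))
	}
	sort.Strings(pairs) // deterministic order; tus does not require one
	return strings.Join(pairs, ",")
}

func main() {
	fmt.Println(buildTusMetadata(map[string]string{
		"name":      "report.pdf",
		"parent_id": "0",
	}))
	// Output: name cmVwb3J0LnBkZg==,parent_id MA==
}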

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
	// defer log.Trace(f, "dir=%v", dir)("err=%v", &err)
	err = f.dirCache.FindRoot(ctx, true)
	if err != nil {
		return err
	}
	if dir != "" {
		_, err = f.dirCache.FindDir(ctx, dir, true)
	}
	return err
}

// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
	// defer log.Trace(f, "dir=%v", dir)("err=%v", &err)

	root := strings.Trim(path.Join(f.root, dir), "/")

	// can't remove root
	if root == "" {
		return errors.New("can't remove root directory")
	}

	// check directory exists
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return errors.Wrap(err, "Rmdir")
	}
	dirID := atoi(directoryID)

	// check directory empty
	var children []putio.File
	err = f.pacer.Call(func() (bool, error) {
		// fs.Debugf(f, "listing files: %d", dirID)
		children, _, err = f.client.Files.List(ctx, dirID)
		return shouldRetry(err)
	})
	if err != nil {
		return errors.Wrap(err, "Rmdir")
	}
	if len(children) != 0 {
		return errors.New("directory not empty")
	}

	// remove it
	err = f.pacer.Call(func() (bool, error) {
		// fs.Debugf(f, "deleting file: %d", dirID)
		err = f.client.Files.Delete(ctx, dirID)
		return shouldRetry(err)
	})
	f.dirCache.FlushDir(dir)
	return err
}

// Precision returns the precision
func (f *Fs) Precision() time.Duration {
	return time.Second
}

// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) (err error) {
	// defer log.Trace(f, "")("err=%v", &err)

	if f.root == "" {
		return errors.New("can't purge root directory")
	}
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		return err
	}

	rootID := atoi(f.dirCache.RootID())
	// Let putio delete the filesystem tree
	err = f.pacer.Call(func() (bool, error) {
		// fs.Debugf(f, "deleting file: %d", rootID)
		err = f.client.Files.Delete(ctx, rootID)
		return shouldRetry(err)
	})
	f.dirCache.ResetRoot()
	return err
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Object, err error) {
	// defer log.Trace(f, "src=%+v, remote=%v", src, remote)("o=%+v, err=%v", &o, &err)
	srcObj, ok := src.(*Object)
	if !ok {
		return nil, fs.ErrorCantCopy
	}
	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
	if err != nil {
		return nil, err
	}
	err = f.pacer.Call(func() (bool, error) {
		params := url.Values{}
		params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
		params.Set("parent_id", directoryID)
		params.Set("name", enc.FromStandardName(leaf))
		req, err := f.client.NewRequest(ctx, "POST", "/v2/files/copy", strings.NewReader(params.Encode()))
		if err != nil {
			return false, err
		}
		req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
		// fs.Debugf(f, "copying file (%d) to parent_id: %s", srcObj.file.ID, directoryID)
		_, err = f.client.Do(req, nil)
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}
	return f.NewObject(ctx, remote)
}

// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Object, err error) {
	// defer log.Trace(f, "src=%+v, remote=%v", src, remote)("o=%+v, err=%v", &o, &err)
	srcObj, ok := src.(*Object)
	if !ok {
		return nil, fs.ErrorCantMove
	}
	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
	if err != nil {
		return nil, err
	}
	err = f.pacer.Call(func() (bool, error) {
		params := url.Values{}
		params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
		params.Set("parent_id", directoryID)
		params.Set("name", enc.FromStandardName(leaf))
		req, err := f.client.NewRequest(ctx, "POST", "/v2/files/move", strings.NewReader(params.Encode()))
		if err != nil {
			return false, err
		}
		req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
		// fs.Debugf(f, "moving file (%d) to parent_id: %s", srcObj.file.ID, directoryID)
		_, err = f.client.Do(req, nil)
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}
	return f.NewObject(ctx, remote)
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
	// defer log.Trace(f, "src=%+v, srcRemote=%v, dstRemote=%v", src, srcRemote, dstRemote)("err=%v", &err)
	srcFs, ok := src.(*Fs)
	if !ok {
		return fs.ErrorCantDirMove
	}
	srcPath := path.Join(srcFs.root, srcRemote)
	dstPath := path.Join(f.root, dstRemote)

	// Refuse to move to or from the root
	if srcPath == "" || dstPath == "" {
		return errors.New("can't move root directory")
	}

	// find the root src directory
	err = srcFs.dirCache.FindRoot(ctx, false)
	if err != nil {
		return err
	}

	// find the root dst directory
	if dstRemote != "" {
		err = f.dirCache.FindRoot(ctx, true)
		if err != nil {
			return err
		}
	} else {
		if f.dirCache.FoundRoot() {
			return fs.ErrorDirExists
		}
	}

	// Find ID of dst parent, creating subdirs if necessary
	var leaf, dstDirectoryID string
	findPath := dstRemote
	if dstRemote == "" {
		findPath = f.root
	}
	leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true)
	if err != nil {
		return err
	}

	// Check destination does not exist
	if dstRemote != "" {
		_, err = f.dirCache.FindDir(ctx, dstRemote, false)
		if err == fs.ErrorDirNotFound {
			// OK
		} else if err != nil {
			return err
		} else {
			return fs.ErrorDirExists
		}
	}

	// Find ID of src
	srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
	if err != nil {
		return err
	}

	err = f.pacer.Call(func() (bool, error) {
		params := url.Values{}
		params.Set("file_id", srcID)
		params.Set("parent_id", dstDirectoryID)
		params.Set("name", enc.FromStandardName(leaf))
		req, err := f.client.NewRequest(ctx, "POST", "/v2/files/move", strings.NewReader(params.Encode()))
		if err != nil {
			return false, err
		}
		req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
		// fs.Debugf(f, "moving file (%s) to parent_id: %s", srcID, dstDirectoryID)
		_, err = f.client.Do(req, nil)
		return shouldRetry(err)
	})
	srcFs.dirCache.FlushDir(srcRemote)
	return err
}

// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
	// defer log.Trace(f, "")("usage=%+v, err=%v", usage, &err)
	var ai putio.AccountInfo
	err = f.pacer.Call(func() (bool, error) {
		// fs.Debugf(f, "getting account info")
		ai, err = f.client.Account.Info(ctx)
		return shouldRetry(err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "about failed")
	}
	return &fs.Usage{
		Total: fs.NewUsageValue(ai.Disk.Size),  // quota of bytes that can be used
		Used:  fs.NewUsageValue(ai.Disk.Used),  // bytes in use
		Free:  fs.NewUsageValue(ai.Disk.Avail), // bytes which can be uploaded before reaching the quota
	}, nil
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.CRC32)
}

// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
	// defer log.Trace(f, "")("")
	f.dirCache.ResetRoot()
}

// CleanUp the trash in the Fs
func (f *Fs) CleanUp(ctx context.Context) (err error) {
	// defer log.Trace(f, "")("err=%v", &err)
	return f.pacer.Call(func() (bool, error) {
		req, err := f.client.NewRequest(ctx, "POST", "/v2/trash/empty", nil)
		if err != nil {
			return false, err
		}
		// fs.Debugf(f, "emptying trash")
		_, err = f.client.Do(req, nil)
		return shouldRetry(err)
	})
}

backend/putio/object.go (new file, 286 lines)
@@ -0,0 +1,286 @@
package putio

import (
	"context"
	"io"
	"net/http"
	"net/url"
	"path"
	"strconv"
	"time"

	"github.com/pkg/errors"
	"github.com/putdotio/go-putio/putio"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/hash"
)

// Object describes a Putio object
//
// Putio Objects always have full metadata
type Object struct {
	fs      *Fs // what this object is part of
	file    *putio.File
	remote  string // The remote path
	modtime time.Time
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
	// defer log.Trace(f, "remote=%v", remote)("o=%+v, err=%v", &o, &err)
	obj := &Object{
		fs:     f,
		remote: remote,
	}
	err = obj.readEntryAndSetMetadata(ctx)
	if err != nil {
		return nil, err
	}
	return obj, err
}

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info putio.File) (o fs.Object, err error) {
	// defer log.Trace(f, "remote=%v, info=%+v", remote, &info)("o=%+v, err=%v", &o, &err)
	obj := &Object{
		fs:     f,
		remote: remote,
	}
	err = obj.setMetadataFromEntry(info)
	if err != nil {
		return nil, err
	}
	return obj, err
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns the CRC-32 of an object
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.CRC32 {
		return "", hash.ErrUnsupported
	}
	err := o.readEntryAndSetMetadata(ctx)
	if err != nil {
		return "", errors.Wrap(err, "failed to read hash from metadata")
	}
	return o.file.CRC32, nil
}
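
// Aside: put.io reports CRC32 checksums. To compare one against a local
// file you can compute the IEEE CRC32 rendered as lowercase hex, which is
// the form rclone's hash.CRC32 comparisons assume here (sketch only;
// "example.txt" is a hypothetical path):

package main

import (
	"fmt"
	"hash/crc32"
	"io"
	"os"
)

// fileCRC32 returns the IEEE CRC32 of a file as 8 lowercase hex digits.
func fileCRC32(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	h := crc32.NewIEEE()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return fmt.Sprintf("%08x", h.Sum32()), nil
}

func main() {
	sum, err := fileCRC32("example.txt")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(sum)
}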

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	if o.file == nil {
		return 0
	}
	return o.file.Size
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	if o.file == nil {
		return ""
	}
	return itoa(o.file.ID)
}

// MimeType returns the content type of the Object if
// known, or "" if not
func (o *Object) MimeType(ctx context.Context) string {
	err := o.readEntryAndSetMetadata(ctx)
	if err != nil {
		return ""
	}
	return o.file.ContentType
}

// setMetadataFromEntry sets the fs data from a putio.File
//
// This isn't a complete set of metadata and has an inaccurate date
func (o *Object) setMetadataFromEntry(info putio.File) error {
	o.file = &info
	o.modtime = info.UpdatedAt.Time
	return nil
}

// Reads the entry for a file from putio
func (o *Object) readEntry(ctx context.Context) (f *putio.File, err error) {
	// defer log.Trace(o, "")("f=%+v, err=%v", f, &err)
	leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
	if err != nil {
		if err == fs.ErrorDirNotFound {
			return nil, fs.ErrorObjectNotFound
		}
		return nil, err
	}
	var resp struct {
		File putio.File `json:"file"`
	}
	err = o.fs.pacer.Call(func() (bool, error) {
		// fs.Debugf(o, "requesting child. directoryID: %s, name: %s", directoryID, leaf)
		req, err := o.fs.client.NewRequest(ctx, "GET", "/v2/files/"+directoryID+"/child?name="+url.QueryEscape(enc.FromStandardName(leaf)), nil)
		if err != nil {
			return false, err
		}
		_, err = o.fs.client.Do(req, &resp)
		if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode == 404 {
			return false, fs.ErrorObjectNotFound
		}
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}
	if resp.File.IsDir() {
		return nil, fs.ErrorNotAFile
	}
	return &resp.File, err
}

// Read entry if not set and set metadata from it
func (o *Object) readEntryAndSetMetadata(ctx context.Context) error {
	if o.file != nil {
		return nil
	}
	entry, err := o.readEntry(ctx)
	if err != nil {
		return err
	}
	return o.setMetadataFromEntry(*entry)
}

// Returns the remote path for the object
func (o *Object) remotePath() string {
	return path.Join(o.fs.root, o.remote)
}

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	if o.modtime.IsZero() {
		err := o.readEntryAndSetMetadata(ctx)
		if err != nil {
			fs.Debugf(o, "Failed to read metadata: %v", err)
			return time.Now()
		}
	}
	return o.modtime
}

// SetModTime sets the modification time of the local fs object
//
// Commits the datastore
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
	// defer log.Trace(o, "modTime=%v", modTime.String())("err=%v", &err)
	req, err := o.fs.client.NewRequest(ctx, "POST", "/v2/files/touch?file_id="+strconv.FormatInt(o.file.ID, 10)+"&updated_at="+url.QueryEscape(modTime.Format(time.RFC3339)), nil)
	if err != nil {
		return err
	}
	// fs.Debugf(o, "setting modtime: %s", modTime.String())
	_, err = o.fs.client.Do(req, nil)
	if err != nil {
		return err
	}
	o.modtime = modTime
	if o.file != nil {
		o.file.UpdatedAt.Time = modTime
	}
	return nil
}

// Storable returns whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	// defer log.Trace(o, "")("err=%v", &err)
	var storageURL string
	err = o.fs.pacer.Call(func() (bool, error) {
		storageURL, err = o.fs.client.Files.URL(ctx, o.file.ID, true)
		return shouldRetry(err)
	})
	if err != nil {
		return
	}

	var resp *http.Response
	headers := fs.OpenOptionHeaders(options)
	err = o.fs.pacer.Call(func() (bool, error) {
		req, err := http.NewRequest(http.MethodGet, storageURL, nil)
		if err != nil {
			return shouldRetry(err)
		}
		req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
		req.Header.Set("User-Agent", o.fs.client.UserAgent)

		// merge headers with extra headers
		for header, value := range headers {
			req.Header.Set(header, value)
		}
		// fs.Debugf(o, "opening file: id=%d", o.file.ID)
		resp, err = http.DefaultClient.Do(req)
		return shouldRetry(err)
	})
	if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode >= 400 && perr.Response.StatusCode <= 499 {
		_ = resp.Body.Close()
		return nil, fserrors.NoRetryError(err)
	}
	return resp.Body, err
}

// Update the already existing object
//
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	// defer log.Trace(o, "src=%+v", src)("err=%v", &err)
	remote := o.remotePath()
	if ignoredFiles.MatchString(remote) {
		fs.Logf(o, "File name disallowed - not uploading")
		return nil
	}
	err = o.Remove(ctx)
	if err != nil {
		return err
	}
	newObj, err := o.fs.PutUnchecked(ctx, in, src, options...)
	if err != nil {
		return err
	}
	*o = *(newObj.(*Object))
	return err
}

// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
	// defer log.Trace(o, "")("err=%v", &err)
	return o.fs.pacer.Call(func() (bool, error) {
		// fs.Debugf(o, "removing file: id=%d", o.file.ID)
		err = o.fs.client.Files.Delete(ctx, o.file.ID)
		return shouldRetry(err)
	})
}

backend/putio/putio.go (new file, 86 lines)
@@ -0,0 +1,86 @@
package putio

import (
	"log"
	"regexp"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/encodings"
	"github.com/rclone/rclone/lib/dircache"
	"github.com/rclone/rclone/lib/oauthutil"
	"golang.org/x/oauth2"
)

/*
// TestPutio
stringNeedsEscaping = []rune{
	'/', '\x00'
}
maxFileLength = 255
canWriteUnnormalized = true
canReadUnnormalized = true
canReadRenormalized = true
canStream = false
*/

const enc = encodings.Putio

// Constants
const (
	rcloneClientID             = "4131"
	rcloneObscuredClientSecret = "cMwrjWVmrHZp3gf1ZpCrlyGAmPpB-YY5BbVnO1fj-G9evcd8"
	minSleep                   = 10 * time.Millisecond
	maxSleep                   = 2 * time.Second
	decayConstant              = 2 // bigger for slower decay, exponential
	defaultChunkSize           = 48 * fs.MebiByte
)

var (
	// Description of how to auth for this app
	putioConfig = &oauth2.Config{
		Scopes: []string{},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://api.put.io/v2/oauth2/authenticate",
			TokenURL: "https://api.put.io/v2/oauth2/access_token",
		},
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneObscuredClientSecret),
		RedirectURL:  oauthutil.RedirectLocalhostURL,
	}
	// A regexp matching path names for ignoring unnecessary files
	ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r)$`)
)
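
// Aside: a quick demonstration of which paths the ignoredFiles pattern
// rejects (self-contained; the names below are examples only):

package main

import (
	"fmt"
	"regexp"
)

var ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r)$`)

func main() {
	for _, name := range []string{
		"photos/Thumbs.db", // ignored: case-insensitive, anchored at a path segment
		".DS_Store",        // ignored: matches at the start of the path
		"desktop.ini.bak",  // kept: the pattern is anchored at the end
	} {
		fmt.Printf("%-20q ignored=%v\n", name, ignoredFiles.MatchString(name))
	}
}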

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "putio",
		Description: "Put.io",
		NewFs:       NewFs,
		Config: func(name string, m configmap.Mapper) {
			err := oauthutil.ConfigNoOffline("putio", name, m, putioConfig)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		},
	})
}

// Check the interfaces are satisfied
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.Purger          = (*Fs)(nil)
	_ fs.PutUncheckeder  = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ dircache.DirCacher = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.CleanUpper      = (*Fs)(nil)
	_ fs.Copier          = (*Fs)(nil)
	_ fs.Object          = (*Object)(nil)
	_ fs.MimeTyper       = (*Object)(nil)
	_ fs.IDer            = (*Object)(nil)
)

backend/putio/putio_test.go (new file, 16 lines)
@@ -0,0 +1,16 @@
// Test Put.io filesystem interface
package putio

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestPutio:",
		NilObject:  (*Object)(nil),
	})
}
@@ -14,21 +14,24 @@ import (
"regexp"
"strconv"
"strings"
"sync"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
qsConfig "github.com/yunify/qingstor-sdk-go/config"
qsErr "github.com/yunify/qingstor-sdk-go/request/errors"
qs "github.com/yunify/qingstor-sdk-go/service"
"github.com/rclone/rclone/lib/bucket"
qsConfig "github.com/yunify/qingstor-sdk-go/v3/config"
qsErr "github.com/yunify/qingstor-sdk-go/v3/request/errors"
qs "github.com/yunify/qingstor-sdk-go/v3/service"
)

const enc = encodings.QingStor

// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -146,16 +149,15 @@ type Options struct {

// Fs represents a remote qingstor server
type Fs struct {
name string // The name of the remote
root string // The root is a subdir, is a special object
opt Options // parsed options
features *fs.Features // optional features
svc *qs.Service // The connection to the qingstor server
zone string // The zone we are working on
bucket string // The bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucketOK and bucketDeleted
bucketOK bool // true if we have created the bucket
bucketDeleted bool // true if we have deleted the bucket
name string // The name of the remote
root string // The root is a subdir, is a special object
opt Options // parsed options
features *fs.Features // optional features
svc *qs.Service // The connection to the qingstor server
zone string // The zone we are working on
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache for bucket creation status
}

// Object describes a qingstor object
@@ -176,22 +178,24 @@ type Object struct {

// ------------------------------------------------------------

// Pattern to match a qingstor path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)

// parseParse parses a qingstor 'url'
func qsParsePath(path string) (bucket, key string, err error) {
// Pattern to match a qingstor path
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("Couldn't parse bucket out of qingstor path %q", path)
} else {
bucket, key = parts[1], parts[2]
key = strings.Trim(key, "/")
}
// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
}

// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
return enc.FromStandardName(bucketName), enc.FromStandardPath(bucketPath)
}

// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}
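
// Aside: with f.root = "mybucket/backup", f.split("photos/a.jpg") yields
// bucket "mybucket" and path "backup/photos/a.jpg" (before encoding). A
// sketch that mimics lib/bucket.Split - splitting on the first "/" - for
// illustration only, not the library's implementation:

package main

import (
	"fmt"
	"path"
	"strings"
)

// split returns everything before the first "/" as the bucket and the
// rest as the in-bucket path.
func split(absPath string) (bucketName, bucketPath string) {
	slash := strings.IndexRune(absPath, '/')
	if slash < 0 {
		return absPath, ""
	}
	return absPath[:slash], absPath[slash+1:]
}

func main() {
	root := "mybucket/backup" // f.root after parsePath
	b, p := split(path.Join(root, "photos/a.jpg"))
	fmt.Println(b, p) // mybucket backup/photos/a.jpg
}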

// Split an URL into three parts: protocol host and port
func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
/*
@@ -301,6 +305,12 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
return
}

// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
@@ -317,10 +327,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "qingstor: upload cutoff")
}
bucket, key, err := qsParsePath(root)
if err != nil {
return nil, err
}
svc, err := qsServiceConnection(opt)
if err != nil {
return nil, err
@@ -331,36 +337,34 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}

f := &Fs{
name: name,
root: key,
opt: *opt,
svc: svc,
zone: opt.Zone,
bucket: bucket,
name: name,
opt: *opt,
svc: svc,
zone: opt.Zone,
cache: bucket.NewCache(),
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(f)

if f.root != "" {
if !strings.HasSuffix(f.root, "/") {
f.root += "/"
}
//Check to see if the object exists
bucketInit, err := svc.Bucket(bucket, opt.Zone)
if f.rootBucket != "" && f.rootDirectory != "" {
// Check to see if the object exists
bucketInit, err := svc.Bucket(f.rootBucket, opt.Zone)
if err != nil {
return nil, err
}
_, err = bucketInit.HeadObject(key, &qs.HeadObjectInput{})
encodedDirectory := enc.FromStandardPath(f.rootDirectory)
_, err = bucketInit.HeadObject(encodedDirectory, &qs.HeadObjectInput{})
if err == nil {
f.root = path.Dir(key)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
}
f.setRoot(newRoot)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
@@ -375,18 +379,18 @@ func (f *Fs) Name() string {

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
if f.root == "" {
return fmt.Sprintf("QingStor bucket %s", f.bucket)
if f.rootBucket == "" {
return fmt.Sprintf("QingStor root")
}
return fmt.Sprintf("QingStor bucket %s root %s", f.bucket, f.root)
if f.rootDirectory == "" {
return fmt.Sprintf("QingStor bucket %s", f.rootBucket)
}
return fmt.Sprintf("QingStor bucket %s path %s", f.rootBucket, f.rootDirectory)
}

// Precision of the remote
@@ -426,7 +430,8 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
err := f.Mkdir(ctx, "")
dstBucket, dstPath := f.split(remote)
err := f.makeBucket(ctx, dstBucket)
if err != nil {
return nil, err
}
@@ -435,22 +440,21 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcFs := srcObj.fs
key := f.root + remote
source := path.Join("/"+srcFs.bucket, srcFs.root+srcObj.remote)
srcBucket, srcPath := srcObj.split()
source := path.Join("/", srcBucket, srcPath)

fs.Debugf(f, "Copied, source key is: %s, and dst key is: %s", source, key)
// fs.Debugf(f, "Copied, source key is: %s, and dst key is: %s", source, key)
req := qs.PutObjectInput{
XQSCopySource: &source,
}
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
bucketInit, err := f.svc.Bucket(dstBucket, f.zone)

if err != nil {
return nil, err
}
_, err = bucketInit.PutObject(key, &req)
_, err = bucketInit.PutObject(dstPath, &req)
if err != nil {
fs.Debugf(f, "Copy Failed, API Error: %v", err)
// fs.Debugf(f, "Copy Failed, API Error: %v", err)
return nil, err
}
return f.NewObject(ctx, remote)
@@ -511,29 +515,27 @@ type listFn func(remote string, object *qs.KeyType, isDirectory bool) error
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) error {
prefix := f.root
if dir != "" {
prefix += dir + "/"
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) error {
if prefix != "" {
prefix += "/"
}
if directory != "" {
directory += "/"
}

delimiter := ""
if !recurse {
delimiter = "/"
}

maxLimit := int(listLimitSize)
var marker *string

for {
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
bucketInit, err := f.svc.Bucket(bucket, f.zone)
if err != nil {
return err
}
// FIXME need to implement ALL loop
req := qs.ListObjectsInput{
Delimiter: &delimiter,
Prefix: &prefix,
Prefix: &directory,
Limit: &maxLimit,
Marker: marker,
}
@@ -546,7 +548,6 @@ func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) erro
}
return err
}
rootLength := len(f.root)
if !recurse {
for _, commonPrefix := range resp.CommonPrefixes {
if commonPrefix == nil {
@@ -554,15 +555,18 @@ func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) erro
continue
}
remote := *commonPrefix
if !strings.HasPrefix(remote, f.root) {
remote = enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
}
remote = remote[rootLength:]
remote = remote[len(prefix):]
if addBucket {
remote = path.Join(bucket, remote)
}
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}

err = fn(remote, &qs.KeyType{Key: &remote}, true)
if err != nil {
return err
@@ -571,20 +575,27 @@ func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) erro
}

for _, object := range resp.Keys {
key := qs.StringValue(object.Key)
if !strings.HasPrefix(key, f.root) {
fs.Logf(f, "Odd name received %q", key)
remote := qs.StringValue(object.Key)
remote = enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
}
remote := key[rootLength:]
remote = remote[len(prefix):]
if addBucket {
remote = path.Join(bucket, remote)
}
err = fn(remote, object, false)
if err != nil {
return err
}
}
if resp.HasMore != nil && !*resp.HasMore {
break
}
// Use NextMarker if set, otherwise use last Key
if resp.NextMarker == nil || *resp.NextMarker == "" {
//marker = resp.Keys[len(resp.Keys)-1].Key
fs.Errorf(f, "Expecting NextMarker but didn't find one")
break
} else {
marker = resp.NextMarker
@@ -610,20 +621,10 @@ func (f *Fs) itemToDirEntry(remote string, object *qs.KeyType, isDirectory bool)
return o, nil
}

// mark the bucket as being OK
func (f *Fs) markBucketOK() {
if f.bucket != "" {
f.bucketOKMu.Lock()
f.bucketOK = true
f.bucketDeleted = false
f.bucketOKMu.Unlock()
}
}

// listDir lists files and directories to out
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
// List the objects and directories
err = f.list(ctx, dir, false, func(remote string, object *qs.KeyType, isDirectory bool) error {
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *qs.KeyType, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
@@ -637,16 +638,12 @@ func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, er
return nil, err
}
// bucket must be present if listing succeeded
f.markBucketOK()
f.cache.MarkOK(bucket)
return entries, nil
}

// listBuckets lists the buckets to out
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}

func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
req := qs.ListBucketsInput{
Location: &f.zone,
}
@@ -656,7 +653,7 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
}

for _, bucket := range resp.Buckets {
d := fs.NewDir(qs.StringValue(bucket.Name), qs.TimeValue(bucket.Created))
d := fs.NewDir(enc.ToStandardName(qs.StringValue(bucket.Name)), qs.TimeValue(bucket.Created))
entries = append(entries, d)
}
return entries, nil
@@ -672,10 +669,14 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.bucket == "" {
return f.listBuckets(dir)
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return f.listDir(ctx, dir)
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}

// ListR lists the objects and directories of the Fs starting
@@ -695,106 +696,105 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
bucket, directory := f.split(dir)
list := walk.NewListRHelper(callback)
err = f.list(ctx, dir, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
}
if bucket == "" {
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
return list.Add(entry)
})
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return list.Flush()
}

// Check if the bucket exists
func (f *Fs) dirExists() (bool, error) {
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return false, err
}

_, err = bucketInit.Head()
if err == nil {
return true, nil
}

if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusNotFound {
err = nil
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
bucket := entry.Remote()
err = listR(bucket, "", f.rootDirectory, true)
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
}
} else {
err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
}
return false, err
return list.Flush()
}

// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.bucketOK {
return nil
}

bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return err
}
/* When a bucket is deleted, QingStor needs about 60 seconds to sync its status,
so wait for the sync to finish before operating on a just-deleted bucket.
*/
retries := 0
for retries <= 120 {
statistics, err := bucketInit.GetStatistics()
if statistics == nil || err != nil {
break
}
switch *statistics.Status {
case "deleted":
fs.Debugf(f, "Wait for qingstor sync bucket status, retries: %d", retries)
time.Sleep(time.Second * 1)
retries++
continue
default:
break
}
break
}

if !f.bucketDeleted {
exists, err := f.dirExists()
if err == nil {
f.bucketOK = exists
}
if err != nil || exists {
return err
}
}

_, err = bucketInit.Put()
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusConflict {
err = nil
}
}

if err == nil {
f.bucketOK = true
f.bucketDeleted = false
}

return err
bucket, _ := f.split(dir)
return f.makeBucket(ctx, bucket)
}

// dirIsEmpty checks if the bucket is empty
func (f *Fs) dirIsEmpty() (bool, error) {
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
// makeBucket creates the bucket if it doesn't exist
func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
return f.cache.Create(bucket, func() error {
bucketInit, err := f.svc.Bucket(bucket, f.zone)
if err != nil {
return err
}
/* When a bucket is deleted, QingStor needs about 60 seconds to sync its status,
so wait for the sync to finish before operating on a just-deleted bucket.
*/
wasDeleted := false
retries := 0
for retries <= 120 {
statistics, err := bucketInit.GetStatistics()
if statistics == nil || err != nil {
break
}
switch *statistics.Status {
case "deleted":
fs.Debugf(f, "Wait for qingstor bucket to be deleted, retries: %d", retries)
time.Sleep(time.Second * 1)
retries++
wasDeleted = true
continue
default:
break
}
break
}

retries = 0
for retries <= 120 {
_, err = bucketInit.Put()
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusConflict {
if wasDeleted {
fs.Debugf(f, "Wait for qingstor bucket to be creatable, retries: %d", retries)
time.Sleep(time.Second * 1)
retries++
continue
}
err = nil
}
}
break
}
return err
}, nil)
}
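
// Aside: the bucket cache introduced here makes Create a once-per-bucket
// operation, so concurrent uploads don't race to create the same bucket.
// A simplified sketch of that pattern (illustrative only, not rclone's
// lib/bucket.Cache implementation):

package main

import (
	"fmt"
	"sync"
)

// bucketCache remembers which buckets exist so the expensive create
// function runs at most once per bucket.
type bucketCache struct {
	mu     sync.Mutex
	exists map[string]bool
}

func (c *bucketCache) Create(name string, create func() error) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.exists[name] {
		return nil // already known to exist: skip the API call
	}
	if err := create(); err != nil {
		return err
	}
	c.exists[name] = true
	return nil
}

func main() {
	cache := &bucketCache{exists: map[string]bool{}}
	mk := func() error { fmt.Println("PUT /mybucket"); return nil }
	_ = cache.Create("mybucket", mk) // calls the API
	_ = cache.Create("mybucket", mk) // cached: no second call
}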

// bucketIsEmpty checks if the bucket is empty
func (f *Fs) bucketIsEmpty(bucket string) (bool, error) {
bucketInit, err := f.svc.Bucket(bucket, f.zone)
if err != nil {
return true, err
}
@@ -812,71 +812,64 @@ func (f *Fs) dirIsEmpty() (bool, error) {

// Rmdir deletes a bucket
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
bucket, directory := f.split(dir)
if bucket == "" || directory != "" {
return nil
}

isEmpty, err := f.dirIsEmpty()
isEmpty, err := f.bucketIsEmpty(bucket)
if err != nil {
return err
}
if !isEmpty {
fs.Debugf(f, "The bucket %s you tried to delete not empty.", f.bucket)
// fs.Debugf(f, "The bucket %s you tried to delete not empty.", bucket)
return errors.New("BucketNotEmpty: The bucket you tried to delete is not empty")
}

fs.Debugf(f, "Tried to delete the bucket %s", f.bucket)
bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
if err != nil {
return err
}
retries := 0
for retries <= 10 {
_, delErr := bucketInit.Delete()
if delErr != nil {
if e, ok := delErr.(*qsErr.QingStorError); ok {
switch e.Code {
// The status of "lease" takes a few seconds to "ready" when creating a new bucket
// wait for lease status ready
case "lease_not_ready":
fs.Debugf(f, "QingStor bucket lease not ready, retries: %d", retries)
retries++
time.Sleep(time.Second * 1)
continue
default:
err = e
break
}
}
} else {
err = delErr
return f.cache.Remove(bucket, func() error {
// fs.Debugf(f, "Deleting the bucket %s", bucket)
bucketInit, err := f.svc.Bucket(bucket, f.zone)
if err != nil {
return err
}
break
}

if err == nil {
f.bucketOK = false
f.bucketDeleted = true
}
return err
retries := 0
for retries <= 10 {
_, delErr := bucketInit.Delete()
if delErr != nil {
if e, ok := delErr.(*qsErr.QingStorError); ok {
switch e.Code {
// The status of "lease" takes a few seconds to "ready" when creating a new bucket
// wait for lease status ready
case "lease_not_ready":
fs.Debugf(f, "QingStor bucket lease not ready, retries: %d", retries)
retries++
time.Sleep(time.Second * 1)
continue
default:
err = e
break
}
}
} else {
err = delErr
}
break
}
return err
})
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
bucket, bucketPath := o.split()
bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
if err != nil {
return err
}

key := o.fs.root + o.remote
fs.Debugf(o, "Read metadata of key: %s", key)
resp, err := bucketInit.HeadObject(key, &qs.HeadObjectInput{})
// fs.Debugf(o, "Read metadata of key: %s", key)
resp, err := bucketInit.HeadObject(bucketPath, &qs.HeadObjectInput{})
if err != nil {
fs.Debugf(o, "Read metadata failed, API Error: %v", err)
// fs.Debugf(o, "Read metadata failed, API Error: %v", err)
if e, ok := err.(*qsErr.QingStorError); ok {
if e.StatusCode == http.StatusNotFound {
return fs.ErrorObjectNotFound
@@ -938,10 +931,10 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return nil
}
// Copy the object to itself to update the metadata
key := o.fs.root + o.remote
sourceKey := path.Join("/", o.fs.bucket, key)
bucket, bucketPath := o.split()
sourceKey := path.Join("/", bucket, bucketPath)

bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
if err != nil {
return err
}
@@ -950,20 +943,21 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
XQSCopySource: &sourceKey,
ContentType: &mimeType,
}
_, err = bucketInit.PutObject(key, &req)
_, err = bucketInit.PutObject(bucketPath, &req)

return err
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
bucket, bucketPath := o.split()
bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
if err != nil {
return nil, err
}

key := o.fs.root + o.remote
req := qs.GetObjectInput{}
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch option.(type) {
case *fs.RangeOption, *fs.SeekOption:
@@ -975,7 +969,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
}
}
}
resp, err := bucketInit.GetObject(key, &req)
resp, err := bucketInit.GetObject(bucketPath, &req)
if err != nil {
return nil, err
}
@@ -985,21 +979,21 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
// Update in to the object
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
// The maximum size of upload object is multipartUploadSize * MaxMultipleParts
err := o.fs.Mkdir(ctx, "")
bucket, bucketPath := o.split()
err := o.fs.makeBucket(ctx, bucket)
if err != nil {
return err
}

key := o.fs.root + o.remote
// Guess the content type
mimeType := fs.MimeType(ctx, src)

req := uploadInput{
body: in,
qsSvc: o.fs.svc,
bucket: o.fs.bucket,
bucket: bucket,
zone: o.fs.zone,
key: key,
key: bucketPath,
mimeType: mimeType,
partSize: int64(o.fs.opt.ChunkSize),
concurrency: o.fs.opt.UploadConcurrency,
@@ -1023,13 +1017,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// Remove this object
func (o *Object) Remove(ctx context.Context) error {
bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
bucket, bucketPath := o.split()
bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
if err != nil {
return err
}

key := o.fs.root + o.remote
_, err = bucketInit.DeleteObject(key)
_, err = bucketInit.DeleteObject(bucketPath)
return err
}

@@ -15,7 +15,7 @@ import (

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
qs "github.com/yunify/qingstor-sdk-go/service"
qs "github.com/yunify/qingstor-sdk-go/v3/service"
)

const (
backend/s3/s3.go: 692 lines changed (file diff suppressed because it is too large)
@@ -11,8 +11,9 @@ import (
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestS3:",
|
||||
NilObject: (*Object)(nil),
|
||||
RemoteName: "TestS3:",
|
||||
NilObject: (*Object)(nil),
|
||||
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: minChunkSize,
|
||||
},
|
||||
|
||||
@@ -36,7 +36,8 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
connectionsPerSecond = 10 // don't make more than this many ssh connections/s
|
||||
connectionsPerSecond = 10 // don't make more than this many ssh connections/s
|
||||
hashCommandNotSupported = "none"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -102,9 +103,14 @@ when the ssh-agent contains many keys.`,
|
||||
Default: false,
|
||||
Help: "Disable the execution of SSH commands to determine if remote file hashing is available.\nLeave blank or set to false to enable hashing (recommended), set to true to disable hashing.",
|
||||
}, {
|
||||
Name: "ask_password",
|
||||
Default: false,
|
||||
Help: "Allow asking for SFTP password when needed.",
|
||||
Name: "ask_password",
|
||||
Default: false,
|
||||
Help: `Allow asking for SFTP password when needed.
|
||||
|
||||
If this is set and no password is supplied then rclone will:
|
||||
- ask for a password
|
||||
- not contact the ssh agent
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "path_override",
|
||||
@@ -127,6 +133,16 @@ Home directory can be found in a shared folder called "home"
|
||||
Default: true,
|
||||
Help: "Set the modified time on the remote if set.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "md5sum_command",
|
||||
Default: "",
|
||||
Help: "The command used to read md5 hashes. Leave blank for autodetect.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "sha1sum_command",
|
||||
Default: "",
|
||||
Help: "The command used to read sha1 hashes. Leave blank for autodetect.",
|
||||
Advanced: true,
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
@@ -146,14 +162,17 @@ type Options struct {
    AskPassword    bool   `config:"ask_password"`
    PathOverride   string `config:"path_override"`
    SetModTime     bool   `config:"set_modtime"`
    Md5sumCommand  string `config:"md5sum_command"`
    Sha1sumCommand string `config:"sha1sum_command"`
}

// Fs stores the interface to the remote SFTP files
type Fs struct {
    name      string
    root      string
    opt       Options          // parsed options
    features  *fs.Features     // optional features
    opt       Options          // parsed options
    m         configmap.Mapper // config
    features  *fs.Features     // optional features
    config    *ssh.ClientConfig
    url       string
    mkdirLock *stringLock

@@ -350,7 +369,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {

    keyFile := env.ShellExpand(opt.KeyFile)
    // Add ssh agent-auth if no password or file specified
    if (opt.Pass == "" && keyFile == "") || opt.KeyUseAgent {
    if (opt.Pass == "" && keyFile == "" && !opt.AskPassword) || opt.KeyUseAgent {
        sshAgentClient, _, err := sshagent.New()
        if err != nil {
            return nil, errors.Wrap(err, "couldn't connect to ssh-agent")

@@ -421,16 +440,17 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
    }

    return NewFsWithConnection(ctx, name, root, opt, sshConfig)
    return NewFsWithConnection(ctx, name, root, m, opt, sshConfig)
}

// NewFsWithConnection creates a new Fs object from the name and root and a ssh.ClientConfig. It connects to
// the host specified in the ssh.ClientConfig
func NewFsWithConnection(ctx context.Context, name string, root string, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
func NewFsWithConnection(ctx context.Context, name string, root string, m configmap.Mapper, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
    f := &Fs{
        name:      name,
        root:      root,
        opt:       *opt,
        m:         m,
        config:    sshConfig,
        url:       "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root,
        mkdirLock: newStringLock(),
@@ -756,45 +776,79 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
    return nil
}

// Hashes returns the supported hash types of the filesystem
func (f *Fs) Hashes() hash.Set {
    if f.cachedHashes != nil {
        return *f.cachedHashes
// run runs cmd on the remote end returning standard output
func (f *Fs) run(cmd string) ([]byte, error) {
    c, err := f.getSftpConnection()
    if err != nil {
        return nil, errors.Wrap(err, "run: get SFTP connection")
    }
    defer f.putSftpConnection(&c, err)

    session, err := c.sshClient.NewSession()
    if err != nil {
        return nil, errors.Wrap(err, "run: get SFTP session")
    }
    defer func() {
        _ = session.Close()
    }()

    var stdout, stderr bytes.Buffer
    session.Stdout = &stdout
    session.Stderr = &stderr

    err = session.Run(cmd)
    if err != nil {
        return nil, errors.Wrapf(err, "failed to run %q: %s", cmd, stderr.Bytes())
    }

    return stdout.Bytes(), nil
}

// Hashes returns the supported hash types of the filesystem
func (f *Fs) Hashes() hash.Set {
    if f.opt.DisableHashCheck {
        return hash.Set(hash.None)
    }

    c, err := f.getSftpConnection()
    if err != nil {
        fs.Errorf(f, "Couldn't get SSH connection to figure out Hashes: %v", err)
        return hash.Set(hash.None)
    if f.cachedHashes != nil {
        return *f.cachedHashes
    }
    defer f.putSftpConnection(&c, err)
    session, err := c.sshClient.NewSession()
    if err != nil {
        return hash.Set(hash.None)
    }
    sha1Output, _ := session.Output("echo 'abc' | sha1sum")
    expectedSha1 := "03cfd743661f07975fa2f1220c5194cbaff48451"
    _ = session.Close()

    session, err = c.sshClient.NewSession()
    if err != nil {
        return hash.Set(hash.None)
    // look for a hash command which works
    checkHash := func(commands []string, expected string, hashCommand *string, changed *bool) bool {
        if *hashCommand == hashCommandNotSupported {
            return false
        }
        if *hashCommand != "" {
            return true
        }
        *changed = true
        for _, command := range commands {
            output, err := f.run(command)
            if err != nil {
                continue
            }
            output = bytes.TrimSpace(output)
            fs.Debugf(f, "checking %q command: %q", command, output)
            if parseHash(output) == expected {
                *hashCommand = command
                return true
            }
        }
        *hashCommand = hashCommandNotSupported
        return false
    }
    md5Output, _ := session.Output("echo 'abc' | md5sum")
    expectedMd5 := "0bee89b07a248e27c83fc3d5951213c1"
    _ = session.Close()

    sha1Works := parseHash(sha1Output) == expectedSha1
    md5Works := parseHash(md5Output) == expectedMd5
    changed := false
    md5Works := checkHash([]string{"md5sum", "md5 -r"}, "d41d8cd98f00b204e9800998ecf8427e", &f.opt.Md5sumCommand, &changed)
    sha1Works := checkHash([]string{"sha1sum", "sha1 -r"}, "da39a3ee5e6b4b0d3255bfef95601890afd80709", &f.opt.Sha1sumCommand, &changed)

    if changed {
        f.m.Set("md5sum_command", f.opt.Md5sumCommand)
        f.m.Set("sha1sum_command", f.opt.Sha1sumCommand)
    }

    set := hash.NewHashSet()
    if !sha1Works && !md5Works {
        set.Add(hash.None)
    }
    if sha1Works {
        set.Add(hash.SHA1)
    }
@@ -802,26 +856,12 @@ func (f *Fs) Hashes() hash.Set {
        set.Add(hash.MD5)
    }

    _ = session.Close()
    f.cachedHashes = &set
    return set
}

// About gets usage stats
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
    c, err := f.getSftpConnection()
    if err != nil {
        return nil, errors.Wrap(err, "About get SFTP connection")
    }
    session, err := c.sshClient.NewSession()
    f.putSftpConnection(&c, err)
    if err != nil {
        return nil, errors.Wrap(err, "About put SFTP connection")
    }

    var stdout, stderr bytes.Buffer
    session.Stdout = &stdout
    session.Stderr = &stderr
    escapedPath := shellEscape(f.root)
    if f.opt.PathOverride != "" {
        escapedPath = shellEscape(path.Join(f.opt.PathOverride, f.root))

@@ -829,14 +869,12 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
    if len(escapedPath) == 0 {
        escapedPath = "/"
    }
    err = session.Run("df -k " + escapedPath)
    stdout, err := f.run("df -k " + escapedPath)
    if err != nil {
        _ = session.Close()
        return nil, errors.Wrap(err, "About invocation of df failed. Your remote may not support about.")
        return nil, errors.Wrap(err, "your remote may not support About")
    }
    _ = session.Close()

    usageTotal, usageUsed, usageAvail := parseUsage(stdout.Bytes())
    usageTotal, usageUsed, usageAvail := parseUsage(stdout)
    usage := &fs.Usage{}
    if usageTotal >= 0 {
        usage.Total = fs.NewUsageValue(usageTotal)
@@ -871,23 +909,27 @@ func (o *Object) Remote() string {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
    if o.fs.opt.DisableHashCheck {
        return "", nil
    }
    _ = o.fs.Hashes()

    var hashCmd string
    if r == hash.MD5 {
        if o.md5sum != nil {
            return *o.md5sum, nil
        }
        hashCmd = "md5sum"
        hashCmd = o.fs.opt.Md5sumCommand
    } else if r == hash.SHA1 {
        if o.sha1sum != nil {
            return *o.sha1sum, nil
        }
        hashCmd = "sha1sum"
        hashCmd = o.fs.opt.Sha1sumCommand
    } else {
        return "", hash.ErrUnsupported
    }

    if o.fs.opt.DisableHashCheck {
        return "", nil
    if hashCmd == "" || hashCmd == hashCommandNotSupported {
        return "", hash.ErrUnsupported
    }

    c, err := o.fs.getSftpConnection()

@@ -908,6 +950,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
        escapedPath = shellEscape(path.Join(o.fs.opt.PathOverride, o.remote))
    }
    err = session.Run(hashCmd + " " + escapedPath)
    fs.Debugf(nil, "sftp cmd = %s", escapedPath)
    if err != nil {
        _ = session.Close()
        fs.Debugf(o, "Failed to calculate %v hash: %v (%s)", r, err, bytes.TrimSpace(stderr.Bytes()))

@@ -915,7 +958,10 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
    }

    _ = session.Close()
    str := parseHash(stdout.Bytes())
    b := stdout.Bytes()
    fs.Debugf(nil, "sftp output = %q", b)
    str := parseHash(b)
    fs.Debugf(nil, "sftp hash = %q", str)
    if r == hash.MD5 {
        o.md5sum = &str
    } else if r == hash.SHA1 {

@@ -924,7 +970,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
    return str, nil
}

var shellEscapeRegex = regexp.MustCompile(`[^A-Za-z0-9_.,:/@\n-]`)
var shellEscapeRegex = regexp.MustCompile("[^A-Za-z0-9_.,:/\\@\u0080-\uFFFFFFFF\n-]")

// Escape a string s.t. it cannot cause unintended behavior
// when sending it to a shell.

@@ -937,7 +983,9 @@ func shellEscape(str string) string {
// an invocation of md5sum/sha1sum to a hash string
// as expected by the rest of this application
func parseHash(bytes []byte) string {
    return strings.Split(string(bytes), " ")[0] // Split at hash / filename separator
    // For strings with backslash *sum writes a leading \
    // https://unix.stackexchange.com/q/313733/94054
    return strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0] // Split at hash / filename separator
}

// Parses the byte array output from the SSH session
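A minimal standalone sketch (not rclone code) of the probing idea above: run each candidate command with empty input and compare against the known digest of the empty string, stripping the leading backslash that coreutils *sum tools emit for escaped filenames. This runs locally rather than over SSH, which is what f.run does in the real backend:

// probe_hash.go - local analogue of the autodetection in Hashes()
package main

import (
    "fmt"
    "os/exec"
    "strings"
)

// parseHash mirrors the diff: strip any leading "\" before splitting
// at the hash / filename separator.
func parseHash(out []byte) string {
    return strings.Split(strings.TrimLeft(string(out), "\\"), " ")[0]
}

func main() {
    const emptyMD5 = "d41d8cd98f00b204e9800998ecf8427e" // md5 of the empty string
    for _, cmd := range []string{"md5sum", "md5 -r"} {
        parts := strings.Fields(cmd)
        // With no stdin attached the command hashes empty input.
        out, err := exec.Command(parts[0], parts[1:]...).Output()
        if err != nil {
            continue // command not available, try the next candidate
        }
        if parseHash(out) == emptyMD5 {
            fmt.Println("working md5 command:", cmd)
            return
        }
    }
    fmt.Println("no md5 command found")
}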
152 backend/sharefile/api/types.go (new file)
@@ -0,0 +1,152 @@
// Package api contains definitions for using the sharefile API
package api

import (
    "fmt"
    "time"

    "github.com/pkg/errors"
)

// ListRequestSelect should be used in $select for Items/Children
const ListRequestSelect = "odata.count,FileCount,Name,FileName,CreationDate,IsHidden,FileSizeBytes,odata.type,Id,Hash,ClientModifiedDate"

// ListResponse is returned from the Items/Children call
type ListResponse struct {
    OdataCount int    `json:"odata.count"`
    Value      []Item `json:"value"`
}

// Item Types
const (
    ItemTypeFolder = "ShareFile.Api.Models.Folder"
    ItemTypeFile   = "ShareFile.Api.Models.File"
)

// Item refers to a file or folder
type Item struct {
    FileCount  int32     `json:"FileCount,omitempty"`
    Name       string    `json:"Name,omitempty"`
    FileName   string    `json:"FileName,omitempty"`
    CreatedAt  time.Time `json:"CreationDate,omitempty"`
    ModifiedAt time.Time `json:"ClientModifiedDate,omitempty"`
    IsHidden   bool      `json:"IsHidden,omitempty"`
    Size       int64     `json:"FileSizeBytes,omitempty"`
    Type       string    `json:"odata.type,omitempty"`
    ID         string    `json:"Id,omitempty"`
    Hash       string    `json:"Hash,omitempty"`
}

// Error is an odata error return
type Error struct {
    Code    string `json:"code"`
    Message struct {
        Lang  string `json:"lang"`
        Value string `json:"value"`
    } `json:"message"`
    Reason string `json:"reason"`
}

// Satisfy error interface
func (e *Error) Error() string {
    return fmt.Sprintf("%s: %s: %s", e.Message.Value, e.Code, e.Reason)
}

// Check Error satisfies error interface
var _ error = &Error{}

// DownloadSpecification is the response to /Items/Download
type DownloadSpecification struct {
    Token    string `json:"DownloadToken"`
    URL      string `json:"DownloadUrl"`
    Metadata string `json:"odata.metadata"`
    Type     string `json:"odata.type"`
}

// UploadRequest is set to /Items/Upload2 to receive an UploadSpecification
type UploadRequest struct {
    Method         string    `json:"method"`                   // Upload method: one of: standard, streamed or threaded
    Raw            bool      `json:"raw"`                      // Raw post if true or MIME upload if false
    Filename       string    `json:"fileName"`                 // Uploaded item file name.
    Filesize       *int64    `json:"fileSize,omitempty"`       // Uploaded item file size.
    Overwrite      bool      `json:"overwrite"`                // Indicates whether items with the same name will be overwritten or not.
    CreatedDate    time.Time `json:"ClientCreatedDate"`        // Created Date of this Item.
    ModifiedDate   time.Time `json:"ClientModifiedDate"`       // Modified Date of this Item.
    BatchID        string    `json:"batchId,omitempty"`        // Indicates part of a batch. Batched uploads do not send notification until the whole batch is completed.
    BatchLast      *bool     `json:"batchLast,omitempty"`      // Indicates this is the last in a batch. Upload notifications for the whole batch are sent after this upload.
    CanResume      *bool     `json:"canResume,omitempty"`      // Indicates uploader supports resume.
    StartOver      *bool     `json:"startOver,omitempty"`      // Indicates uploader wants to restart the file - i.e., ignore previous failed upload attempts.
    Tool           string    `json:"tool,omitempty"`           // Identifies the uploader tool.
    Title          string    `json:"title,omitempty"`          // Item Title
    Details        string    `json:"details,omitempty"`        // Item description
    IsSend         *bool     `json:"isSend,omitempty"`         // Indicates that this upload is part of a Send operation
    SendGUID       string    `json:"sendGuid,omitempty"`       // Used if IsSend is true. Specifies which Send operation this upload is part of.
    OpID           string    `json:"opid,omitempty"`           // Used for Asynchronous copy/move operations - called by Zones to push files to other Zones
    ThreadCount    *int      `json:"threadCount,omitempty"`    // Specifies the number of threads the threaded uploader will use. Only used if method is threaded, ignored otherwise
    Notify         *bool     `json:"notify,omitempty"`         // Indicates whether users will be notified of this upload - based on folder preferences
    ExpirationDays *int      `json:"expirationDays,omitempty"` // File expiration days
    BaseFileID     string    `json:"baseFileId,omitempty"`     // Used to check conflict in file during File Upload.
}

// UploadSpecification is returned from /Items/Upload
type UploadSpecification struct {
    Method             string `json:"Method"`             // The Upload method that must be used for this upload
    PrepareURI         string `json:"PrepareUri"`         // If provided, clients must issue a request to this Uri before uploading any data.
    ChunkURI           string `json:"ChunkUri"`           // Specifies the URI the client must send the file data to
    FinishURI          string `json:"FinishUri"`          // If provided, specifies the final call the client must perform to finish the upload process
    ProgressData       string `json:"ProgressData"`       // Allows the client to check progress of standard uploads
    IsResume           bool   `json:"IsResume"`           // Specifies a Resumable upload is supported.
    ResumeIndex        int64  `json:"ResumeIndex"`        // Specifies the initial index for resuming, if IsResume is true.
    ResumeOffset       int64  `json:"ResumeOffset"`       // Specifies the initial file offset by bytes, if IsResume is true
    ResumeFileHash     string `json:"ResumeFileHash"`     // Specifies the MD5 hash of the first ResumeOffset bytes of the partial file found at the server
    MaxNumberOfThreads int    `json:"MaxNumberOfThreads"` // Specifies the max number of chunks that can be sent simultaneously for threaded uploads
}

// UploadFinishResponse is returned from calling UploadSpecification.FinishURI
type UploadFinishResponse struct {
    Error        bool   `json:"error"`
    ErrorMessage string `json:"errorMessage"`
    ErrorCode    int    `json:"errorCode"`
    Value        []struct {
        UploadID    string `json:"uploadid"`
        ParentID    string `json:"parentid"`
        ID          string `json:"id"`
        StreamID    string `json:"streamid"`
        FileName    string `json:"filename"`
        DisplayName string `json:"displayname"`
        Size        int    `json:"size"`
        Md5         string `json:"md5"`
    } `json:"value"`
}

// ID returns the ID of the first response if available
func (finish *UploadFinishResponse) ID() (string, error) {
    if finish.Error {
        return "", errors.Errorf("upload failed: %s (%d)", finish.ErrorMessage, finish.ErrorCode)
    }
    if len(finish.Value) == 0 {
        return "", errors.New("upload failed: no results returned")
    }
    return finish.Value[0].ID, nil
}

// Parent is the ID of the parent folder
type Parent struct {
    ID string `json:"Id,omitempty"`
}

// Zone is where the data is stored
type Zone struct {
    ID string `json:"Id,omitempty"`
}

// UpdateItemRequest is sent to PATCH /v3/Items(id)
type UpdateItemRequest struct {
    Name           string     `json:"Name,omitempty"`
    FileName       string     `json:"FileName,omitempty"`
    Description    string     `json:"Description,omitempty"`
    ExpirationDate *time.Time `json:"ExpirationDate,omitempty"`
    Parent         *Parent    `json:"Parent,omitempty"`
    Zone           *Zone      `json:"Zone,omitempty"`
    ModifiedAt     *time.Time `json:"ClientModifiedDate,omitempty"`
}
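A hedged sketch (not part of the diff) of the JSON an UploadRequest produces for /Items/Upload2; the field values below are illustrative, the types come from the file above. Fields without omitempty always appear, so the zero ClientCreatedDate / ClientModifiedDate marshal as "0001-01-01T00:00:00Z":

package main

import (
    "encoding/json"
    "fmt"

    "github.com/rclone/rclone/backend/sharefile/api"
)

func main() {
    req := api.UploadRequest{
        Method:    "streamed",
        Raw:       true,
        Filename:  "hello.txt",
        Overwrite: true,
        Tool:      "rclone",
    }
    data, err := json.Marshal(&req)
    if err != nil {
        panic(err)
    }
    // Prints the lower-camel and Client* JSON keys declared in the struct tags.
    fmt.Println(string(data))
}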
22 backend/sharefile/generate_tzdata.go (new file)
@@ -0,0 +1,22 @@
// +build ignore

package main

import (
    "log"
    "net/http"

    "github.com/shurcooL/vfsgen"
)

func main() {
    var AssetDir http.FileSystem = http.Dir("./tzdata")
    err := vfsgen.Generate(AssetDir, vfsgen.Options{
        PackageName:  "sharefile",
        BuildTags:    "!dev",
        VariableName: "tzdata",
    })
    if err != nil {
        log.Fatalln(err)
    }
}

1518 backend/sharefile/sharefile.go (new file; diff suppressed because it is too large)

34 backend/sharefile/sharefile_test.go (new file)
@@ -0,0 +1,34 @@
// Test filesystem interface
package sharefile

import (
    "testing"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestSharefile:",
        NilObject:  (*Object)(nil),
        ChunkedUpload: fstests.ChunkedUploadConfig{
            MinChunkSize:  minChunkSize,
            CeilChunkSize: fstests.NextPowerOfTwo,
        },
    })
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
    return f.setUploadChunkSize(cs)
}

func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
    return f.setUploadCutoff(cs)
}

var (
    _ fstests.SetUploadChunkSizer = (*Fs)(nil)
    _ fstests.SetUploadCutoffer   = (*Fs)(nil)
)
193 backend/sharefile/tzdata_vfsdata.go (new file; diff suppressed because one or more lines are too long)

18 backend/sharefile/update-timezone.sh (new executable file)
@@ -0,0 +1,18 @@
#!/bin/bash

set -e

# Extract just the America/New_York timezone from the Go zoneinfo.zip
tzinfo=$(go env GOROOT)/lib/time/zoneinfo.zip

rm -rf tzdata
mkdir tzdata
cd tzdata
unzip ${tzinfo} America/New_York

cd ..
# Make the embedded assets
go run generate_tzdata.go

# tidy up
rm -rf tzdata
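Presumably the embedded asset exists so the backend can interpret ShareFile timestamps in America/New_York even on hosts without system tzdata. A hedged sketch of how the generated variable could be consumed (loadNewYork is a hypothetical helper; tzdata is the vfsgen-generated http.FileSystem from generate_tzdata.go above):

package sharefile

import (
    "io/ioutil"
    "time"
)

// loadNewYork is a hypothetical helper turning the embedded zoneinfo
// bytes into a *time.Location without relying on OS tzdata.
func loadNewYork() (*time.Location, error) {
    f, err := tzdata.Open("/America/New_York") // vfsgen-generated variable
    if err != nil {
        return nil, err
    }
    defer func() { _ = f.Close() }()
    data, err := ioutil.ReadAll(f)
    if err != nil {
        return nil, err
    }
    return time.LoadLocationFromTZData("America/New_York", data)
}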
261 backend/sharefile/upload.go (new file)
@@ -0,0 +1,261 @@
// Upload large files for sharefile
//
// Docs - https://api.sharefile.com/rest/docs/resource.aspx?name=Items#Upload_File

package sharefile

import (
    "bytes"
    "context"
    "crypto/md5"
    "encoding/hex"
    "encoding/json"
    "fmt"
    "io"
    "strings"
    "sync"

    "github.com/pkg/errors"
    "github.com/rclone/rclone/backend/sharefile/api"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/accounting"
    "github.com/rclone/rclone/lib/readers"
    "github.com/rclone/rclone/lib/rest"
)

// largeUpload is used to control the upload of large files which need chunking
type largeUpload struct {
    ctx      context.Context
    f        *Fs                      // parent Fs
    o        *Object                  // object being uploaded
    in       io.Reader                // read the data from here
    wrap     accounting.WrapFn        // account parts being transferred
    size     int64                    // total size
    parts    int64                    // calculated number of parts, if known
    info     *api.UploadSpecification // where to post chunks etc
    threads  int                      // number of threads to use in upload
    streamed bool                     // set if using streamed upload
}

// newLargeUpload starts an upload of object o from in with metadata in src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, info *api.UploadSpecification) (up *largeUpload, err error) {
    size := src.Size()
    parts := int64(-1)
    if size >= 0 {
        parts = size / int64(o.fs.opt.ChunkSize)
        if size%int64(o.fs.opt.ChunkSize) != 0 {
            parts++
        }
    }

    var streamed bool
    switch strings.ToLower(info.Method) {
    case "streamed":
        streamed = true
    case "threaded":
        streamed = false
    default:
        return nil, errors.Errorf("can't use method %q with newLargeUpload", info.Method)
    }

    threads := fs.Config.Transfers
    if threads > info.MaxNumberOfThreads {
        threads = info.MaxNumberOfThreads
    }

    // unwrap the accounting from the input, we use wrap to put it
    // back on after the buffering
    in, wrap := accounting.UnWrap(in)
    up = &largeUpload{
        ctx:      ctx,
        f:        f,
        o:        o,
        in:       in,
        wrap:     wrap,
        size:     size,
        threads:  threads,
        info:     info,
        parts:    parts,
        streamed: streamed,
    }
    return up, nil
}

// parse the api.UploadFinishResponse in respBody
func (up *largeUpload) parseUploadFinishResponse(respBody []byte) (err error) {
    var finish api.UploadFinishResponse
    err = json.Unmarshal(respBody, &finish)
    if err != nil {
        // Sometimes the unmarshal fails in which case return the body
        return errors.Errorf("upload: bad response: %q", bytes.TrimSpace(respBody))
    }
    return up.o.checkUploadResponse(up.ctx, &finish)
}

// Transfer a chunk
func (up *largeUpload) transferChunk(ctx context.Context, part int64, offset int64, body []byte, fileHash string) error {
    md5sumRaw := md5.Sum(body)
    md5sum := hex.EncodeToString(md5sumRaw[:])
    size := int64(len(body))

    // Add some more parameters to the ChunkURI
    u := up.info.ChunkURI
    u += fmt.Sprintf("&index=%d&byteOffset=%d&hash=%s&fmt=json",
        part, offset, md5sum,
    )
    if fileHash != "" {
        u += fmt.Sprintf("&finish=true&fileSize=%d&fileHash=%s",
            offset+int64(len(body)),
            fileHash,
        )
    }
    opts := rest.Opts{
        Method:        "POST",
        RootURL:       u,
        ContentLength: &size,
    }
    var respBody []byte
    err := up.f.pacer.Call(func() (bool, error) {
        fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
        opts.Body = up.wrap(bytes.NewReader(body))
        resp, err := up.f.srv.Call(ctx, &opts)
        if err != nil {
            fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
        } else {
            respBody, err = rest.ReadBody(resp)
        }
        // retry all errors now that the multipart upload has started
        return err != nil, err
    })
    if err != nil {
        fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
        return err
    }
    // If last chunk and using "streamed" transfer, get the response back now
    if up.streamed && fileHash != "" {
        return up.parseUploadFinishResponse(respBody)
    }
    fs.Debugf(up.o, "Done sending chunk %d", part)
    return nil
}

// finish closes off the large upload and reads the metadata
func (up *largeUpload) finish(ctx context.Context) error {
    fs.Debugf(up.o, "Finishing large file upload")
    // For a streamed transfer we will already have read the info
    if up.streamed {
        return nil
    }

    opts := rest.Opts{
        Method:  "POST",
        RootURL: up.info.FinishURI,
    }
    var respBody []byte
    err := up.f.pacer.Call(func() (bool, error) {
        resp, err := up.f.srv.Call(ctx, &opts)
        if err != nil {
            return shouldRetry(resp, err)
        }
        respBody, err = rest.ReadBody(resp)
        // retry all errors now that the multipart upload has started
        return err != nil, err
    })
    if err != nil {
        return err
    }
    return up.parseUploadFinishResponse(respBody)
}

// Upload uploads the chunks from the input
func (up *largeUpload) Upload(ctx context.Context) error {
    if up.parts >= 0 {
        fs.Debugf(up.o, "Starting upload of large file in %d chunks", up.parts)
    } else {
        fs.Debugf(up.o, "Starting streaming upload of large file")
    }
    var (
        offset        int64
        errs          = make(chan error, 1)
        wg            sync.WaitGroup
        err           error
        wholeFileHash = md5.New()
        eof           = false
    )
outer:
    for part := int64(0); !eof; part++ {
        // Check any errors
        select {
        case err = <-errs:
            break outer
        default:
        }

        // Get a block of memory
        buf := up.f.getUploadBlock()

        // Read the chunk
        var n int
        n, err = readers.ReadFill(up.in, buf)
        if err == io.EOF {
            eof = true
            buf = buf[:n]
            err = nil
        } else if err != nil {
            up.f.putUploadBlock(buf)
            break outer
        }

        // Hash it
        _, _ = io.Copy(wholeFileHash, bytes.NewBuffer(buf))

        // Get file hash if was last chunk
        fileHash := ""
        if eof {
            fileHash = hex.EncodeToString(wholeFileHash.Sum(nil))
        }

        // Transfer the chunk
        wg.Add(1)
        transferChunk := func(part, offset int64, buf []byte, fileHash string) {
            defer wg.Done()
            defer up.f.putUploadBlock(buf)
            err := up.transferChunk(ctx, part, offset, buf, fileHash)
            if err != nil {
                select {
                case errs <- err:
                default:
                }
            }
        }
        if up.streamed {
            transferChunk(part, offset, buf, fileHash) // streamed
        } else {
            go transferChunk(part, offset, buf, fileHash) // multithreaded
        }

        offset += int64(n)
    }
    wg.Wait()

    // check size read is correct
    if eof && err == nil && up.size >= 0 && up.size != offset {
        err = errors.Errorf("upload: short read: read %d bytes expected %d", offset, up.size)
    }

    // read any errors
    if err == nil {
        select {
        case err = <-errs:
        default:
        }
    }

    // finish regardless of errors
    finishErr := up.finish(ctx)
    if err == nil {
        err = finishErr
    }

    return err
}
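The parts arithmetic in newLargeUpload above is a plain ceiling division. A standalone restatement with a worked example (numbers illustrative, not from the diff):

// numParts mirrors the calculation in newLargeUpload: ceil(size/chunkSize),
// with -1 meaning "unknown" for streamed uploads of unknown size.
// e.g. size = 10 MiB, chunkSize = 4 MiB gives 3 parts (4 + 4 + 2 MiB).
func numParts(size, chunkSize int64) int64 {
    if size < 0 {
        return -1
    }
    parts := size / chunkSize
    if size%chunkSize != 0 {
        parts++ // a final, shorter chunk
    }
    return parts
}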
@@ -8,10 +8,8 @@ import (
    "fmt"
    "io"
    "path"
    "regexp"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/ncw/swift"

@@ -19,12 +17,15 @@ import (
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/encodings"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fs/walk"
    "github.com/rclone/rclone/lib/bucket"
    "github.com/rclone/rclone/lib/pacer"
    "github.com/rclone/rclone/lib/readers"
)

// Constants

@@ -60,6 +61,8 @@ copy operations.`,
    Advanced: true,
}}

const enc = encodings.Swift

// Register with Fs
func init() {
    fs.Register(&fs.RegInfo{

@@ -207,17 +210,16 @@ type Options struct {

// Fs represents a remote swift server
type Fs struct {
    name              string            // name of this remote
    root              string            // the path we are working on if any
    features          *fs.Features      // optional features
    opt               Options           // options for this backend
    c                 *swift.Connection // the connection to the swift server
    container         string            // the container we are working on
    containerOKMu     sync.Mutex        // mutex to protect container OK
    containerOK       bool              // true if we have created the container
    segmentsContainer string            // container to store the segments (if any) in
    noCheckContainer  bool              // don't check the container before creating it
    pacer             *fs.Pacer         // To pace the API calls
    name             string            // name of this remote
    root             string            // the path we are working on if any
    features         *fs.Features      // optional features
    opt              Options           // options for this backend
    c                *swift.Connection // the connection to the swift server
    rootContainer    string            // container part of root (if any)
    rootDirectory    string            // directory part of root (if any)
    cache            *bucket.Cache     // cache of container status
    noCheckContainer bool              // don't check the container before creating it
    pacer            *fs.Pacer         // To pace the API calls
}

// Object describes a swift object

@@ -242,18 +244,18 @@ func (f *Fs) Name() string {

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
    if f.root == "" {
        return f.container
    }
    return f.container + "/" + f.root
    return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
    if f.root == "" {
        return fmt.Sprintf("Swift container %s", f.container)
    if f.rootContainer == "" {
        return fmt.Sprintf("Swift root")
    }
    return fmt.Sprintf("Swift container %s path %s", f.container, f.root)
    if f.rootDirectory == "" {
        return fmt.Sprintf("Swift container %s", f.rootContainer)
    }
    return fmt.Sprintf("Swift container %s path %s", f.rootContainer, f.rootDirectory)
}

// Features returns the optional features of this Fs

@@ -312,21 +314,24 @@ func shouldRetryHeaders(headers swift.Headers, err error) (bool, error) {
    return shouldRetry(err)
}

// Pattern to match a swift path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)

// parseParse parses a swift 'url'
func parsePath(path string) (container, directory string, err error) {
    parts := matcher.FindStringSubmatch(path)
    if parts == nil {
        err = errors.Errorf("couldn't find container in swift path %q", path)
    } else {
        container, directory = parts[1], parts[2]
        directory = strings.Trim(directory, "/")
    }
// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
    root = strings.Trim(path, "/")
    return
}

// split returns container and containerPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (container, containerPath string) {
    container, containerPath = bucket.Split(path.Join(f.root, rootRelativePath))
    return enc.FromStandardName(container), enc.FromStandardPath(containerPath)
}

// split returns container and containerPath from the object
func (o *Object) split() (container, containerPath string) {
    return o.fs.split(o.remote)
}

// swiftConnection makes a connection to swift
func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
    c := &swift.Connection{
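A worked example of the new path handling, as Go comments (assumption: bucket.Split divides at the first slash, container before it, path after it):

// f.root = "mycontainer/photos"
// f.split("2019/img.jpg")
//   path.Join("mycontainer/photos", "2019/img.jpg") = "mycontainer/photos/2019/img.jpg"
//   bucket.Split(...) -> container "mycontainer", containerPath "photos/2019/img.jpg"
// Both halves are then mapped through the Swift encoding (enc.FromStandardName /
// enc.FromStandardPath) before being sent to the server.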
@@ -409,47 +414,49 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
    return
}

// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
    f.root = parsePath(root)
    f.rootContainer, f.rootDirectory = bucket.Split(f.root)
}

// NewFsWithConnection constructs an Fs from the path, container:path
// and authenticated connection.
//
// if noCheckContainer is set then the Fs won't check the container
// exists before creating it.
func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
    container, directory, err := parsePath(root)
    if err != nil {
        return nil, err
    }
    f := &Fs{
        name:              name,
        opt:               *opt,
        c:                 c,
        container:         container,
        segmentsContainer: container + "_segments",
        root:              directory,
        noCheckContainer:  noCheckContainer,
        pacer:             fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
        name:             name,
        opt:              *opt,
        c:                c,
        noCheckContainer: noCheckContainer,
        pacer:            fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
        cache:            bucket.NewCache(),
    }
    f.setRoot(root)
    f.features = (&fs.Features{
        ReadMimeType:  true,
        WriteMimeType: true,
        BucketBased:   true,
        ReadMimeType:      true,
        WriteMimeType:     true,
        BucketBased:       true,
        BucketBasedRootOK: true,
    }).Fill(f)
    if f.root != "" {
        f.root += "/"
    if f.rootContainer != "" && f.rootDirectory != "" {
        // Check to see if the object exists - ignoring directory markers
        var info swift.Object
        var err error
        encodedDirectory := enc.FromStandardPath(f.rootDirectory)
        err = f.pacer.Call(func() (bool, error) {
            var rxHeaders swift.Headers
            info, rxHeaders, err = f.c.Object(container, directory)
            info, rxHeaders, err = f.c.Object(f.rootContainer, encodedDirectory)
            return shouldRetryHeaders(rxHeaders, err)
        })
        if err == nil && info.ContentType != directoryMarkerContentType {
            f.root = path.Dir(directory)
            if f.root == "." {
                f.root = ""
            } else {
                f.root += "/"
            newRoot := path.Dir(f.root)
            if newRoot == "." {
                newRoot = ""
            }
            f.setRoot(newRoot)
            // return an error with an fs which points to the parent
            return f, fs.ErrorIsFile
        }

@@ -517,23 +524,26 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
type listFn func(remote string, object *swift.Object, isDirectory bool) error

// listContainerRoot lists the objects into the function supplied from
// the container and root supplied
// the container and directory supplied. The remote has prefix
// removed from it and if addContainer is set then it adds the
// container to the start.
//
// Set recurse to read sub directories
func (f *Fs) listContainerRoot(container, root string, dir string, recurse bool, fn listFn) error {
    prefix := root
    if dir != "" {
        prefix += dir + "/"
func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer bool, recurse bool, fn listFn) error {
    if prefix != "" {
        prefix += "/"
    }
    if directory != "" {
        directory += "/"
    }
    // Options for ObjectsWalk
    opts := swift.ObjectsOpts{
        Prefix: prefix,
        Prefix: directory,
        Limit:  listChunks,
    }
    if !recurse {
        opts.Delimiter = '/'
    }
    rootLength := len(root)
    return f.c.ObjectsWalk(container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
        var objects []swift.Object
        var err error

@@ -548,17 +558,21 @@ func (f *Fs) listContainerRoot(container, root string, dir string, recurse bool,
            if !recurse {
                isDirectory = strings.HasSuffix(object.Name, "/")
            }
            if !strings.HasPrefix(object.Name, prefix) {
                fs.Logf(f, "Odd name received %q", object.Name)
            remote := enc.ToStandardPath(object.Name)
            if !strings.HasPrefix(remote, prefix) {
                fs.Logf(f, "Odd name received %q", remote)
                continue
            }
            if object.Name == prefix {
            if remote == prefix {
                // If we have zero length directory markers ending in / then swift
                // will return them in the listing for the directory which causes
                // duplicate directories. Ignore them here.
                continue
            }
            remote := object.Name[rootLength:]
            remote = remote[len(prefix):]
            if addContainer {
                remote = path.Join(container, remote)
            }
            err = fn(remote, object, isDirectory)
            if err != nil {
                break

@@ -572,8 +586,8 @@ func (f *Fs) listContainerRoot(container, root string, dir string, recurse bool,
type addEntryFn func(fs.DirEntry) error

// list the objects into the function supplied
func (f *Fs) list(dir string, recurse bool, fn addEntryFn) error {
    err := f.listContainerRoot(f.container, f.root, dir, recurse, func(remote string, object *swift.Object, isDirectory bool) (err error) {
func (f *Fs) list(container, directory, prefix string, addContainer bool, recurse bool, fn addEntryFn) error {
    err := f.listContainerRoot(container, directory, prefix, addContainer, recurse, func(remote string, object *swift.Object, isDirectory bool) (err error) {
        if isDirectory {
            remote = strings.TrimRight(remote, "/")
            d := fs.NewDir(remote, time.Time{}).SetSize(object.Bytes)

@@ -597,22 +611,13 @@ func (f *Fs) list(dir string, recurse bool, fn addEntryFn) error {
    return err
}

// mark the container as being OK
func (f *Fs) markContainerOK() {
    if f.container != "" {
        f.containerOKMu.Lock()
        f.containerOK = true
        f.containerOKMu.Unlock()
    }
}

// listDir lists a single directory
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
    if f.container == "" {
func (f *Fs) listDir(container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
    if container == "" {
        return nil, fs.ErrorListBucketRequired
    }
    // List the objects
    err = f.list(dir, false, func(entry fs.DirEntry) error {
    err = f.list(container, directory, prefix, addContainer, false, func(entry fs.DirEntry) error {
        entries = append(entries, entry)
        return nil
    })

@@ -620,15 +625,12 @@ func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
        return nil, err
    }
    // container must be present if listing succeeded
    f.markContainerOK()
    f.cache.MarkOK(container)
    return entries, nil
}

// listContainers lists the containers
func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
    if dir != "" {
        return nil, fs.ErrorListBucketRequired
    }
func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) {
    var containers []swift.Container
    err = f.pacer.Call(func() (bool, error) {
        containers, err = f.c.ContainersAll(nil)

@@ -638,7 +640,8 @@ func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
        return nil, errors.Wrap(err, "container listing failed")
    }
    for _, container := range containers {
        d := fs.NewDir(container.Name, time.Time{}).SetSize(container.Bytes).SetItems(container.Count)
        f.cache.MarkOK(container.Name)
        d := fs.NewDir(enc.ToStandardName(container.Name), time.Time{}).SetSize(container.Bytes).SetItems(container.Count)
        entries = append(entries, d)
    }
    return entries, nil

@@ -654,10 +657,14 @@ func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    if f.container == "" {
        return f.listContainers(dir)
    container, directory := f.split(dir)
    if container == "" {
        if directory != "" {
            return nil, fs.ErrorListBucketRequired
        }
        return f.listContainers(ctx)
    }
    return f.listDir(dir)
    return f.listDir(container, directory, f.rootDirectory, f.rootContainer == "")
}

// ListR lists the objects and directories of the Fs starting

@@ -675,20 +682,41 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
    if f.container == "" {
        return errors.New("container needed for recursive list")
    }
    container, directory := f.split(dir)
    list := walk.NewListRHelper(callback)
    err = f.list(dir, true, func(entry fs.DirEntry) error {
        return list.Add(entry)
    })
    if err != nil {
        return err
    listR := func(container, directory, prefix string, addContainer bool) error {
        return f.list(container, directory, prefix, addContainer, true, func(entry fs.DirEntry) error {
            return list.Add(entry)
        })
    }
    if container == "" {
        entries, err := f.listContainers(ctx)
        if err != nil {
            return err
        }
        for _, entry := range entries {
            err = list.Add(entry)
            if err != nil {
                return err
            }
            container := entry.Remote()
            err = listR(container, "", f.rootDirectory, true)
            if err != nil {
                return err
            }
            // container must be present if listing succeeded
            f.cache.MarkOK(container)
        }
    } else {
        err = listR(container, directory, f.rootDirectory, f.rootContainer == "")
        if err != nil {
            return err
        }
        // container must be present if listing succeeded
        f.cache.MarkOK(container)
    }
    // container must be present if listing succeeded
    f.markContainerOK()
    return list.Flush()
}
@@ -737,57 +765,57 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    f.containerOKMu.Lock()
    defer f.containerOKMu.Unlock()
    if f.containerOK {
        return nil
    }
    // if we are at the root, then it is OK
    if f.container == "" {
        return nil
    }
    // Check to see if container exists first
    var err error = swift.ContainerNotFound
    if !f.noCheckContainer {
        err = f.pacer.Call(func() (bool, error) {
            var rxHeaders swift.Headers
            _, rxHeaders, err = f.c.Container(f.container)
            return shouldRetryHeaders(rxHeaders, err)
        })
    }
    if err == swift.ContainerNotFound {
        headers := swift.Headers{}
        if f.opt.StoragePolicy != "" {
            headers["X-Storage-Policy"] = f.opt.StoragePolicy
    container, _ := f.split(dir)
    return f.makeContainer(ctx, container)
}

// makeContainer creates the container if it doesn't exist
func (f *Fs) makeContainer(ctx context.Context, container string) error {
    return f.cache.Create(container, func() error {
        // Check to see if container exists first
        var err error = swift.ContainerNotFound
        if !f.noCheckContainer {
            err = f.pacer.Call(func() (bool, error) {
                var rxHeaders swift.Headers
                _, rxHeaders, err = f.c.Container(container)
                return shouldRetryHeaders(rxHeaders, err)
            })
        }
        err = f.pacer.Call(func() (bool, error) {
            err = f.c.ContainerCreate(f.container, headers)
            return shouldRetry(err)
        })
    }
    if err == nil {
        f.containerOK = true
    }
    return err
        if err == swift.ContainerNotFound {
            headers := swift.Headers{}
            if f.opt.StoragePolicy != "" {
                headers["X-Storage-Policy"] = f.opt.StoragePolicy
            }
            err = f.pacer.Call(func() (bool, error) {
                err = f.c.ContainerCreate(container, headers)
                return shouldRetry(err)
            })
            if err == nil {
                fs.Infof(f, "Container %q created", container)
            }
        }
        return err
    }, nil)
}

// Rmdir deletes the container if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    f.containerOKMu.Lock()
    defer f.containerOKMu.Unlock()
    if f.root != "" || dir != "" {
    container, directory := f.split(dir)
    if container == "" || directory != "" {
        return nil
    }
    var err error
    err = f.pacer.Call(func() (bool, error) {
        err = f.c.ContainerDelete(f.container)
        return shouldRetry(err)
    err := f.cache.Remove(container, func() error {
        err := f.pacer.Call(func() (bool, error) {
            err := f.c.ContainerDelete(container)
            return shouldRetry(err)
        })
        if err == nil {
            fs.Infof(f, "Container %q removed", container)
        }
        return err
    })
    if err == nil {
        f.containerOK = false
    }
    return err
}
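A minimal sketch of the behaviour bucket.Cache appears to give makeContainer here, replacing the old containerOK flag and mutex. Assumption: Create runs the callback only for containers not yet marked OK and marks them OK on success; createOnce is a simplified, hypothetical stand-in, not the library API:

// fragment - needs "sync" imported
var (
    mu      sync.Mutex
    created = map[string]bool{}
)

// createOnce runs create at most once per container name.
func createOnce(container string, create func() error) error {
    mu.Lock()
    defer mu.Unlock()
    if created[container] {
        return nil // already known to exist
    }
    if err := create(); err != nil {
        return err
    }
    created[container] = true
    return nil
}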
@@ -806,7 +834,7 @@ func (f *Fs) Purge(ctx context.Context) error {
    go func() {
        delErr <- operations.DeleteFiles(ctx, toBeDeleted)
    }()
    err := f.list("", true, func(entry fs.DirEntry) error {
    err := f.list(f.rootContainer, f.rootDirectory, f.rootDirectory, f.rootContainer == "", true, func(entry fs.DirEntry) error {
        if o, ok := entry.(*Object); ok {
            toBeDeleted <- o
        }

@@ -833,7 +861,8 @@ func (f *Fs) Purge(ctx context.Context) error {
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    err := f.Mkdir(ctx, "")
    dstContainer, dstPath := f.split(remote)
    err := f.makeContainer(ctx, dstContainer)
    if err != nil {
        return nil, err
    }

@@ -842,10 +871,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
        fs.Debugf(src, "Can't copy - not same remote type")
        return nil, fs.ErrorCantCopy
    }
    srcFs := srcObj.fs
    srcContainer, srcPath := srcObj.split()
    err = f.pacer.Call(func() (bool, error) {
        var rxHeaders swift.Headers
        rxHeaders, err = f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
        rxHeaders, err = f.c.ObjectCopy(srcContainer, srcPath, dstContainer, dstPath, nil)
        return shouldRetryHeaders(rxHeaders, err)
    })
    if err != nil {

@@ -954,8 +983,9 @@ func (o *Object) readMetaData() (err error) {
    }
    var info swift.Object
    var h swift.Headers
    container, containerPath := o.split()
    err = o.fs.pacer.Call(func() (bool, error) {
        info, h, err = o.fs.c.Object(o.fs.container, o.fs.root+o.remote)
        info, h, err = o.fs.c.Object(container, containerPath)
        return shouldRetryHeaders(h, err)
    })
    if err != nil {

@@ -1012,8 +1042,9 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
            newHeaders[k] = v
        }
    }
    container, containerPath := o.split()
    return o.fs.pacer.Call(func() (bool, error) {
        err = o.fs.c.ObjectUpdate(o.fs.container, o.fs.root+o.remote, newHeaders)
        err = o.fs.c.ObjectUpdate(container, containerPath, newHeaders)
        return shouldRetry(err)
    })
}

@@ -1031,9 +1062,10 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
    fs.FixRangeOption(options, o.size)
    headers := fs.OpenOptionHeaders(options)
    _, isRanging := headers["Range"]
    container, containerPath := o.split()
    err = o.fs.pacer.Call(func() (bool, error) {
        var rxHeaders swift.Headers
        in, rxHeaders, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, !isRanging, headers)
        in, rxHeaders, err = o.fs.c.ObjectOpen(container, containerPath, !isRanging, headers)
        return shouldRetryHeaders(rxHeaders, err)
    })
    return

@@ -1051,20 +1083,20 @@ func min(x, y int64) int64 {
//
// if except is passed in then segments with that prefix won't be deleted
func (o *Object) removeSegments(except string) error {
    segmentsRoot := o.fs.root + o.remote + "/"
    err := o.fs.listContainerRoot(o.fs.segmentsContainer, segmentsRoot, "", true, func(remote string, object *swift.Object, isDirectory bool) error {
    container, containerPath := o.split()
    segmentsContainer := container + "_segments"
    err := o.fs.listContainerRoot(segmentsContainer, containerPath, "", false, true, func(remote string, object *swift.Object, isDirectory bool) error {
        if isDirectory {
            return nil
        }
        if except != "" && strings.HasPrefix(remote, except) {
            // fs.Debugf(o, "Ignoring current segment file %q in container %q", segmentsRoot+remote, o.fs.segmentsContainer)
            // fs.Debugf(o, "Ignoring current segment file %q in container %q", segmentsRoot+remote, segmentsContainer)
            return nil
        }
        segmentPath := segmentsRoot + remote
        fs.Debugf(o, "Removing segment file %q in container %q", segmentPath, o.fs.segmentsContainer)
        fs.Debugf(o, "Removing segment file %q in container %q", remote, segmentsContainer)
        var err error
        return o.fs.pacer.Call(func() (bool, error) {
            err = o.fs.c.ObjectDelete(o.fs.segmentsContainer, segmentPath)
            err = o.fs.c.ObjectDelete(segmentsContainer, remote)
            return shouldRetry(err)
        })
    })

@@ -1073,11 +1105,11 @@ func (o *Object) removeSegments(except string) error {
    }
    // remove the segments container if empty, ignore errors
    err = o.fs.pacer.Call(func() (bool, error) {
        err = o.fs.c.ContainerDelete(o.fs.segmentsContainer)
        err = o.fs.c.ContainerDelete(segmentsContainer)
        return shouldRetry(err)
    })
    if err == nil {
        fs.Debugf(o, "Removed empty container %q", o.fs.segmentsContainer)
        fs.Debugf(o, "Removed empty container %q", segmentsContainer)
    }
    return nil
}

@@ -1102,11 +1134,13 @@ func urlEncode(str string) string {
// updateChunks updates the existing object using chunks to a separate
// container. It returns a string which prefixes current segments.
func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
    container, containerPath := o.split()
    segmentsContainer := container + "_segments"
    // Create the segmentsContainer if it doesn't exist
    var err error
    err = o.fs.pacer.Call(func() (bool, error) {
        var rxHeaders swift.Headers
        _, rxHeaders, err = o.fs.c.Container(o.fs.segmentsContainer)
        _, rxHeaders, err = o.fs.c.Container(segmentsContainer)
        return shouldRetryHeaders(rxHeaders, err)
    })
    if err == swift.ContainerNotFound {

@@ -1115,7 +1149,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
            headers["X-Storage-Policy"] = o.fs.opt.StoragePolicy
        }
        err = o.fs.pacer.Call(func() (bool, error) {
            err = o.fs.c.ContainerCreate(o.fs.segmentsContainer, headers)
            err = o.fs.c.ContainerCreate(segmentsContainer, headers)
            return shouldRetry(err)
        })
    }

@@ -1126,7 +1160,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
    left := size
    i := 0
    uniquePrefix := fmt.Sprintf("%s/%d", swift.TimeToFloatString(time.Now()), size)
    segmentsPath := fmt.Sprintf("%s%s/%s", o.fs.root, o.remote, uniquePrefix)
    segmentsPath := path.Join(containerPath, uniquePrefix)
    in := bufio.NewReader(in0)
    segmentInfos := make([]string, 0, ((size / int64(o.fs.opt.ChunkSize)) + 1))
    for {

@@ -1135,7 +1169,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
        if left > 0 {
            return "", err // read less than expected
        }
        fs.Debugf(o, "Uploading segments into %q seems done (%v)", o.fs.segmentsContainer, err)
        fs.Debugf(o, "Uploading segments into %q seems done (%v)", segmentsContainer, err)
        break
    }
    n := int64(o.fs.opt.ChunkSize)

@@ -1146,46 +1180,45 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
    }
    segmentReader := io.LimitReader(in, n)
    segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
    fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, o.fs.segmentsContainer)
    fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, segmentsContainer)
    err = o.fs.pacer.CallNoRetry(func() (bool, error) {
        var rxHeaders swift.Headers
        rxHeaders, err = o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
        rxHeaders, err = o.fs.c.ObjectPut(segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
        if err == nil {
            segmentInfos = append(segmentInfos, segmentPath)
        }
        return shouldRetryHeaders(rxHeaders, err)
    })
    if err != nil {
        deleteChunks(o, segmentInfos)
        deleteChunks(o, segmentsContainer, segmentInfos)
        segmentInfos = nil
        return "", err
    }
    i++
    }
    // Upload the manifest
    headers["X-Object-Manifest"] = urlEncode(fmt.Sprintf("%s/%s", o.fs.segmentsContainer, segmentsPath))
    headers["X-Object-Manifest"] = urlEncode(fmt.Sprintf("%s/%s", segmentsContainer, segmentsPath))
    headers["Content-Length"] = "0" // set Content-Length as we know it
    emptyReader := bytes.NewReader(nil)
    manifestName := o.fs.root + o.remote
    err = o.fs.pacer.Call(func() (bool, error) {
        var rxHeaders swift.Headers
        rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", contentType, headers)
        rxHeaders, err = o.fs.c.ObjectPut(container, containerPath, emptyReader, true, "", contentType, headers)
        return shouldRetryHeaders(rxHeaders, err)
    })
    if err != nil {
        deleteChunks(o, segmentInfos)
        deleteChunks(o, segmentsContainer, segmentInfos)
        segmentInfos = nil
    }
    return uniquePrefix + "/", err
}

func deleteChunks(o *Object, segmentInfos []string) {
func deleteChunks(o *Object, segmentsContainer string, segmentInfos []string) {
    if segmentInfos != nil && len(segmentInfos) > 0 {
        for _, v := range segmentInfos {
            fs.Debugf(o, "Delete segment file %q on %q", v, o.fs.segmentsContainer)
            e := o.fs.c.ObjectDelete(o.fs.segmentsContainer, v)
            fs.Debugf(o, "Delete segment file %q on %q", v, segmentsContainer)
            e := o.fs.c.ObjectDelete(segmentsContainer, v)
            if e != nil {
                fs.Errorf(o, "Error occurred in delete segment file %q on %q , error: %q", v, o.fs.segmentsContainer, e)
                fs.Errorf(o, "Error occurred in delete segment file %q on %q , error: %q", v, segmentsContainer, e)
            }
        }
    }
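A worked example of the dynamic large object layout the new code produces, as Go comments (values illustrative, not from the diff):

// containerPath = "dir/big.bin"
// uniquePrefix  = swift.TimeToFloatString(now) + "/" + size, e.g. "1568041200.12345/1073741824"
// segmentsPath  = path.Join(containerPath, uniquePrefix)
//               = "dir/big.bin/1568041200.12345/1073741824"
// segment i     = fmt.Sprintf("%s/%08d", segmentsPath, i)  // .../00000000, .../00000001, ...
// The zero-byte manifest is uploaded to <container>/<containerPath> with
//   X-Object-Manifest: <container>_segments/<segmentsPath>
// so Swift streams the segments back in lexical order on download.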
@@ -1195,10 +1228,11 @@ func deleteChunks(o *Object, segmentInfos []string) {
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	if o.fs.container == "" {
-		return fserrors.FatalError(errors.New("container name needed in remote"))
+	container, containerPath := o.split()
+	if container == "" {
+		return fserrors.FatalError(errors.New("can't upload files to the root"))
	}
-	err := o.fs.Mkdir(ctx, "")
+	err := o.fs.makeContainer(ctx, container)
	if err != nil {
		return err
	}
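
Much of this change hangs off o.split(), which resolves the object into a (container, path-within-container) pair now that a remote may be rooted above any container. The method itself is not in these hunks; the following is a rough sketch of its semantics only, not the backend's actual implementation:

```go
package main

import "strings"

// split divides an absolute remote path into its container and the
// path inside it, e.g. "mycontainer/dir/file.txt" yields
// ("mycontainer", "dir/file.txt") and "mycontainer" yields
// ("mycontainer", ""). A path rooted above all containers yields an
// empty container, which is why Update rejects uploads to the root.
func split(absPath string) (container, containerPath string) {
	absPath = strings.TrimLeft(absPath, "/")
	if i := strings.IndexRune(absPath, '/'); i >= 0 {
		return absPath[:i], absPath[i+1:]
	}
	return absPath, ""
}
```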
@@ -1224,12 +1258,17 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		}
		o.headers = nil // wipe old metadata
	} else {
+		var inCount *readers.CountingReader
		if size >= 0 {
			headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length if we know it
+		} else {
+			// otherwise count the size for later
+			inCount = readers.NewCountingReader(in)
+			in = inCount
		}
		var rxHeaders swift.Headers
		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-			rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
+			rxHeaders, err = o.fs.c.ObjectPut(container, containerPath, in, true, "", contentType, headers)
			return shouldRetryHeaders(rxHeaders, err)
		})
		if err != nil {
@@ -1242,6 +1281,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		o.md5 = rxHeaders["ETag"]
		o.contentType = contentType
		o.headers = headers
+		if inCount != nil {
+			// update the size if streaming from the reader
+			o.size = int64(inCount.BytesRead())
+		}
	}
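
The streaming branch above is the interesting one: when size < 0 the backend cannot send Content-Length, so it interposes a counting reader and back-fills o.size after the PUT completes. rclone's readers.CountingReader has roughly this shape; the sketch below is a simplified stand-in, not the library source:

```go
package main

import "io"

// CountingReader wraps an io.Reader and tallies the bytes read through it.
type CountingReader struct {
	in   io.Reader
	read uint64
}

// NewCountingReader returns a CountingReader reading from in.
func NewCountingReader(in io.Reader) *CountingReader {
	return &CountingReader{in: in}
}

// Read passes through to the wrapped reader, counting as it goes.
func (cr *CountingReader) Read(p []byte) (int, error) {
	n, err := cr.in.Read(p)
	cr.read += uint64(n)
	return n, err
}

// BytesRead reports how many bytes have passed through so far.
func (cr *CountingReader) BytesRead() uint64 {
	return cr.read
}
```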

	// If file was a dynamic large object then remove old/all segments
@@ -1258,13 +1301,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
+	container, containerPath := o.split()
	isDynamicLargeObject, err := o.isDynamicLargeObject()
	if err != nil {
		return err
	}
	// Remove file/manifest first
	err = o.fs.pacer.Call(func() (bool, error) {
-		err = o.fs.c.ObjectDelete(o.fs.container, o.fs.root+o.remote)
+		err = o.fs.c.ObjectDelete(container, containerPath)
		return shouldRetry(err)
	})
	if err != nil {

@@ -2,10 +2,19 @@
package swift

import (
+	"bytes"
+	"context"
+	"io"
	"testing"

	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/object"
+	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/rclone/rclone/lib/random"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
)

// TestIntegration runs integration tests against the remote
@@ -21,3 +30,50 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
}

var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
+
+// Check that PutStream works with NoChunk as it is the major code
+// deviation
+func (f *Fs) testNoChunk(t *testing.T) {
+	ctx := context.Background()
+	f.opt.NoChunk = true
+	defer func() {
+		f.opt.NoChunk = false
+	}()
+
+	file := fstest.Item{
+		ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
+		Path:    "piped data no chunk.txt",
+		Size:    -1, // use unknown size during upload
+	}
+
+	const contentSize = 100
+
+	contents := random.String(contentSize)
+	buf := bytes.NewBufferString(contents)
+	uploadHash := hash.NewMultiHasher()
+	in := io.TeeReader(buf, uploadHash)
+
+	file.Size = -1
+	obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
+	obj, err := f.Features().PutStream(ctx, in, obji)
+	require.NoError(t, err)
+
+	file.Hashes = uploadHash.Sums()
+	file.Size = int64(contentSize) // use correct size when checking
+	file.Check(t, obj, f.Precision())
+
+	// Re-read the object and check again
+	obj, err = f.NewObject(ctx, file.Path)
+	require.NoError(t, err)
+	file.Check(t, obj, f.Precision())
+
+	// Delete the object
+	assert.NoError(t, obj.Remove(ctx))
+}
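
One detail of testNoChunk worth noting: io.TeeReader copies every byte that PutStream consumes into the multi-hasher as a side effect, so the expected hashes are computed from exactly the uploaded data in a single pass. The same one-pass pattern with a plain MD5 hash, as a standalone toy example:

```go
package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"io"
)

func main() {
	// Hash the data while it streams to its destination, in one pass.
	buf := bytes.NewBufferString("some streamed content")
	hasher := md5.New()
	in := io.TeeReader(buf, hasher) // every Read also writes into hasher

	uploaded, err := io.ReadAll(in) // stands in for the actual upload
	if err != nil {
		panic(err)
	}
	fmt.Printf("uploaded %d bytes, md5 %x\n", len(uploaded), hasher.Sum(nil))
}
```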
+
+// Additional tests that aren't in the framework
+func (f *Fs) InternalTest(t *testing.T) {
+	t.Run("NoChunk", f.testNoChunk)
+}
+
+var _ fstests.InternalTester = (*Fs)(nil)
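
The var _ ... = (*Fs)(nil) lines are Go's standard compile-time interface assertions: assigning a typed nil to a blank variable of the interface type makes the build fail as soon as *Fs stops satisfying the interface, at zero runtime cost. The idiom in miniature, with toy types:

```go
package main

import "io"

type nopReader struct{}

func (nopReader) Read(p []byte) (int, error) { return 0, io.EOF }

// Compile-time check: this line refuses to compile if nopReader
// ever stops satisfying io.Reader. The blank identifier discards
// the value, so nothing happens at runtime.
var _ io.Reader = nopReader{}

func main() {}
```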
Some files were not shown because too many files have changed in this diff.