Mirror of https://github.com/rclone/rclone.git (synced 2026-01-27 06:43:27 +00:00)

Compare commits: circleci...fix-mega-b (171 commits)
171 commits (SHA1 only; the author and date columns of the source table were empty):

48fa6f5700, b4b59c53f1, 77b42aa33a, 910c80bd02, 9049bb62ca, 7aa2b4191c,
41ed33b08e, f3b0f8a9f0, 65a82fe77d, c892a6f8ef, 02c777ffbf, bc45f6f952,
3d807ab449, 5d33236050, a4d572d004, 58f280b8a2, ec09de1628, 6abaa9e22c,
e8b92f4853, 50a3a96e27, 8950b586c4, 3f40849343, 7271a404db, 7d0d7e66ca,
0cac9d9bd0, 8c1edf410c, 1833167d10, 455b9280ba, 45e440d356, 593de059be,
c78d1dd18b, 2a82aca225, 7712b780ba, 5c2dfeee46, 572d302620, eff11b44cf,
15b1feea9d, 6337cc70d3, d210fecf3b, f962fb9499, 7f378ca8e3, 9a5ea9c8a8,
d15425e8c8, b3faee9471, 5271fe3b3f, 7da1c84a7f, cbdab14057, 7b1274e29a,
d21ddf280c, 135717e12b, 6b55b8b133, b94b2a3723, e2914c0097, fd51f24906,
4615343b73, 1dc8bcd48c, def411da62, f73dae1e77, 77a520c97c, 23bf6bb4d8,
04eb96b50b, b9bd15a8c9, b581f2de26, 5cef5f8b49, 8d8fad724b, 4098907511,
5b8a339baf, 3e53376a49, d122d1d191, 35d6ff89bf, 53bec33027, 3304bb7a56,
f55a99218c, 6e053ecbd0, 7e738c9d71, 7689bd7e21, 33f129fbbc, a8adce9c59,
6ae7bd7914, 32af4cd6f3, ced2616da5, b90e4a8769, 00b2c02bf4, 33aea5d43f,
13d8b7979d, 57c1284df7, f0c2249086, 6ba08b8612, c8d3e57418, d5cd026547,
6c0a749a42, 4b9fdb8475, dac20093c5, d211347d46, 4837bc3546, 69c51325bb,
05e4f10436, a98a750fc9, c09b62a088, a56c9ab61d, 97a218903c, 4627ac5709,
1e7144eb63, f29e5b6e7d, 25a0e7e8aa, 262ba28dec, 74f6300875, 86dcb54c38,
25a0703b45, 32d5af8fb6, 44b603d2bd, 349112df6b, fef8b98be2, 6750af6167,
8681ef36d6, ec9914205f, ccecfa9cb1, c41812fc88, d98d1be3fe, 661dc568f3,
1e4691f951, be674faff1, c68c919cea, 59dba1de88, 49d6d6425c, 28cc2009d4,
dd4fe9ff60, 899f285319, 4788545b05, 1934426789, 643192b347, 1031bcfc5a,
ce00c0a0d9, 1164eed2af, 557edecd40, b242b0a078, 08b86cc94b, 56544bb2fd,
70e043e641, c49a71f438, 5f07bbf8ce, 2f10472df3, ab89e93968, 070a8bfcd8,
8fe87c8157, 8fb44a822d, 3cff258577, 66347aff2a, b8b12a4000, 8c038326b9,
fd4b25932c, 4374fd1df1, b6065561cf, ef7bfd3f03, ae2edc3b5b, 0baafb158f,
ba121eddf0, 2e80e035c9, ea9b6087cf, 6959c997e2, 25786cafd3, 23dc313fa5,
1a16849df0, 3b68340eac, 7982aaf151, 7b29ed8ec1, c93e0ff8ee, 3b91fb6a2f,
7d8c15c030, bfbddab46b, e09a4ff019
appveyor.yml (deleted; filename inferred from the AppVeyor-specific content)
@@ -1,50 +0,0 @@
version: "{build}"

os: Windows Server 2012 R2

clone_folder: c:\gopath\src\github.com\rclone\rclone

cache:
  - '%LocalAppData%\go-build'

environment:
  GOPATH: C:\gopath
  CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
  ORIGPATH: '%PATH%'
  NOCCPATH: C:\MinGW\bin;%GOPATH%\bin;%PATH%
  PATHCC64: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%NOCCPATH%
  PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
  PATH: '%PATHCC64%'
  RCLONE_CONFIG_PASS:
    secure: sq9CPBbwaeKJv+yd24U44neORYPQVy6jsjnQptC+5yk=

install:
  - choco install winfsp -y
  - choco install zip -y
  - copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe

build_script:
  - echo %PATH%
  - echo %GOPATH%
  - go version
  - go env
  - go install
  - go build
  - make log_since_last_release > %TEMP%\git-log.txt
  - make version > %TEMP%\version
  - set /p RCLONE_VERSION=<%TEMP%\version
  - set PATH=%PATHCC32%
  - go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/386" -cgo -tags cmount %RCLONE_VERSION%
  - set PATH=%PATHCC64%
  - go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/amd64" -cgo -no-clean -tags cmount %RCLONE_VERSION%

test_script:
  - make GOTAGS=cmount quicktest
  - make GOTAGS=cmount racequicktest

artifacts:
  - path: rclone.exe
  - path: build/*-v*.zip

deploy_script:
  - IF "%APPVEYOR_REPO_NAME%" == "rclone/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
.circleci/config.yml (deleted; filename inferred from the CircleCI-specific content)
@@ -1,43 +0,0 @@
---
version: 2

jobs:

  build:
    machine: true

    working_directory: ~/.go_workspace/src/github.com/rclone/rclone

    steps:
      - checkout

      - run:
          name: Cross-compile rclone
          command: |
            docker pull billziss/xgo-cgofuse
            go get -v github.com/karalabe/xgo
            xgo \
                -image=billziss/xgo-cgofuse \
                -targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
                -tags cmount \
                -dest build \
                .
            xgo \
                -image=billziss/xgo-cgofuse \
                -targets=android/*,ios/* \
                -dest build \
                .

      - run:
          name: Build rclone
          command: |
            docker pull golang
            docker run --rm -v "$PWD":/usr/src/rclone -w /usr/src/rclone golang go build -mod=vendor -v

      - run:
          name: Upload artifacts
          command: |
            make circleci_upload

      - store_artifacts:
          path: build
.github/workflows/build.yml (new file, vendored, 250 lines)
@@ -0,0 +1,250 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable build.yml" -*-

name: build

# Trigger the workflow on push or pull request
on:
  push:
    branches:
      - '*'
    tags:
      - '*'
  pull_request:

jobs:
  build:
    timeout-minutes: 60
    strategy:
      fail-fast: false
      matrix:
        job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'modules_race', 'go1.10', 'go1.11', 'go1.12']

        include:
          - job_name: linux
            os: ubuntu-latest
            go: '1.13.x'
            modules: 'off'
            gotags: cmount
            build_flags: '-include "^linux/"'
            check: true
            quicktest: true
            deploy: true

          - job_name: mac
            os: macOS-latest
            go: '1.13.x'
            modules: 'off'
            gotags: ''  # cmount doesn't work on osx travis for some reason
            build_flags: '-include "^darwin/amd64" -cgo'
            quicktest: true
            racequicktest: true
            deploy: true

          - job_name: windows_amd64
            os: windows-latest
            go: '1.13.x'
            modules: 'off'
            gotags: cmount
            build_flags: '-include "^windows/amd64" -cgo'
            quicktest: true
            racequicktest: true
            deploy: true

          - job_name: windows_386
            os: windows-latest
            go: '1.13.x'
            modules: 'off'
            gotags: cmount
            goarch: '386'
            cgo: '1'
            build_flags: '-include "^windows/386" -cgo'
            quicktest: true
            deploy: true

          - job_name: other_os
            os: ubuntu-latest
            go: '1.13.x'
            modules: 'off'
            build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
            compile_all: true
            deploy: true

          - job_name: modules_race
            os: ubuntu-latest
            go: '1.13.x'
            modules: 'on'
            quicktest: true
            racequicktest: true

          - job_name: go1.10
            os: ubuntu-latest
            go: '1.10.x'
            modules: 'off'
            quicktest: true

          - job_name: go1.11
            os: ubuntu-latest
            go: '1.11.x'
            modules: 'off'
            quicktest: true

          - job_name: go1.12
            os: ubuntu-latest
            go: '1.12.x'
            modules: 'off'
            quicktest: true

    name: ${{ matrix.job_name }}

    runs-on: ${{ matrix.os }}

    steps:
      - name: Checkout
        uses: actions/checkout@master
        with:
          path: ./src/github.com/${{ github.repository }}

      - name: Install Go
        uses: actions/setup-go@v1
        with:
          go-version: ${{ matrix.go }}

      - name: Set environment variables
        shell: bash
        run: |
          echo '::set-env name=GOPATH::${{ runner.workspace }}'
          echo '::add-path::${{ runner.workspace }}/bin'
          echo '::set-env name=GO111MODULE::${{ matrix.modules }}'
          echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
          echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
          if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
          if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi

      - name: Install Libraries on Linux
        shell: bash
        run: |
          sudo modprobe fuse
          sudo chmod 666 /dev/fuse
          sudo chown root:$USER /etc/fuse.conf
          sudo apt-get install fuse libfuse-dev rpm pkg-config
        if: matrix.os == 'ubuntu-latest'

      - name: Install Libraries on macOS
        shell: bash
        run: |
          brew update
          brew cask install osxfuse
        if: matrix.os == 'macOS-latest'

      - name: Install Libraries on Windows
        shell: powershell
        run: |
          $ProgressPreference = 'SilentlyContinue'
          choco install -y winfsp zip
          Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
          if ($env:GOARCH -eq "386") {
            choco install -y mingw --forcex86 --force
            Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
          }
          # Copy mingw32-make.exe to make.exe so the same command line
          # can be used on Windows as on macOS and Linux
          $path = (get-command mingw32-make.exe).Path
          Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
        if: matrix.os == 'windows-latest'

      - name: Print Go version and environment
        shell: bash
        run: |
          printf "Using go at: $(which go)\n"
          printf "Go version: $(go version)\n"
          printf "\n\nGo environment:\n\n"
          go env
          printf "\n\nRclone environment:\n\n"
          make vars
          printf "\n\nSystem environment:\n\n"
          env

      - name: Run tests
        shell: bash
        run: |
          make
          make quicktest
        if: matrix.quicktest

      - name: Race test
        shell: bash
        run: |
          make racequicktest
        if: matrix.racequicktest

      - name: Code quality test
        shell: bash
        run: |
          make build_dep
          make check
        if: matrix.check

      - name: Compile all architectures test
        shell: bash
        run: |
          make
          make compile_all
        if: matrix.compile_all

      - name: Deploy built binaries
        shell: bash
        run: |
          if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep ; fi
          make travis_beta
        env:
          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
        # working-directory: '$(modulePath)'
        if: matrix.deploy && github.head_ref == ''

  xgo:
    timeout-minutes: 60
    name: "xgo cross compile"
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@master
        with:
          path: ./src/github.com/${{ github.repository }}

      - name: Set environment variables
        shell: bash
        run: |
          echo '::set-env name=GOPATH::${{ runner.workspace }}'
          echo '::add-path::${{ runner.workspace }}/bin'

      - name: Cross-compile rclone
        run: |
          docker pull billziss/xgo-cgofuse
          go get -v github.com/karalabe/xgo
          xgo \
              -image=billziss/xgo-cgofuse \
              -targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
              -tags cmount \
              -dest build \
              .
          xgo \
              -image=billziss/xgo-cgofuse \
              -targets=android/*,ios/* \
              -dest build \
              .

      - name: Build rclone
        run: |
          docker pull golang
          docker run --rm -v "$PWD":/usr/src/rclone -w /usr/src/rclone golang go build -mod=vendor -v

      - name: Upload artifacts
        run: |
          make circleci_upload
        env:
          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
        if: github.head_ref == ''
.travis.yml (deleted, 128 lines)
@@ -1,128 +0,0 @@
---
language: go
sudo: required
dist: xenial
os:
  - linux
go_import_path: github.com/rclone/rclone
before_install:
  - git fetch --unshallow --tags
  - |
    if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
      sudo modprobe fuse
      sudo chmod 666 /dev/fuse
      sudo chown root:$USER /etc/fuse.conf
    fi
    if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
      brew update
      brew tap caskroom/cask
      brew cask install osxfuse
    fi
    if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then
      choco install -y winfsp zip make
      cd ../.. # fix crlf in git checkout
      mv $TRAVIS_REPO_SLUG _old
      git config --global core.autocrlf false
      git clone _old $TRAVIS_REPO_SLUG
      cd $TRAVIS_REPO_SLUG
    fi
install:
  - make vars
env:
  global:
    - GOTAGS=cmount
    - GOMAXPROCS=8  # workaround for cmd/mount tests locking up - see #3154
    - GO111MODULE=off
    - GITHUB_USER=ncw
    - secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
    - secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
addons:
  apt:
    packages:
      - fuse
      - libfuse-dev
      - rpm
      - pkg-config
cache:
  directories:
    - $HOME/.cache/go-build
matrix:
  allow_failures:
    - go: tip
  include:
    - go: 1.10.x
      script:
        - make quicktest
    - go: 1.11.x
      script:
        - make quicktest
    - go: 1.12.x
      script:
        - make quicktest
    - go: 1.13.x
      name: Linux
      env:
        - GOTAGS=cmount
        - BUILD_FLAGS='-include "^linux/"'
        - DEPLOY=true
      script:
        - make build_dep
        - make check
        - make quicktest
    - go: 1.13.x
      name: Go Modules / Race
      env:
        - GO111MODULE=on
        - GOPROXY=https://proxy.golang.org
      script:
        - make quicktest
        - make racequicktest
    - go: 1.13.x
      name: Other OS
      env:
        - DEPLOY=true
        - BUILD_FLAGS='-exclude "^(windows|darwin|linux)/"'
      script:
        - make
        - make compile_all
    - go: 1.13.x
      name: macOS
      os: osx
      env:
        - GOTAGS=  # cmount doesn't work on osx travis for some reason
        - BUILD_FLAGS='-include "^darwin/" -cgo'
        - DEPLOY=true
      cache:
        directories:
          - $HOME/Library/Caches/go-build
      script:
        - make
        - make quicktest
        - make racequicktest
    # - os: windows
    #   name: Windows
    #   go: 1.13.x
    #   env:
    #     - GOTAGS=cmount
    #     - CPATH='C:\Program Files (x86)\WinFsp\inc\fuse'
    #     - BUILD_FLAGS='-include "^windows/amd64" -cgo'  # 386 doesn't build yet
    #   #filter_secrets: false  # works around a problem with secrets under windows
    #   cache:
    #     directories:
    #       - ${LocalAppData}/go-build
    #   script:
    #     - make
    #     - make quicktest
    #     - make racequicktest
    - go: tip
      script:
        - make quicktest

deploy:
  provider: script
  script: make travis_beta
  skip_cleanup: true
  on:
    repo: rclone/rclone
    all_branches: true
    condition: $TRAVIS_PULL_REQUEST == false && $DEPLOY == true
CONTRIBUTING.md (filename inferred from the content)
@@ -341,6 +341,12 @@ Getting going
 * Add your remote to the imports in `backend/all/all.go`
 * HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
 * Try to implement as many optional methods as possible as it makes the remote more usable.
+* Use fs/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
+  * `go install -tags noencode`
+  * `rclone purge -v TestRemote:rclone-info`
+  * `rclone info -vv --write-json remote.json TestRemote:rclone-info`
+  * `go run cmd/info/internal/build_csv/main.go -o remote.csv remote.json`
+  * open `remote.csv` in a spreadsheet and examine

 Unit tests

@@ -367,12 +373,54 @@ Or if you want to run the integration tests manually:

 See the [testing](#testing) section for more information on integration tests.

-Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.
+Add your fs to the docs - you'll need to pick an icon for it from
+[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
+alphabetical order of full name of remote (eg `drive` is ordered as
+`Google Drive`) but with the local file system last.

 * `README.md` - main GitHub page
 * `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
+  * make sure this has the `autogenerated options` comments in (see your reference backend docs)
+  * update them with `make backenddocs` - revert any changes in other backends
 * `docs/content/overview.md` - overview docs
 * `docs/content/docs.md` - list of remotes in config section
 * `docs/content/about.md` - front page of rclone.org
 * `docs/layouts/chrome/navbar.html` - add it to the website navigation
 * `bin/make_manual.py` - add the page to the `docs` constant

+Once you've written the docs, run `make serve` and check they look OK
+in the web browser and the links (internal and external) all work.
+
+## Writing a plugin ##
+
+New features (backends, commands) can also be added "out-of-tree", through Go plugins.
+Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
+This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
+
+Usage
+
+- Naming
+  - Plugins names must have the pattern `librcloneplugin_KIND_NAME.so`.
+  - `KIND` should be one of `backend`, `command` or `bundle`.
+  - Example: A plugin with backend support for PiFS would be called
+    `librcloneplugin_backend_pifs.so`.
+- Loading
+  - Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
+  - Supported on rclone v1.50 or greater.
+  - All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
+  - If this variable doesn't exist, plugin support is disabled.
+  - Plugins must be compiled against the exact version of rclone to work.
+    (The rclone used during building the plugin must be the same as the source of rclone)
+
+Building
+
+To turn your existing additions into a Go plugin, move them to an external repository
+and change the top-level package name to `main`.
+
+Check `rclone --version` and make sure that the plugin's rclone dependency and host Go version match.
+
+Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
+
+[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
+
+[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)
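To make the plugin section added above concrete, here is a minimal sketch of what such an out-of-tree plugin's `main` package could look like. The backend name `mybackend` and the stub `NewFs` are hypothetical illustrations, not part of the diff; the `fs.Register`/`fs.RegInfo` pattern mirrors the one visible in the box backend changes later in this diff.

```go
// A minimal, hypothetical plugin sketch. Built with
//   go build -buildmode=plugin -o librcloneplugin_backend_mybackend.so .
// and loaded from $RCLONE_PLUGIN_PATH as described above.
package main

import (
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

// NewFs would construct the backend; this stub only shows the expected shape.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	return nil, errors.New("mybackend: not implemented")
}

// init runs when rclone dlopens the plugin, registering the backend
// exactly as an in-tree backend would.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "mybackend",
		Description: "Hypothetical example backend loaded as a plugin",
		NewFs:       NewFs,
	})
}
```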
Dockerfile (filename inferred from the content)
@@ -12,10 +12,11 @@ RUN ./rclone version
 # Begin final image
 FROM alpine:latest

-RUN apk --no-cache add ca-certificates
+RUN apk --no-cache add ca-certificates fuse

-WORKDIR /root/
-COPY --from=builder /go/src/github.com/rclone/rclone/rclone .
+COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/

-ENTRYPOINT [ "./rclone" ]
+ENTRYPOINT [ "rclone" ]
+
+WORKDIR /data
+ENV XDG_CONFIG_HOME=/config
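Since the final image now sets `WORKDIR /data` and `XDG_CONFIG_HOME=/config`, a typical invocation bind-mounts those two paths. This is a hedged usage sketch; the image tag `rclone/rclone` is an assumption, not stated in the diff:

```sh
# Hypothetical invocation of the image built from this Dockerfile:
# the config file resolves under /config/rclone (via XDG_CONFIG_HOME=/config)
# and relative paths resolve under /data (the WORKDIR).
docker run --rm \
  -v ~/.config/rclone:/config/rclone \
  -v "$PWD":/data \
  rclone/rclone listremotes
```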
MANUAL.html (generated)
@@ -15285,7 +15285,7 @@ export NO_PROXY=$no_proxy</code></pre>
 <pre><code>mkdir -p /etc/ssl/certs/
 curl -o /etc/ssl/certs/ca-certificates.crt https://raw.githubusercontent.com/bagder/ca-bundle/master/ca-bundle.crt
 ntpclient -s -h pool.ntp.org</code></pre>
-<p>The two environment variables <code>SSL_CERT_FILE</code> and <code>SSL_CERT_DIR</code>, mentioned in the <a href="https://godoc.org/crypto/x509">x509 pacakge</a>, provide an additional way to provide the SSL root certificates.</p>
+<p>The two environment variables <code>SSL_CERT_FILE</code> and <code>SSL_CERT_DIR</code>, mentioned in the <a href="https://godoc.org/crypto/x509">x509 package</a>, provide an additional way to provide the SSL root certificates.</p>
 <p>Note that you may need to add the <code>--insecure</code> option to the <code>curl</code> command line if it doesn’t work without.</p>
 <pre><code>curl --insecure -o /etc/ssl/certs/ca-certificates.crt https://raw.githubusercontent.com/bagder/ca-bundle/master/ca-bundle.crt</code></pre>
 <h3 id="rclone-gives-failed-to-load-config-file-function-not-implemented-error">Rclone gives Failed to load config file: function not implemented error</h3>
MANUAL.md (generated)
@@ -20626,7 +20626,7 @@ curl -o /etc/ssl/certs/ca-certificates.crt https://raw.githubusercontent.com/bag
 ntpclient -s -h pool.ntp.org
 ```

-The two environment variables `SSL_CERT_FILE` and `SSL_CERT_DIR`, mentioned in the [x509 pacakge](https://godoc.org/crypto/x509),
+The two environment variables `SSL_CERT_FILE` and `SSL_CERT_DIR`, mentioned in the [x509 package](https://godoc.org/crypto/x509),
 provide an additional way to provide the SSL root certificates.

 Note that you may need to add the `--insecure` option to the `curl` command line if it doesn't work without.
MANUAL.txt (generated)
@@ -20570,7 +20570,7 @@ time which is important for SSL to work properly.
     ntpclient -s -h pool.ntp.org

 The two environment variables SSL_CERT_FILE and SSL_CERT_DIR, mentioned
-in the x509 pacakge, provide an additional way to provide the SSL root
+in the x509 package, provide an additional way to provide the SSL root
 certificates.

 Note that you may need to add the --insecure option to the curl command
Makefile
@@ -1,18 +1,29 @@
 SHELL = bash
-BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(shell git rev-parse --abbrev-ref HEAD))
+# Branch we are working on
+BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD))
+# Tag of the current commit, if any. If this is not "" then we are building a release
+RELEASE_TAG := $(shell git tag -l --points-at HEAD)
+# Version of last release (may not be on this branch)
+VERSION := $(shell cat VERSION)
+# Last tag on this branch
 LAST_TAG := $(shell git describe --tags --abbrev=0)
-ifeq ($(BRANCH),$(LAST_TAG))
+# If we are working on a release, override branch to master
+ifdef RELEASE_TAG
 BRANCH := master
 endif
 TAG_BRANCH := -$(BRANCH)
 BRANCH_PATH := branch/
+# If building HEAD or master then unset TAG_BRANCH and BRANCH_PATH
 ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
 TAG_BRANCH :=
 BRANCH_PATH :=
 endif
-TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
-NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
-ifneq ($(TAG),$(LAST_TAG))
+# Make version suffix -DDD-gCCCCCCCC (D=commits since last relase, C=Commit) or blank
+VERSION_SUFFIX := $(shell git describe --abbrev=8 --tags | perl -lpe 's/^v\d+\.\d+\.\d+//; s/^-(\d+)/"-".sprintf("%03d",$$1)/e;')
+# TAG is current version + number of commits since last release + branch
+TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)
+NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
+ifndef RELEASE_TAG
 TAG := $(TAG)-beta
 endif
 GO_VERSION := $(shell go version)
@@ -33,8 +44,9 @@ endif
 .PHONY: rclone test_all vars version

 rclone:
-	go install -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
-	cp -av `go env GOPATH`/bin/rclone .
+	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
+	mkdir -p `go env GOPATH`/bin/
+	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/

 test_all:
 	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
@@ -43,8 +55,8 @@ vars:
 	@echo SHELL="'$(SHELL)'"
 	@echo BRANCH="'$(BRANCH)'"
 	@echo TAG="'$(TAG)'"
-	@echo LAST_TAG="'$(LAST_TAG)'"
-	@echo NEW_TAG="'$(NEW_TAG)'"
+	@echo VERSION="'$(VERSION)'"
+	@echo NEXT_VERSION="'$(NEXT_VERSION)'"
 	@echo GO_VERSION="'$(GO_VERSION)'"
 	@echo BETA_URL="'$(BETA_URL)'"
@@ -75,8 +87,8 @@ build_dep:

 # Get the release dependencies
 release_dep:
-	go get -u github.com/goreleaser/nfpm/...
-	go get -u github.com/aktau/github-release
+	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
+	go run bin/get-github-release.go -extract github-release aktau/github-release 'linux-amd64-github-release.tar.bz2'

 # Update dependencies
 update:
@@ -191,24 +203,25 @@ serve: website
 	cd docs && hugo server -v -w

 tag: doc
-	@echo "Old tag is $(LAST_TAG)"
-	@echo "New tag is $(NEW_TAG)"
-	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
-	echo -n "$(NEW_TAG)" > docs/layouts/partials/version.html
-	git tag -s -m "Version $(NEW_TAG)" $(NEW_TAG)
-	bin/make_changelog.py $(LAST_TAG) $(NEW_TAG) > docs/content/changelog.md.new
+	@echo "Old tag is $(VERSION)"
+	@echo "New tag is $(NEXT_VERSION)"
+	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)\"\n" | gofmt > fs/version.go
+	echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
+	echo "$(NEXT_VERSION)" > VERSION
+	git tag -s -m "Version $(NEXT_VERSION)" $(NEXT_VERSION)
+	bin/make_changelog.py $(LAST_TAG) $(NEXT_VERSION) > docs/content/changelog.md.new
 	mv docs/content/changelog.md.new docs/content/changelog.md
 	@echo "Edit the new changelog in docs/content/changelog.md"
 	@echo "Then commit all the changes"
-	@echo git commit -m \"Version $(NEW_TAG)\" -a -v
+	@echo git commit -m \"Version $(NEXT_VERSION)\" -a -v
 	@echo "And finally run make retag before make cross etc"

 retag:
-	git tag -f -s -m "Version $(LAST_TAG)" $(LAST_TAG)
+	git tag -f -s -m "Version $(VERSION)" $(VERSION)

 startdev:
-	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(LAST_TAG)-DEV\"\n" | gofmt > fs/version.go
-	git commit -m "Start $(LAST_TAG)-DEV development" fs/version.go
+	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(VERSION)-DEV\"\n" | gofmt > fs/version.go
+	git commit -m "Start $(VERSION)-DEV development" fs/version.go

 winzip:
 	zip -9 rclone-$(TAG).zip rclone.exe
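The `NEXT_VERSION` bump above is just a perl one-liner over the contents of the new `VERSION` file, and it can be sanity-checked from a shell. Note that make doubles the dollar signs, so the Makefile's `$$_` becomes `$_` when run directly:

```sh
$ echo v1.49.0 | perl -lpe 's/v//; $_ += 0.01; $_ = sprintf("v%.2f.0", $_)'
v1.50.0
```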
README.md (filename inferred from the content)
@@ -22,13 +22,14 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and

 ## Storage providers

-* 1Fichier [:page_facing_up:](https://rclone.org/ficher/)
+* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
 * Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
 * Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
 * Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
 * Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
 * Box [:page_facing_up:](https://rclone.org/box/)
 * Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
+* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
 * DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
 * Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
 * Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
@@ -76,6 +77,7 @@ Please see [the full list of all storage providers and their features](https://r
 * [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
 * [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
 * Can sync to and from network, e.g. two different cloud accounts
+* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
 * Optional encryption ([Crypt](https://rclone.org/crypt/))
 * Optional cache ([Cache](https://rclone.org/cache/))
 * Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
RELEASE.md (filename inferred from the content)
@@ -56,8 +56,7 @@ Can be fixed with

 ## Making a point release

-If rclone needs a point release due to some horrendous bug then a
-point release is necessary.
+If rclone needs a point release due to some horrendous bug:

 First make the release branch. If this is a second point release then
 this will be done already.
@@ -72,7 +71,7 @@ Now
 * git co ${BASE_TAG}-fixes
 * git cherry-pick any fixes
 * Test (see above)
-* make NEW_TAG=${NEW_TAG} tag
+* make NEXT_VERSION=${NEW_TAG} tag
 * edit docs/content/changelog.md
 * make TAG=${NEW_TAG} doc
 * git commit -a -v -m "Version ${NEW_TAG}"
@@ -90,8 +89,8 @@ Now
 * NB this overwrites the current beta so we need to do this
 * git co master
 * make LAST_TAG=${NEW_TAG} startdev
-* # cherry pick the changes to the changelog
-* git checkout ${BASE_TAG}-fixes docs/content/changelog.md
+* # cherry pick the changes to the changelog and VERSION
+* git checkout ${BASE_TAG}-fixes VERSION docs/content/changelog.md
 * git commit --amend
 * git push
 * Announce!
azure-pipelines.yml (deleted; the file names itself in its yamllint comment)
@@ -1,239 +0,0 @@
---
# Azure pipelines build for rclone
# Parts stolen shamelessly from all round the Internet, especially Caddy
# -*- compile-command: "yamllint -f parsable azure-pipelines.yml" -*-

trigger:
  branches:
    include:
      - '*'
  tags:
    include:
      - '*'

variables:
  GOROOT: $(gorootDir)/go
  GOPATH: $(system.defaultWorkingDirectory)/gopath
  GOCACHE: $(system.defaultWorkingDirectory)/gocache
  GOBIN: $(GOPATH)/bin
  modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)'
  GO111MODULE: 'off'
  GOTAGS: cmount
  GO_LATEST: false
  CPATH: ''
  GO_INSTALL_ARCH: amd64

strategy:
  matrix:
    linux:
      imageName: ubuntu-16.04
      gorootDir: /usr/local
      GO_VERSION: latest
      GOTAGS: cmount
      BUILD_FLAGS: '-include "^linux/"'
      MAKE_CHECK: true
      MAKE_QUICKTEST: true
      DEPLOY: true
    mac:
      imageName: macos-10.13
      gorootDir: /usr/local
      GO_VERSION: latest
      GOTAGS: ""  # cmount doesn't work on osx travis for some reason
      BUILD_FLAGS: '-include "^darwin/" -cgo'
      MAKE_QUICKTEST: true
      MAKE_RACEQUICKTEST: true
      DEPLOY: true
    windows_amd64:
      imageName: windows-2019
      gorootDir: C:\
      GO_VERSION: latest
      BUILD_FLAGS: '-include "^windows/amd64" -cgo'
      MAKE_QUICKTEST: true
      MAKE_RACEQUICKTEST: true
      DEPLOY: true
    windows_386:
      imageName: windows-2019
      gorootDir: C:\
      GO_VERSION: latest
      GO_INSTALL_ARCH: 386
      BUILD_FLAGS: '-include "^windows/386" -cgo'
      MAKE_QUICKTEST: true
      DEPLOY: true
    other_os:
      imageName: ubuntu-16.04
      gorootDir: /usr/local
      GO_VERSION: latest
      BUILD_FLAGS: '-exclude "^(windows|darwin|linux)/"'
      MAKE_COMPILE_ALL: true
      DEPLOY: true
    modules_race:
      imageName: ubuntu-16.04
      gorootDir: /usr/local
      GO_VERSION: latest
      GO111MODULE: on
      GOPROXY: https://proxy.golang.org
      MAKE_QUICKTEST: true
      MAKE_RACEQUICKTEST: true
    go1.10:
      imageName: ubuntu-16.04
      gorootDir: /usr/local
      GO_VERSION: go1.10.8
      MAKE_QUICKTEST: true
    go1.11:
      imageName: ubuntu-16.04
      gorootDir: /usr/local
      GO_VERSION: go1.11.13
      MAKE_QUICKTEST: true
    go1.12:
      imageName: ubuntu-16.04
      gorootDir: /usr/local
      GO_VERSION: go1.12.9
      MAKE_QUICKTEST: true

pool:
  vmImage: $(imageName)

steps:
  - bash: |
      latestGo=$(curl "https://golang.org/VERSION?m=text")
      echo "##vso[task.setvariable variable=GO_VERSION]$latestGo"
      echo "##vso[task.setvariable variable=GO_LATEST]true"
      echo "Latest Go version: $latestGo"
    condition: eq( variables['GO_VERSION'], 'latest' )
    continueOnError: false
    displayName: "Get latest Go version"

  - bash: |
      sudo rm -f $(which go)
      echo '##vso[task.prependpath]$(GOBIN)'
      echo '##vso[task.prependpath]$(GOROOT)/bin'
      mkdir -p '$(modulePath)'
      shopt -s extglob
      shopt -s dotglob
      mv !(gopath) '$(modulePath)'
    continueOnError: false
    displayName: Remove old Go, set GOBIN/GOROOT, and move project into GOPATH

  - task: CacheBeta@0
    inputs:
      key: go-build-cache | "$(Agent.JobName)"
      path: $(GOCACHE)
    continueOnError: true
    displayName: Cache go build
    condition: ne( variables['GOCACHE'], '' )

  # Install Libraries (varies by platform)

  - bash: |
      sudo modprobe fuse
      sudo chmod 666 /dev/fuse
      sudo chown root:$USER /etc/fuse.conf
      sudo apt-get install fuse libfuse-dev rpm pkg-config
    condition: eq( variables['Agent.OS'], 'Linux' )
    continueOnError: false
    displayName: Install Libraries on Linux

  - bash: |
      brew update
      brew tap caskroom/cask
      brew cask install osxfuse
    condition: eq( variables['Agent.OS'], 'Darwin' )
    continueOnError: false
    displayName: Install Libraries on macOS

  - powershell: |
      $ProgressPreference = 'SilentlyContinue'
      choco install -y winfsp zip
      Write-Host "##vso[task.setvariable variable=CPATH]C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
      if ($env:GO_INSTALL_ARCH -eq "386") {
        choco install -y mingw --forcex86 --force
        Write-Host "##vso[task.prependpath]C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
      }
      # Copy mingw32-make.exe to make.exe so the same command line
      # can be used on Windows as on macOS and Linux
      $path = (get-command mingw32-make.exe).Path
      Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
    condition: eq( variables['Agent.OS'], 'Windows_NT' )
    continueOnError: false
    displayName: Install Libraries on Windows

  # Install Go (this varies by platform)

  - bash: |
      wget "https://dl.google.com/go/$(GO_VERSION).linux-$(GO_INSTALL_ARCH).tar.gz"
      sudo mkdir $(gorootDir)
      sudo chown ${USER}:${USER} $(gorootDir)
      tar -C $(gorootDir) -xzf "$(GO_VERSION).linux-$(GO_INSTALL_ARCH).tar.gz"
    condition: eq( variables['Agent.OS'], 'Linux' )
    continueOnError: false
    displayName: Install Go on Linux

  - bash: |
      wget "https://dl.google.com/go/$(GO_VERSION).darwin-$(GO_INSTALL_ARCH).tar.gz"
      sudo tar -C $(gorootDir) -xzf "$(GO_VERSION).darwin-$(GO_INSTALL_ARCH).tar.gz"
    condition: eq( variables['Agent.OS'], 'Darwin' )
    continueOnError: false
    displayName: Install Go on macOS

  - powershell: |
      $ProgressPreference = 'SilentlyContinue'
      Write-Host "Downloading Go $(GO_VERSION) for $(GO_INSTALL_ARCH)"
      (New-Object System.Net.WebClient).DownloadFile("https://dl.google.com/go/$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip", "$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip")
      Write-Host "Extracting Go"
      Expand-Archive "$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip" -DestinationPath "$(gorootDir)"
    condition: eq( variables['Agent.OS'], 'Windows_NT' )
    continueOnError: false
    displayName: Install Go on Windows

  # Display environment for debugging

  - bash: |
      printf "Using go at: $(which go)\n"
      printf "Go version: $(go version)\n"
      printf "\n\nGo environment:\n\n"
      go env
      printf "\n\nRclone environment:\n\n"
      make vars
      printf "\n\nSystem environment:\n\n"
      env
    workingDirectory: '$(modulePath)'
    displayName: Print Go version and environment

  # Run Tests

  - bash: |
      make
      make quicktest
    workingDirectory: '$(modulePath)'
    displayName: Run tests
    condition: eq( variables['MAKE_QUICKTEST'], 'true' )

  - bash: |
      make racequicktest
    workingDirectory: '$(modulePath)'
    displayName: Race test
    condition: eq( variables['MAKE_RACEQUICKTEST'], 'true' )

  - bash: |
      make build_dep
      make check
    workingDirectory: '$(modulePath)'
    displayName: Code quality test
    condition: eq( variables['MAKE_CHECK'], 'true' )

  - bash: |
      make
      make compile_all
    workingDirectory: '$(modulePath)'
    displayName: Compile all architectures test
    condition: eq( variables['MAKE_COMPILE_ALL'], 'true' )

  - bash: |
      make travis_beta
    env:
      RCLONE_CONFIG_PASS: $(RCLONE_CONFIG_PASS)
      BETA_SUBDIR: 'azure_pipelines'  # FIXME remove when removing travis/appveyor
    workingDirectory: '$(modulePath)'
    displayName: Deploy built binaries
    condition: and( eq( variables['DEPLOY'], 'true' ), ne( variables['Build.Reason'], 'PullRequest' ) )
backend/all/all.go (filename inferred from the import list)
@@ -8,6 +8,7 @@ import (
 	_ "github.com/rclone/rclone/backend/b2"
 	_ "github.com/rclone/rclone/backend/box"
 	_ "github.com/rclone/rclone/backend/cache"
+	_ "github.com/rclone/rclone/backend/chunker"
 	_ "github.com/rclone/rclone/backend/crypt"
 	_ "github.com/rclone/rclone/backend/drive"
 	_ "github.com/rclone/rclone/backend/dropbox"
@@ -30,6 +31,7 @@ import (
 	_ "github.com/rclone/rclone/backend/qingstor"
 	_ "github.com/rclone/rclone/backend/s3"
 	_ "github.com/rclone/rclone/backend/sftp"
+	_ "github.com/rclone/rclone/backend/sharefile"
 	_ "github.com/rclone/rclone/backend/swift"
 	_ "github.com/rclone/rclone/backend/union"
 	_ "github.com/rclone/rclone/backend/webdav"
backend/amazonclouddrive/amazonclouddrive.go (filename inferred from the acd types and encoder constant)
@@ -28,6 +28,7 @@ import (
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
+	"github.com/rclone/rclone/fs/encodings"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
@@ -38,6 +39,7 @@ import (
 )

 const (
+	enc = encodings.AmazonCloudDrive
 	folderKind = "FOLDER"
 	fileKind = "FILE"
 	statusAvailable = "AVAILABLE"
@@ -384,7 +386,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
 	var resp *http.Response
 	var subFolder *acd.Folder
 	err = f.pacer.Call(func() (bool, error) {
-		subFolder, resp, err = folder.GetFolder(leaf)
+		subFolder, resp, err = folder.GetFolder(enc.FromStandardName(leaf))
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -411,7 +413,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 	var resp *http.Response
 	var info *acd.Folder
 	err = f.pacer.Call(func() (bool, error) {
-		info, resp, err = folder.CreateFolder(leaf)
+		info, resp, err = folder.CreateFolder(enc.FromStandardName(leaf))
 		return f.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -479,6 +481,7 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
 			if !hasValidParent {
 				continue
 			}
+			*node.Name = enc.ToStandardName(*node.Name)
 			// Store the nodes up in case we have to retry the listing
 			out = append(out, node)
 		}
@@ -668,7 +671,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 	err = f.pacer.CallNoRetry(func() (bool, error) {
 		start := time.Now()
 		f.tokenRenewer.Start()
-		info, resp, err = folder.Put(in, leaf)
+		info, resp, err = folder.Put(in, enc.FromStandardName(leaf))
 		f.tokenRenewer.Stop()
 		var ok bool
 		ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
@@ -1038,7 +1041,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 	var resp *http.Response
 	var info *acd.File
 	err = o.fs.pacer.Call(func() (bool, error) {
-		info, resp, err = folder.GetFile(leaf)
+		info, resp, err = folder.GetFile(enc.FromStandardName(leaf))
 		return o.fs.shouldRetry(resp, err)
 	})
 	if err != nil {
@@ -1158,7 +1161,7 @@ func (f *Fs) restoreNode(info *acd.Node) (newInfo *acd.Node, err error) {
 func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) {
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		newInfo, resp, err = info.Rename(newName)
+		newInfo, resp, err = info.Rename(enc.FromStandardName(newName))
 		return f.shouldRetry(resp, err)
 	})
 	return newInfo, err
@@ -1354,10 +1357,11 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoin
 			if len(node.Parents) > 0 {
 				if path, ok := f.dirCache.GetInv(node.Parents[0]); ok {
 					// and append the drive file name to compute the full file name
+					name := enc.ToStandardName(*node.Name)
 					if len(path) > 0 {
-						path = path + "/" + *node.Name
+						path = path + "/" + name
 					} else {
-						path = *node.Name
+						path = name
 					}
 					// this will now clear the actual file too
 					pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
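The pattern in the hunks above is symmetric: names pass through `enc.FromStandardName` (or `FromStandardPath`) just before they hit the provider API, and through `enc.ToStandardName`/`ToStandardPath` as soon as they come back, so the rest of rclone only ever sees standard names. A small sketch of that round trip, using the encoder constant this backend declares; the sample file name is purely illustrative:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/encodings"
)

func main() {
	const enc = encodings.AmazonCloudDrive

	name := "file*with:odd chars"      // a standard rclone name
	wire := enc.FromStandardName(name) // what the provider API receives
	back := enc.ToStandardName(wire)   // what listings hand back to rclone

	fmt.Println(wire)
	fmt.Println(back == name) // the mapping is designed to round-trip losslessly
}
```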
backend/azureblob/azureblob.go (filename inferred from the azblob types and encoder constant)
@@ -28,6 +28,7 @@ import (
 	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
+	"github.com/rclone/rclone/fs/encodings"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
@@ -60,6 +61,8 @@ const (
 	emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
 )

+const enc = encodings.AzureBlob
+
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{
@@ -208,7 +211,8 @@ func parsePath(path string) (root string) {
 // split returns container and containerPath from the rootRelativePath
 // relative to f.root
 func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) {
-	return bucket.Split(path.Join(f.root, rootRelativePath))
+	containerName, containerPath = bucket.Split(path.Join(f.root, rootRelativePath))
+	return enc.FromStandardName(containerName), enc.FromStandardPath(containerPath)
 }

 // split returns container and containerPath from the object
@@ -575,18 +579,18 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
 	}
 	// Advance marker to next
 	marker = response.NextMarker

 	for i := range response.Segment.BlobItems {
 		file := &response.Segment.BlobItems[i]
 		// Finish if file name no longer has prefix
 		// if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
 		// 	return nil
 		// }
-		if !strings.HasPrefix(file.Name, prefix) {
-			fs.Debugf(f, "Odd name received %q", file.Name)
+		remote := enc.ToStandardPath(file.Name)
+		if !strings.HasPrefix(remote, prefix) {
+			fs.Debugf(f, "Odd name received %q", remote)
 			continue
 		}
-		remote := file.Name[len(prefix):]
+		remote = remote[len(prefix):]
 		if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) {
 			continue // skip directory marker
 		}
@@ -602,6 +606,7 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
 	// Send the subdirectories
 	for _, remote := range response.Segment.BlobPrefixes {
 		remote := strings.TrimRight(remote.Name, "/")
+		remote = enc.ToStandardPath(remote)
 		if !strings.HasPrefix(remote, prefix) {
 			fs.Debugf(f, "Odd directory name received %q", remote)
 			continue
@@ -665,7 +670,7 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
 		return entries, nil
 	}
 	err = f.listContainersToFn(func(container *azblob.ContainerItem) error {
-		d := fs.NewDir(container.Name, container.Properties.LastModified)
+		d := fs.NewDir(enc.ToStandardName(container.Name), container.Properties.LastModified)
 		f.cache.MarkOK(container.Name)
 		entries = append(entries, d)
 		return nil
@@ -1508,4 +1513,6 @@ var (
 	_ fs.ListRer = &Fs{}
 	_ fs.Object = &Object{}
 	_ fs.MimeTyper = &Object{}
+	_ fs.GetTierer = &Object{}
+	_ fs.SetTierer = &Object{}
 )

backend/b2/b2.go (filename inferred from the B2 API types and encoder constant)
@@ -25,6 +25,7 @@ import (
 	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
+	"github.com/rclone/rclone/fs/encodings"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
@@ -34,6 +35,8 @@ import (
 	"github.com/rclone/rclone/lib/rest"
 )

+const enc = encodings.B2
+
 const (
 	defaultEndpoint = "https://api.backblazeb2.com"
 	headerPrefix = "x-bz-info-" // lower case as that is what the server returns
@@ -399,7 +402,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	}
 	// If this is a key limited to a single bucket, it must exist already
 	if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
-		allowedBucket := f.info.Allowed.BucketName
+		allowedBucket := enc.ToStandardName(f.info.Allowed.BucketName)
 		if allowedBucket == "" {
 			return nil, errors.New("bucket that application key is restricted to no longer exists")
 		}
@@ -620,11 +623,11 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 	var request = api.ListFileNamesRequest{
 		BucketID: bucketID,
 		MaxFileCount: chunkSize,
-		Prefix: directory,
+		Prefix: enc.FromStandardPath(directory),
 		Delimiter: delimiter,
 	}
 	if directory != "" {
-		request.StartFileName = directory
+		request.StartFileName = enc.FromStandardPath(directory)
 	}
 	opts := rest.Opts{
 		Method: "POST",
@@ -644,6 +647,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 	}
 	for i := range response.Files {
 		file := &response.Files[i]
+		file.Name = enc.ToStandardPath(file.Name)
 		// Finish if file name no longer has prefix
 		if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
 			return nil
@@ -844,6 +848,7 @@ func (f *Fs) listBucketsToFn(ctx context.Context, fn listBucketFn) error {
 	f._bucketType = make(map[string]string, 1)
 	for i := range response.Buckets {
 		bucket := &response.Buckets[i]
+		bucket.Name = enc.ToStandardName(bucket.Name)
 		f.cache.MarkOK(bucket.Name)
 		f._bucketID[bucket.Name] = bucket.ID
 		f._bucketType[bucket.Name] = bucket.Type
@@ -965,7 +970,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
 	}
 	var request = api.CreateBucketRequest{
 		AccountID: f.info.AccountID,
-		Name: bucket,
+		Name: enc.FromStandardName(bucket),
 		Type: "allPrivate",
 	}
 	var response api.Bucket
@@ -1049,7 +1054,7 @@ func (f *Fs) hide(ctx context.Context, bucket, bucketPath string) error {
 	}
 	var request = api.HideFileRequest{
 		BucketID: bucketID,
-		Name: bucketPath,
+		Name: enc.FromStandardPath(bucketPath),
 	}
 	var response api.File
 	err = f.pacer.Call(func() (bool, error) {
@@ -1077,7 +1082,7 @@ func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
 	}
 	var request = api.DeleteFileRequest{
 		ID: ID,
-		Name: Name,
+		Name: enc.FromStandardPath(Name),
 	}
 	var response api.File
 	err := f.pacer.Call(func() (bool, error) {
@@ -1215,7 +1220,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 	var request = api.CopyFileRequest{
 		SourceID: srcObj.id,
-		Name: dstPath,
+		Name: enc.FromStandardPath(dstPath),
 		MetadataDirective: "COPY",
 		DestBucketID: destBucketID,
 	}
@@ -1263,7 +1268,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
 	}
 	var request = api.GetDownloadAuthorizationRequest{
 		BucketID: bucketID,
-		FileNamePrefix: path.Join(f.root, remote),
+		FileNamePrefix: enc.FromStandardPath(path.Join(f.root, remote)),
 		ValidDurationInSeconds: validDurationInSeconds,
 	}
 	var response api.GetDownloadAuthorizationResponse
@@ -1498,7 +1503,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	}
 	var request = api.CopyFileRequest{
 		SourceID: o.id,
-		Name: bucketPath, // copy to same name
+		Name: enc.FromStandardPath(bucketPath), // copy to same name
 		MetadataDirective: "REPLACE",
 		ContentType: info.ContentType,
 		Info: info.Info,
@@ -1600,7 +1605,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
 	} else {
 		bucket, bucketPath := o.split()
-		opts.Path += "/file/" + urlEncode(bucket) + "/" + urlEncode(bucketPath)
+		opts.Path += "/file/" + urlEncode(enc.FromStandardName(bucket)) + "/" + urlEncode(enc.FromStandardPath(bucketPath))
 	}
 	var resp *http.Response
 	err = o.fs.pacer.Call(func() (bool, error) {
@@ -1797,7 +1802,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		Body: in,
 		ExtraHeaders: map[string]string{
 			"Authorization": upload.AuthorizationToken,
-			"X-Bz-File-Name": urlEncode(bucketPath),
+			"X-Bz-File-Name": urlEncode(enc.FromStandardPath(bucketPath)),
 			"Content-Type": fs.MimeType(ctx, src),
 			sha1Header: calculatedSha1,
 			timeHeader: timeString(modTime),

backend/b2/upload.go (filename inferred from newLargeUpload)
@@ -111,7 +111,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 	}
 	var request = api.StartLargeFileRequest{
 		BucketID: bucketID,
-		Name: bucketPath,
+		Name: enc.FromStandardPath(bucketPath),
 		ContentType: fs.MimeType(ctx, src),
 		Info: map[string]string{
 			timeKey: timeString(modTime),

backend/box/api/types.go (filename inferred from the Box API types)
@@ -202,3 +202,23 @@ type CommitUpload struct {
 		ContentModifiedAt Time `json:"content_modified_at"`
 	} `json:"attributes"`
 }
+
+// ConfigJSON defines the shape of a box config.json
+type ConfigJSON struct {
+	BoxAppSettings AppSettings `json:"boxAppSettings"`
+	EnterpriseID   string      `json:"enterpriseID"`
+}
+
+// AppSettings defines the shape of the boxAppSettings within box config.json
+type AppSettings struct {
+	ClientID     string  `json:"clientID"`
+	ClientSecret string  `json:"clientSecret"`
+	AppAuth      AppAuth `json:"appAuth"`
+}
+
+// AppAuth defines the shape of the appAuth within boxAppSettings in config.json
+type AppAuth struct {
+	PublicKeyID string `json:"publicKeyID"`
+	PrivateKey  string `json:"privateKey"`
+	Passphrase  string `json:"passphrase"`
+}
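Given the struct tags added above, a Box JWT app's config.json would deserialize into `ConfigJSON` like the sketch below. The field names come from the struct tags; all values are placeholders:

```json
{
  "boxAppSettings": {
    "clientID": "abc123",
    "clientSecret": "secret",
    "appAuth": {
      "publicKeyID": "xyz789",
      "privateKey": "-----BEGIN ENCRYPTED PRIVATE KEY-----\n...\n-----END ENCRYPTED PRIVATE KEY-----\n",
      "passphrase": "passphrase"
    }
  },
  "enterpriseID": "1234567"
}
```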
@@ -11,8 +11,12 @@ package box

 import (
	"context"
+	"crypto/rsa"
+	"encoding/json"
+	"encoding/pem"
	"fmt"
	"io"
	"io/ioutil"
+	"log"
	"net/http"
	"net/url"
@@ -21,6 +25,10 @@ import (
	"strings"
	"time"

+	"github.com/rclone/rclone/lib/jwtutil"
+
+	"github.com/youmark/pkcs8"
+
	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/box/api"
	"github.com/rclone/rclone/fs"
@@ -28,15 +36,20 @@ import (
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
+	"github.com/rclone/rclone/fs/encodings"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/dircache"
	"github.com/rclone/rclone/lib/oauthutil"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/rest"
	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/jws"
)

+const enc = encodings.Box
+
const (
	rcloneClientID              = "d0374ba6pgmaguie02ge15sv1mllndho"
	rcloneEncryptedClientSecret = "sYbJYm99WB8jzeaLPU0OPDMJKIkZvD2qOn3SyEMfiJr03RdtDt3xcZEIudRhbIDL"
@@ -49,6 +62,7 @@ const (
	listChunks          = 1000     // chunk size to read directory listings
	minUploadCutoff     = 50000000 // upload cutoff can be no lower than this
	defaultUploadCutoff = 50 * 1024 * 1024
+	tokenURL            = "https://api.box.com/oauth2/token"
)

// Globals
@@ -73,9 +87,34 @@ func init() {
		Description: "Box",
		NewFs:       NewFs,
		Config: func(name string, m configmap.Mapper) {
-			err := oauthutil.Config("box", name, m, oauthConfig)
-			if err != nil {
-				log.Fatalf("Failed to configure token: %v", err)
+			jsonFile, ok := m.Get("box_config_file")
+			boxSubType, boxSubTypeOk := m.Get("box_sub_type")
+			var err error
+			if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
+				boxConfig, err := getBoxConfig(jsonFile)
+				if err != nil {
+					log.Fatalf("Failed to configure token: %v", err)
+				}
+				privateKey, err := getDecryptedPrivateKey(boxConfig)
+				if err != nil {
+					log.Fatalf("Failed to configure token: %v", err)
+				}
+				claims, err := getClaims(boxConfig, boxSubType)
+				if err != nil {
+					log.Fatalf("Failed to configure token: %v", err)
+				}
+				signingHeaders := getSigningHeaders(boxConfig)
+				queryParams := getQueryParams(boxConfig)
+				client := fshttp.NewClient(fs.Config)
+				err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
+				if err != nil {
+					log.Fatalf("Failed to configure token with jwt authentication: %v", err)
+				}
+			} else {
+				err = oauthutil.Config("box", name, m, oauthConfig)
+				if err != nil {
+					log.Fatalf("Failed to configure token with oauth authentication: %v", err)
+				}
			}
		},
		Options: []fs.Option{{
@@ -84,6 +123,19 @@ func init() {
		}, {
			Name: config.ConfigClientSecret,
			Help: "Box App Client Secret\nLeave blank normally.",
		}, {
+			Name: "box_config_file",
+			Help: "Box App config.json location\nLeave blank normally.",
+		}, {
+			Name:    "box_sub_type",
+			Default: "user",
+			Examples: []fs.OptionExample{{
+				Value: "user",
+				Help:  "Rclone should act on behalf of a user",
+			}, {
+				Value: "enterprise",
+				Help:  "Rclone should act on behalf of a service account",
+			}},
		}, {
			Name: "upload_cutoff",
			Help: "Cutoff for switching to multipart upload (>= 50MB).",
@@ -98,6 +150,74 @@ func init() {
	})
}

+func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
+	file, err := ioutil.ReadFile(configFile)
+	if err != nil {
+		return nil, errors.Wrap(err, "box: failed to read Box config")
+	}
+	err = json.Unmarshal(file, &boxConfig)
+	if err != nil {
+		return nil, errors.Wrap(err, "box: failed to parse Box config")
+	}
+	return boxConfig, nil
+}
+
+func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
+	val, err := jwtutil.RandomHex(20)
+	if err != nil {
+		return nil, errors.Wrap(err, "box: failed to generate random string for jti")
+	}
+
+	claims = &jws.ClaimSet{
+		Iss: boxConfig.BoxAppSettings.ClientID,
+		Sub: boxConfig.EnterpriseID,
+		Aud: tokenURL,
+		Iat: time.Now().Unix(),
+		Exp: time.Now().Add(time.Second * 45).Unix(),
+		PrivateClaims: map[string]interface{}{
+			"box_sub_type": boxSubType,
+			"aud":          tokenURL,
+			"jti":          val,
+		},
+	}
+
+	return claims, nil
+}
+
+func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
+	signingHeaders := &jws.Header{
+		Algorithm: "RS256",
+		Typ:       "JWT",
+		KeyID:     boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
+	}
+
+	return signingHeaders
+}
+
+func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
+	queryParams := map[string]string{
+		"client_id":     boxConfig.BoxAppSettings.ClientID,
+		"client_secret": boxConfig.BoxAppSettings.ClientSecret,
+	}
+
+	return queryParams
+}
+
+func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
+
+	block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
+	if len(rest) > 0 {
+		return nil, errors.Wrap(err, "box: extra data included in private key")
+	}
+
+	rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase))
+	if err != nil {
+		return nil, errors.Wrap(err, "box: failed to decrypt private key")
+	}
+
+	return rsaKey.(*rsa.PrivateKey), nil
+}
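Taken together, these helpers produce the signed JWT assertion that jwtutil exchanges for an OAuth token. A rough sketch of how they compose, assuming golang.org/x/oauth2/jws and the functions above; this is a hypothetical helper that mirrors, but is not, the exact wiring inside jwtutil.Config:

	// buildAssertion composes the helpers above into a compact, signed JWT.
	func buildAssertion(cfg *api.ConfigJSON, subType string) (string, error) {
		key, err := getDecryptedPrivateKey(cfg)
		if err != nil {
			return "", err
		}
		claims, err := getClaims(cfg, subType)
		if err != nil {
			return "", err
		}
		// jws.Encode signs the header and claim set with RS256.
		return jws.Encode(getSigningHeaders(cfg), claims, key)
	}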
// Options defines the configuration for this backend
type Options struct {
	UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
@@ -181,18 +301,6 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
	return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

-// substitute reserved characters for box
-func replaceReservedChars(x string) string {
-	// Backslash for FULLWIDTH REVERSE SOLIDUS
-	return strings.Replace(x, "\\", "＼", -1)
-}
-
-// restore reserved characters for box
-func restoreReservedChars(x string) string {
-	// FULLWIDTH REVERSE SOLIDUS for Backslash
-	return strings.Replace(x, "＼", "\\", -1)
-}
-
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
	// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
@@ -380,7 +488,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
		Parameters: fieldsValue(),
	}
	mkdir := api.CreateFolder{
-		Name: replaceReservedChars(leaf),
+		Name: enc.FromStandardName(leaf),
		Parent: api.Parent{
			ID: pathID,
		},
@@ -446,7 +554,7 @@ OUTER:
		if item.ItemStatus != api.ItemStatusActive {
			continue
		}
-		item.Name = restoreReservedChars(item.Name)
+		item.Name = enc.ToStandardName(item.Name)
		if fn(item) {
			found = true
			break OUTER
@@ -682,9 +790,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
		Path:       "/files/" + srcObj.id + "/copy",
		Parameters: fieldsValue(),
	}
-	replacedLeaf := replaceReservedChars(leaf)
	copyFile := api.CopyFile{
-		Name: replacedLeaf,
+		Name: enc.FromStandardName(leaf),
		Parent: api.Parent{
			ID: directoryID,
		},
@@ -723,7 +830,7 @@ func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (
		Parameters: fieldsValue(),
	}
	move := api.UpdateFileMove{
-		Name: replaceReservedChars(leaf),
+		Name: enc.FromStandardName(leaf),
		Parent: api.Parent{
			ID: directoryID,
		},
@@ -924,11 +1031,6 @@ func (o *Object) Remote() string {
	return o.remote
}

-// srvPath returns a path for use in server
-func (o *Object) srvPath() string {
-	return replaceReservedChars(o.fs.rootSlash() + o.remote)
-}
-
// Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.SHA1 {
@@ -1053,7 +1155,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// This is recommended for less than 50 MB of content
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
	upload := api.UploadFile{
-		Name:              replaceReservedChars(leaf),
+		Name:              enc.FromStandardName(leaf),
		ContentModifiedAt: api.Time(modTime),
		ContentCreatedAt:  api.Time(modTime),
		Parent: api.Parent{

@@ -38,7 +38,7 @@ func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID stri
	} else {
		opts.Path = "/files/upload_sessions"
		request.FolderID = directoryID
-		request.FileName = replaceReservedChars(leaf)
+		request.FileName = enc.FromStandardName(leaf)
	}
	var resp *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
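The change running through all of these backends is the same: per-backend replaceReservedChars/restoreReservedChars helpers give way to an encoder from fs/encodings. A minimal sketch of the round-trip, assuming the API exactly as it is used in this diff:

	package main

	import (
		"fmt"

		"github.com/rclone/rclone/fs/encodings"
	)

	const enc = encodings.Box

	func main() {
		// Box cannot store a raw backslash, so the encoder maps it to a
		// full-width lookalike on upload and restores it when listing.
		wire := enc.FromStandardName(`back\slash`)
		fmt.Println(wire)                     // back＼slash
		fmt.Println(enc.ToStandardName(wire)) // back\slash
	}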
backend/cache/cache_test.go (vendored, 1 line changed)
@@ -19,5 +19,6 @@ func TestIntegration(t *testing.T) {
		NilObject:                    (*cache.Object)(nil),
		UnimplementableFsMethods:     []string{"PublicLink", "MergeDirs", "OpenWriterAt"},
		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
+		SkipInvalidUTF8:              true, // invalid UTF-8 confuses the cache
	})
}
backend/chunker/chunker.go (new file, 2199 lines; diff suppressed because it is too large)

backend/chunker/chunker_internal_test.go (new file, 605 lines)
@@ -0,0 +1,605 @@
package chunker

import (
	"bytes"
	"context"
	"flag"
	"fmt"
	"io/ioutil"
	"path"
	"regexp"
	"strings"
	"testing"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
	"github.com/rclone/rclone/lib/random"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Command line flags
var (
	UploadKilobytes = flag.Int("upload-kilobytes", 0, "Upload size in Kilobytes, set this to test large uploads")
)

// test that chunking does not break large uploads
func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
	t.Run(fmt.Sprintf("PutLarge%dk", kilobytes), func(t *testing.T) {
		fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
			ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
			Path:    fmt.Sprintf("chunker-upload-%dk", kilobytes),
			Size:    int64(kilobytes) * int64(fs.KibiByte),
		})
	})
}

// test chunk name parser
func testChunkNameFormat(t *testing.T, f *Fs) {
	saveOpt := f.opt
	defer func() {
		// restore original settings (f is pointer, f.opt is struct)
		f.opt = saveOpt
		_ = f.setChunkNameFormat(f.opt.NameFormat)
	}()

	assertFormat := func(pattern, wantDataFormat, wantCtrlFormat, wantNameRegexp string) {
		err := f.setChunkNameFormat(pattern)
		assert.NoError(t, err)
		assert.Equal(t, wantDataFormat, f.dataNameFmt)
		assert.Equal(t, wantCtrlFormat, f.ctrlNameFmt)
		assert.Equal(t, wantNameRegexp, f.nameRegexp.String())
	}

	assertFormatValid := func(pattern string) {
		err := f.setChunkNameFormat(pattern)
		assert.NoError(t, err)
	}

	assertFormatInvalid := func(pattern string) {
		err := f.setChunkNameFormat(pattern)
		assert.Error(t, err)
	}

	assertMakeName := func(wantChunkName, mainName string, chunkNo int, ctrlType string, xactNo int64) {
		gotChunkName := f.makeChunkName(mainName, chunkNo, ctrlType, xactNo)
		assert.Equal(t, wantChunkName, gotChunkName)
	}

	assertMakeNamePanics := func(mainName string, chunkNo int, ctrlType string, xactNo int64) {
		assert.Panics(t, func() {
			_ = f.makeChunkName(mainName, chunkNo, ctrlType, xactNo)
		}, "makeChunkName(%q,%d,%q,%d) should panic", mainName, chunkNo, ctrlType, xactNo)
	}

	assertParseName := func(fileName, wantMainName string, wantChunkNo int, wantCtrlType string, wantXactNo int64) {
		gotMainName, gotChunkNo, gotCtrlType, gotXactNo := f.parseChunkName(fileName)
		assert.Equal(t, wantMainName, gotMainName)
		assert.Equal(t, wantChunkNo, gotChunkNo)
		assert.Equal(t, wantCtrlType, gotCtrlType)
		assert.Equal(t, wantXactNo, gotXactNo)
	}

	const newFormatSupported = false // support for patterns not starting with base name (*)

	// valid formats
	assertFormat(`*.rclone_chunk.###`, `%s.rclone_chunk.%03d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
	assertFormat(`*.rclone_chunk.#`, `%s.rclone_chunk.%d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
	assertFormat(`*_chunk_#####`, `%s_chunk_%05d`, `%s_chunk__%s`, `^(.+?)_chunk_(?:([0-9]{5,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
	assertFormat(`*-chunk-#`, `%s-chunk-%d`, `%s-chunk-_%s`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
	assertFormat(`*-chunk-#-%^$()[]{}.+-!?:\`, `%s-chunk-%d-%%^$()[]{}.+-!?:\`, `%s-chunk-_%s-%%^$()[]{}.+-!?:\`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z]{3,9}))-%\^\$\(\)\[\]\{\}\.\+-!\?:\\(?:\.\.tmp_([0-9]{10,19}))?$`)
	if newFormatSupported {
		assertFormat(`_*-chunk-##,`, `_%s-chunk-%02d,`, `_%s-chunk-_%s,`, `^_(.+?)-chunk-(?:([0-9]{2,})|_([a-z]{3,9})),(?:\.\.tmp_([0-9]{10,19}))?$`)
	}

	// invalid formats
	assertFormatInvalid(`chunk-#`)
	assertFormatInvalid(`*-chunk`)
	assertFormatInvalid(`*-*-chunk-#`)
	assertFormatInvalid(`*-chunk-#-#`)
	assertFormatInvalid(`#-chunk-*`)
	assertFormatInvalid(`*/#`)

	assertFormatValid(`*#`)
	assertFormatInvalid(`**#`)
	assertFormatInvalid(`#*`)
	assertFormatInvalid(``)
	assertFormatInvalid(`-`)

	// quick tests
	if newFormatSupported {
		assertFormat(`part_*_#`, `part_%s_%d`, `part_%s__%s`, `^part_(.+?)_(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
		f.opt.StartFrom = 1

		assertMakeName(`part_fish_1`, "fish", 0, "", -1)
		assertParseName(`part_fish_43`, "fish", 42, "", -1)
		assertMakeName(`part_fish_3..tmp_0000000004`, "fish", 2, "", 4)
		assertParseName(`part_fish_4..tmp_0000000005`, "fish", 3, "", 5)
		assertMakeName(`part_fish__locks`, "fish", -2, "locks", -3)
		assertParseName(`part_fish__locks`, "fish", -1, "locks", -1)
		assertMakeName(`part_fish__blockinfo..tmp_1234567890123456789`, "fish", -3, "blockinfo", 1234567890123456789)
		assertParseName(`part_fish__blockinfo..tmp_1234567890123456789`, "fish", -1, "blockinfo", 1234567890123456789)
	}

	// prepare format for long tests
	assertFormat(`*.chunk.###`, `%s.chunk.%03d`, `%s.chunk._%s`, `^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
	f.opt.StartFrom = 2

	// valid data chunks
	assertMakeName(`fish.chunk.003`, "fish", 1, "", -1)
	assertMakeName(`fish.chunk.011..tmp_0000054321`, "fish", 9, "", 54321)
	assertMakeName(`fish.chunk.011..tmp_1234567890`, "fish", 9, "", 1234567890)
	assertMakeName(`fish.chunk.1916..tmp_123456789012345`, "fish", 1914, "", 123456789012345)

	assertParseName(`fish.chunk.003`, "fish", 1, "", -1)
	assertParseName(`fish.chunk.004..tmp_0000000021`, "fish", 2, "", 21)
	assertParseName(`fish.chunk.021`, "fish", 19, "", -1)
	assertParseName(`fish.chunk.323..tmp_1234567890123456789`, "fish", 321, "", 1234567890123456789)

	// parsing invalid data chunk names
	assertParseName(`fish.chunk.3`, "", -1, "", -1)
	assertParseName(`fish.chunk.001`, "", -1, "", -1)
	assertParseName(`fish.chunk.21`, "", -1, "", -1)
	assertParseName(`fish.chunk.-21`, "", -1, "", -1)

	assertParseName(`fish.chunk.004.tmp_0000000021`, "", -1, "", -1)
	assertParseName(`fish.chunk.003..tmp_123456789`, "", -1, "", -1)
	assertParseName(`fish.chunk.003..tmp_012345678901234567890123456789`, "", -1, "", -1)
	assertParseName(`fish.chunk.003..tmp_-1`, "", -1, "", -1)

	// valid control chunks
	assertMakeName(`fish.chunk._info`, "fish", -1, "info", -1)
	assertMakeName(`fish.chunk._locks`, "fish", -2, "locks", -1)
	assertMakeName(`fish.chunk._blockinfo`, "fish", -3, "blockinfo", -1)

	assertParseName(`fish.chunk._info`, "fish", -1, "info", -1)
	assertParseName(`fish.chunk._locks`, "fish", -1, "locks", -1)
	assertParseName(`fish.chunk._blockinfo`, "fish", -1, "blockinfo", -1)

	// valid temporary control chunks
	assertMakeName(`fish.chunk._info..tmp_0000000021`, "fish", -1, "info", 21)
	assertMakeName(`fish.chunk._locks..tmp_0000054321`, "fish", -2, "locks", 54321)
	assertMakeName(`fish.chunk._uploads..tmp_0000000000`, "fish", -3, "uploads", 0)
	assertMakeName(`fish.chunk._blockinfo..tmp_1234567890123456789`, "fish", -4, "blockinfo", 1234567890123456789)

	assertParseName(`fish.chunk._info..tmp_0000000021`, "fish", -1, "info", 21)
	assertParseName(`fish.chunk._locks..tmp_0000054321`, "fish", -1, "locks", 54321)
	assertParseName(`fish.chunk._uploads..tmp_0000000000`, "fish", -1, "uploads", 0)
	assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789`, "fish", -1, "blockinfo", 1234567890123456789)

	// parsing invalid control chunk names
	assertParseName(`fish.chunk.info`, "", -1, "", -1)
	assertParseName(`fish.chunk.locks`, "", -1, "", -1)
	assertParseName(`fish.chunk.uploads`, "", -1, "", -1)
	assertParseName(`fish.chunk.blockinfo`, "", -1, "", -1)

	assertParseName(`fish.chunk._os`, "", -1, "", -1)
	assertParseName(`fish.chunk._futuredata`, "", -1, "", -1)
	assertParseName(`fish.chunk._me_ta`, "", -1, "", -1)
	assertParseName(`fish.chunk._in-fo`, "", -1, "", -1)
	assertParseName(`fish.chunk._.bin`, "", -1, "", -1)

	assertParseName(`fish.chunk._locks..tmp_123456789`, "", -1, "", -1)
	assertParseName(`fish.chunk._meta..tmp_-1`, "", -1, "", -1)
	assertParseName(`fish.chunk._blockinfo..tmp_012345678901234567890123456789`, "", -1, "", -1)

	// short control chunk names: 3 letters ok, 1-2 letters not allowed
	assertMakeName(`fish.chunk._ext`, "fish", -1, "ext", -1)
	assertMakeName(`fish.chunk._ext..tmp_0000000021`, "fish", -1, "ext", 21)
	assertParseName(`fish.chunk._int`, "fish", -1, "int", -1)
	assertParseName(`fish.chunk._int..tmp_0000000021`, "fish", -1, "int", 21)
	assertMakeNamePanics("fish", -1, "in", -1)
	assertMakeNamePanics("fish", -1, "up", 4)
	assertMakeNamePanics("fish", -1, "x", -1)
	assertMakeNamePanics("fish", -1, "c", 4)

	// base file name can sometimes look like a valid chunk name
	assertParseName(`fish.chunk.003.chunk.004`, "fish.chunk.003", 2, "", -1)
	assertParseName(`fish.chunk.003.chunk.005..tmp_0000000021`, "fish.chunk.003", 3, "", 21)
	assertParseName(`fish.chunk.003.chunk._info`, "fish.chunk.003", -1, "info", -1)
	assertParseName(`fish.chunk.003.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk.003", -1, "blockinfo", 1234567890123456789)
	assertParseName(`fish.chunk.003.chunk._Meta`, "", -1, "", -1)
	assertParseName(`fish.chunk.003.chunk._x..tmp_0000054321`, "", -1, "", -1)

	assertParseName(`fish.chunk.004..tmp_0000000021.chunk.004`, "fish.chunk.004..tmp_0000000021", 2, "", -1)
	assertParseName(`fish.chunk.004..tmp_0000000021.chunk.005..tmp_0000000021`, "fish.chunk.004..tmp_0000000021", 3, "", 21)
	assertParseName(`fish.chunk.004..tmp_0000000021.chunk._info`, "fish.chunk.004..tmp_0000000021", -1, "info", -1)
	assertParseName(`fish.chunk.004..tmp_0000000021.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk.004..tmp_0000000021", -1, "blockinfo", 1234567890123456789)
	assertParseName(`fish.chunk.004..tmp_0000000021.chunk._Meta`, "", -1, "", -1)
	assertParseName(`fish.chunk.004..tmp_0000000021.chunk._x..tmp_0000054321`, "", -1, "", -1)

	assertParseName(`fish.chunk._info.chunk.004`, "fish.chunk._info", 2, "", -1)
	assertParseName(`fish.chunk._info.chunk.005..tmp_0000000021`, "fish.chunk._info", 3, "", 21)
	assertParseName(`fish.chunk._info.chunk._info`, "fish.chunk._info", -1, "info", -1)
	assertParseName(`fish.chunk._info.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk._info", -1, "blockinfo", 1234567890123456789)
	assertParseName(`fish.chunk._info.chunk._info.chunk._Meta`, "", -1, "", -1)
	assertParseName(`fish.chunk._info.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", -1)

	assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk.004`, "fish.chunk._blockinfo..tmp_1234567890123456789", 2, "", -1)
	assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk.005..tmp_0000000021`, "fish.chunk._blockinfo..tmp_1234567890123456789", 3, "", 21)
	assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info`, "fish.chunk._blockinfo..tmp_1234567890123456789", -1, "info", -1)
	assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk._blockinfo..tmp_1234567890123456789", -1, "blockinfo", 1234567890123456789)
	assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info.chunk._Meta`, "", -1, "", -1)
	assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", -1)

	// attempts to make invalid chunk names
	assertMakeNamePanics("fish", -1, "", -1)           // neither data nor control
	assertMakeNamePanics("fish", 0, "info", -1)        // both data and control
	assertMakeNamePanics("fish", -1, "futuredata", -1) // control type too long
	assertMakeNamePanics("fish", -1, "123", -1)        // digits not allowed
	assertMakeNamePanics("fish", -1, "Meta", -1)       // only lower case letters allowed
	assertMakeNamePanics("fish", -1, "in-fo", -1)      // punctuation not allowed
	assertMakeNamePanics("fish", -1, "_info", -1)
	assertMakeNamePanics("fish", -1, "info_", -1)
	assertMakeNamePanics("fish", -2, ".bind", -3)
	assertMakeNamePanics("fish", -2, "bind.", -3)

	assertMakeNamePanics("fish", -1, "", 1)            // neither data nor control
	assertMakeNamePanics("fish", 0, "info", 12)        // both data and control
	assertMakeNamePanics("fish", -1, "futuredata", 45) // control type too long
	assertMakeNamePanics("fish", -1, "123", 123)       // digits not allowed
	assertMakeNamePanics("fish", -1, "Meta", 456)      // only lower case letters allowed
	assertMakeNamePanics("fish", -1, "in-fo", 321)     // punctuation not allowed
	assertMakeNamePanics("fish", -1, "_info", 15678)
	assertMakeNamePanics("fish", -1, "info_", 999)
	assertMakeNamePanics("fish", -2, ".bind", 0)
	assertMakeNamePanics("fish", -2, "bind.", 0)
}

func testSmallFileInternals(t *testing.T, f *Fs) {
	const dir = "small"
	ctx := context.Background()
	saveOpt := f.opt
	defer func() {
		f.opt.FailHard = false
		_ = operations.Purge(ctx, f.base, dir)
		f.opt = saveOpt
	}()
	f.opt.FailHard = false

	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")

	checkSmallFileInternals := func(obj fs.Object) {
		assert.NotNil(t, obj)
		o, ok := obj.(*Object)
		assert.True(t, ok)
		assert.NotNil(t, o)
		if o == nil {
			return
		}
		switch {
		case !f.useMeta:
			// If meta format is "none", non-chunked file (even empty)
			// internally is a single chunk without meta object.
			assert.Nil(t, o.main)
			assert.True(t, o.isComposite()) // sorry, sometimes a name is misleading
			assert.Equal(t, 1, len(o.chunks))
		case f.hashAll:
			// Consistent hashing forces meta object on small files too
			assert.NotNil(t, o.main)
			assert.True(t, o.isComposite())
			assert.Equal(t, 1, len(o.chunks))
		default:
			// normally non-chunked file is kept in the Object's main field
			assert.NotNil(t, o.main)
			assert.False(t, o.isComposite())
			assert.Equal(t, 0, len(o.chunks))
		}
	}

	checkContents := func(obj fs.Object, contents string) {
		assert.NotNil(t, obj)
		assert.Equal(t, int64(len(contents)), obj.Size())

		r, err := obj.Open(ctx)
		assert.NoError(t, err)
		assert.NotNil(t, r)
		if r == nil {
			return
		}
		data, err := ioutil.ReadAll(r)
		assert.NoError(t, err)
		assert.Equal(t, contents, string(data))
		_ = r.Close()
	}

	checkHashsum := func(obj fs.Object) {
		var ht hash.Type
		switch {
		case !f.hashAll:
			return
		case f.useMD5:
			ht = hash.MD5
		case f.useSHA1:
			ht = hash.SHA1
		default:
			return
		}
		// even empty files must have hashsum in consistent mode
		sum, err := obj.Hash(ctx, ht)
		assert.NoError(t, err)
		assert.NotEqual(t, sum, "")
	}

	checkSmallFile := func(name, contents string) {
		filename := path.Join(dir, name)
		item := fstest.Item{Path: filename, ModTime: modTime}
		_, put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
		assert.NotNil(t, put)
		checkSmallFileInternals(put)
		checkContents(put, contents)
		checkHashsum(put)

		// objects returned by Put and NewObject must have similar structure
		obj, err := f.NewObject(ctx, filename)
		assert.NoError(t, err)
		assert.NotNil(t, obj)
		checkSmallFileInternals(obj)
		checkContents(obj, contents)
		checkHashsum(obj)

		_ = obj.Remove(ctx)
		_ = put.Remove(ctx) // for good
	}

	checkSmallFile("emptyfile", "")
	checkSmallFile("smallfile", "Ok")
}

func testPreventCorruption(t *testing.T, f *Fs) {
	if f.opt.ChunkSize > 50 {
		t.Skip("this test requires small chunks")
	}
	const dir = "corrupted"
	ctx := context.Background()
	saveOpt := f.opt
	defer func() {
		f.opt.FailHard = false
		_ = operations.Purge(ctx, f.base, dir)
		f.opt = saveOpt
	}()
	f.opt.FailHard = true

	contents := random.String(250)
	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
	const overlapMessage = "chunk overlap"

	assertOverlapError := func(err error) {
		assert.Error(t, err)
		if err != nil {
			assert.Contains(t, err.Error(), overlapMessage)
		}
	}

	newFile := func(name string) fs.Object {
		item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime}
		_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
		require.NotNil(t, obj)
		return obj
	}
	billyObj := newFile("billy")

	billyChunkName := func(chunkNo int) string {
		return f.makeChunkName(billyObj.Remote(), chunkNo, "", -1)
	}

	err := f.Mkdir(ctx, billyChunkName(1))
	assertOverlapError(err)

	_, err = f.Move(ctx, newFile("silly1"), billyChunkName(2))
	assert.Error(t, err)
	assert.True(t, err == fs.ErrorCantMove || (err != nil && strings.Contains(err.Error(), overlapMessage)))

	_, err = f.Copy(ctx, newFile("silly2"), billyChunkName(3))
	assert.Error(t, err)
	assert.True(t, err == fs.ErrorCantCopy || (err != nil && strings.Contains(err.Error(), overlapMessage)))

	// accessing chunks in strict mode is prohibited
	f.opt.FailHard = true
	billyChunk4Name := billyChunkName(4)
	billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
	assertOverlapError(err)

	f.opt.FailHard = false
	billyChunk4, err = f.NewObject(ctx, billyChunk4Name)
	assert.NoError(t, err)
	require.NotNil(t, billyChunk4)

	f.opt.FailHard = true
	_, err = f.Put(ctx, bytes.NewBufferString(contents), billyChunk4)
	assertOverlapError(err)

	// you can freely read chunks (if you have an object)
	r, err := billyChunk4.Open(ctx)
	assert.NoError(t, err)
	var chunkContents []byte
	assert.NotPanics(t, func() {
		chunkContents, err = ioutil.ReadAll(r)
		_ = r.Close()
	})
	assert.NoError(t, err)
	assert.NotEqual(t, contents, string(chunkContents))

	// but you can't change them
	err = billyChunk4.Update(ctx, bytes.NewBufferString(contents), newFile("silly3"))
	assertOverlapError(err)

	// Remove isn't special, you can't corrupt files even if you have an object
	err = billyChunk4.Remove(ctx)
	assertOverlapError(err)

	// recreate billy in case it was anyhow corrupted
	willyObj := newFile("willy")
	willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", -1)
	f.opt.FailHard = false
	willyChunk, err := f.NewObject(ctx, willyChunkName)
	f.opt.FailHard = true
	assert.NoError(t, err)
	require.NotNil(t, willyChunk)

	_, err = operations.Copy(ctx, f, willyChunk, willyChunkName, newFile("silly4"))
	assertOverlapError(err)

	// operations.Move will return error when chunker's Move refused
	// to corrupt target file, but reverts to copy/delete method
	// still trying to delete target chunk. Chunker must come to rescue.
	_, err = operations.Move(ctx, f, willyChunk, willyChunkName, newFile("silly5"))
	assertOverlapError(err)
	r, err = willyChunk.Open(ctx)
	assert.NoError(t, err)
	assert.NotPanics(t, func() {
		_, err = ioutil.ReadAll(r)
		_ = r.Close()
	})
	assert.NoError(t, err)
}

func testChunkNumberOverflow(t *testing.T, f *Fs) {
	if f.opt.ChunkSize > 50 {
		t.Skip("this test requires small chunks")
	}
	const dir = "wreaked"
	const wreakNumber = 10200300
	ctx := context.Background()
	saveOpt := f.opt
	defer func() {
		f.opt.FailHard = false
		_ = operations.Purge(ctx, f.base, dir)
		f.opt = saveOpt
	}()

	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
	contents := random.String(100)

	newFile := func(f fs.Fs, name string) (fs.Object, string) {
		filename := path.Join(dir, name)
		item := fstest.Item{Path: filename, ModTime: modTime}
		_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
		require.NotNil(t, obj)
		return obj, filename
	}

	f.opt.FailHard = false
	file, fileName := newFile(f, "wreaker")
	wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", -1))

	f.opt.FailHard = false
	fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
	_, err := f.NewObject(ctx, fileName)
	assert.Error(t, err)

	f.opt.FailHard = true
	_, err = f.List(ctx, dir)
	assert.Error(t, err)
	_, err = f.NewObject(ctx, fileName)
	assert.Error(t, err)

	f.opt.FailHard = false
	_ = wreak.Remove(ctx)
	_ = file.Remove(ctx)
}

func testMetadataInput(t *testing.T, f *Fs) {
	const minChunkForTest = 50
	if f.opt.ChunkSize < minChunkForTest {
		t.Skip("this test requires chunks that fit metadata")
	}

	const dir = "usermeta"
	ctx := context.Background()
	saveOpt := f.opt
	defer func() {
		f.opt.FailHard = false
		_ = operations.Purge(ctx, f.base, dir)
		f.opt = saveOpt
	}()
	f.opt.FailHard = false

	modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")

	putFile := func(f fs.Fs, name, contents, message string, check bool) fs.Object {
		item := fstest.Item{Path: name, ModTime: modTime}
		_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
		assert.NotNil(t, obj, message)
		return obj
	}

	runSubtest := func(contents, name string) {
		description := fmt.Sprintf("file with %s metadata", name)
		filename := path.Join(dir, name)
		require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")

		part := putFile(f.base, f.makeChunkName(filename, 0, "", -1), "oops", "", true)
		_ = putFile(f, filename, contents, "upload "+description, false)

		obj, err := f.NewObject(ctx, filename)
		assert.NoError(t, err, "access "+description)
		assert.NotNil(t, obj)
		assert.Equal(t, int64(len(contents)), obj.Size(), "size "+description)

		o, ok := obj.(*Object)
		assert.NotNil(t, ok)
		if o != nil {
			assert.True(t, o.isComposite() && len(o.chunks) == 1, description+" is forced composite")
			o = nil
		}

		defer func() {
			_ = obj.Remove(ctx)
			_ = part.Remove(ctx)
		}()

		r, err := obj.Open(ctx)
		assert.NoError(t, err, "open "+description)
		assert.NotNil(t, r, "open stream of "+description)
		if err == nil && r != nil {
			data, err := ioutil.ReadAll(r)
			assert.NoError(t, err, "read all of "+description)
			assert.Equal(t, contents, string(data), description+" contents is ok")
			_ = r.Close()
		}
	}

	metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "")
	require.NoError(t, err)
	todaysMeta := string(metaData)
	runSubtest(todaysMeta, "today")

	pastMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":1`)
	pastMeta = regexp.MustCompile(`"size":[0-9]+`).ReplaceAllLiteralString(pastMeta, `"size":0`)
	runSubtest(pastMeta, "past")

	futureMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":999`)
	futureMeta = regexp.MustCompile(`"nchunks":[0-9]+`).ReplaceAllLiteralString(futureMeta, `"nchunks":0,"x":"y"`)
	runSubtest(futureMeta, "future")
}

// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
	t.Run("PutLarge", func(t *testing.T) {
		if *UploadKilobytes <= 0 {
			t.Skip("-upload-kilobytes is not set")
		}
		testPutLarge(t, f, *UploadKilobytes)
	})
	t.Run("ChunkNameFormat", func(t *testing.T) {
		testChunkNameFormat(t, f)
	})
	t.Run("SmallFileInternals", func(t *testing.T) {
		testSmallFileInternals(t, f)
	})
	t.Run("PreventCorruption", func(t *testing.T) {
		testPreventCorruption(t, f)
	})
	t.Run("ChunkNumberOverflow", func(t *testing.T) {
		testChunkNumberOverflow(t, f)
	})
	t.Run("MetadataInput", func(t *testing.T) {
		testMetadataInput(t, f)
	})
}

var _ fstests.InternalTester = (*Fs)(nil)
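As the assertions above imply, the decimal in a data chunk name is the zero-based chunk index plus the start_from option (2 in these tests), zero-padded to the width given by the run of # characters. A standalone sketch of just that arithmetic, with the format and offset assumed from the test expectations rather than from chunker's internals:

	package main

	import "fmt"

	// makeDataChunkName mirrors the naming rule exercised by the tests above:
	// printed number = chunk index + startFrom, padded to three digits.
	func makeDataChunkName(mainName string, chunkNo, startFrom int) string {
		return fmt.Sprintf("%s.chunk.%03d", mainName, chunkNo+startFrom)
	}

	func main() {
		fmt.Println(makeDataChunkName("fish", 1, 2))  // fish.chunk.003
		fmt.Println(makeDataChunkName("fish", 19, 2)) // fish.chunk.021
	}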
backend/chunker/chunker_test.go (new file, 58 lines)
@@ -0,0 +1,58 @@
// Test the Chunker filesystem interface
package chunker_test

import (
	"flag"
	"os"
	"path/filepath"
	"testing"

	_ "github.com/rclone/rclone/backend/all" // for integration tests
	"github.com/rclone/rclone/backend/chunker"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
)

// Command line flags
var (
	// Invalid characters are not supported by some remotes, eg. Mailru.
	// We enable testing with invalid characters when -remote is not set, so
	// chunker overlays a local directory, but invalid characters are disabled
	// by default when -remote is set, eg. when test_all runs backend tests.
	// You can still test with invalid characters using the below flag.
	UseBadChars = flag.Bool("bad-chars", false, "Set to test bad characters in file names when -remote is set")
)

// TestIntegration runs integration tests against a concrete remote
// set by the -remote flag. If the flag is not set, it creates a
// dynamic chunker overlay wrapping a local temporary directory.
func TestIntegration(t *testing.T) {
	opt := fstests.Opt{
		RemoteName:               *fstest.RemoteName,
		NilObject:                (*chunker.Object)(nil),
		SkipBadWindowsCharacters: !*UseBadChars,
		UnimplementableObjectMethods: []string{
			"MimeType",
			"GetTier",
			"SetTier",
		},
		UnimplementableFsMethods: []string{
			"PublicLink",
			"OpenWriterAt",
			"MergeDirs",
			"DirCacheFlush",
			"UserInfo",
			"Disconnect",
		},
	}
	if *fstest.RemoteName == "" {
		name := "TestChunker"
		opt.RemoteName = name + ":"
		tempDir := filepath.Join(os.TempDir(), "rclone-chunker-test-standard")
		opt.ExtraConfig = []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "chunker"},
			{Name: name, Key: "remote", Value: tempDir},
		}
	}
	fstests.Run(t, &opt)
}
@@ -208,21 +208,6 @@ func (c *cipher) putBlock(buf []byte) {
	c.buffers.Put(buf)
}

-// check to see if the byte string is valid with no control characters
-// from 0x00 to 0x1F and is a valid UTF-8 string
-func checkValidString(buf []byte) error {
-	for i := range buf {
-		c := buf[i]
-		if c >= 0x00 && c < 0x20 || c == 0x7F {
-			return ErrorBadDecryptControlChar
-		}
-	}
-	if !utf8.Valid(buf) {
-		return ErrorBadDecryptUTF8
-	}
-	return nil
-}
-
// encodeFileName encodes a filename using a modified version of
// standard base32 as described in RFC4648
//
@@ -294,10 +279,6 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
	if err != nil {
		return "", err
	}
-	err = checkValidString(plaintext)
-	if err != nil {
-		return "", err
-	}
	return string(plaintext), err
}
@@ -44,69 +44,6 @@ func TestNewNameEncryptionModeString(t *testing.T) {
	assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3")
}

-func TestValidString(t *testing.T) {
-	for _, test := range []struct {
-		in       string
-		expected error
-	}{
-		{"", nil},
-		{"\x01", ErrorBadDecryptControlChar},
-		{"a\x02", ErrorBadDecryptControlChar},
-		{"abc\x03", ErrorBadDecryptControlChar},
-		{"abc\x04def", ErrorBadDecryptControlChar},
-		{"\x05d", ErrorBadDecryptControlChar},
-		{"\x06def", ErrorBadDecryptControlChar},
-		{"\x07", ErrorBadDecryptControlChar},
-		{"\x08", ErrorBadDecryptControlChar},
-		{"\x09", ErrorBadDecryptControlChar},
-		{"\x0A", ErrorBadDecryptControlChar},
-		{"\x0B", ErrorBadDecryptControlChar},
-		{"\x0C", ErrorBadDecryptControlChar},
-		{"\x0D", ErrorBadDecryptControlChar},
-		{"\x0E", ErrorBadDecryptControlChar},
-		{"\x0F", ErrorBadDecryptControlChar},
-		{"\x10", ErrorBadDecryptControlChar},
-		{"\x11", ErrorBadDecryptControlChar},
-		{"\x12", ErrorBadDecryptControlChar},
-		{"\x13", ErrorBadDecryptControlChar},
-		{"\x14", ErrorBadDecryptControlChar},
-		{"\x15", ErrorBadDecryptControlChar},
-		{"\x16", ErrorBadDecryptControlChar},
-		{"\x17", ErrorBadDecryptControlChar},
-		{"\x18", ErrorBadDecryptControlChar},
-		{"\x19", ErrorBadDecryptControlChar},
-		{"\x1A", ErrorBadDecryptControlChar},
-		{"\x1B", ErrorBadDecryptControlChar},
-		{"\x1C", ErrorBadDecryptControlChar},
-		{"\x1D", ErrorBadDecryptControlChar},
-		{"\x1E", ErrorBadDecryptControlChar},
-		{"\x1F", ErrorBadDecryptControlChar},
-		{"\x20", nil},
-		{"\x7E", nil},
-		{"\x7F", ErrorBadDecryptControlChar},
-		{"£100", nil},
-		{`hello? sausage/êé/Hello, 世界/ " ' @ < > & ?/z.txt`, nil},
-		{"£100", nil},
-		// Following tests from https://secure.php.net/manual/en/reference.pcre.pattern.modifiers.php#54805
-		{"a", nil}, // Valid ASCII
-		{"\xc3\xb1", nil},                 // Valid 2 Octet Sequence
-		{"\xc3\x28", ErrorBadDecryptUTF8}, // Invalid 2 Octet Sequence
-		{"\xa0\xa1", ErrorBadDecryptUTF8}, // Invalid Sequence Identifier
-		{"\xe2\x82\xa1", nil},                 // Valid 3 Octet Sequence
-		{"\xe2\x28\xa1", ErrorBadDecryptUTF8}, // Invalid 3 Octet Sequence (in 2nd Octet)
-		{"\xe2\x82\x28", ErrorBadDecryptUTF8}, // Invalid 3 Octet Sequence (in 3rd Octet)
-		{"\xf0\x90\x8c\xbc", nil},                 // Valid 4 Octet Sequence
-		{"\xf0\x28\x8c\xbc", ErrorBadDecryptUTF8}, // Invalid 4 Octet Sequence (in 2nd Octet)
-		{"\xf0\x90\x28\xbc", ErrorBadDecryptUTF8}, // Invalid 4 Octet Sequence (in 3rd Octet)
-		{"\xf0\x28\x8c\x28", ErrorBadDecryptUTF8}, // Invalid 4 Octet Sequence (in 4th Octet)
-		{"\xf8\xa1\xa1\xa1\xa1", ErrorBadDecryptUTF8},     // Valid 5 Octet Sequence (but not Unicode!)
-		{"\xfc\xa1\xa1\xa1\xa1\xa1", ErrorBadDecryptUTF8}, // Valid 6 Octet Sequence (but not Unicode!)
-	} {
-		actual := checkValidString([]byte(test.in))
-		assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
-	}
-}
-
func TestEncodeFileName(t *testing.T) {
	for _, test := range []struct {
		in string
@@ -210,8 +147,6 @@ func TestDecryptSegment(t *testing.T) {
		{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
		{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
		{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
-		{c.encryptSegment("\x01"), ErrorBadDecryptControlChar},
-		{c.encryptSegment("\xc3\x28"), ErrorBadDecryptUTF8},
	} {
		actual, actualErr := c.decryptSegment(test.in)
		assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
@@ -32,6 +32,7 @@ import (
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
+	"github.com/rclone/rclone/fs/encodings"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
@@ -47,6 +48,8 @@ import (
	"google.golang.org/api/googleapi"
)

+const enc = encodings.Drive
+
// Constants
const (
	rcloneClientID = "202264815644.apps.googleusercontent.com"
@@ -599,11 +602,10 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
	}
	var stems []string
	if title != "" {
+		searchTitle := enc.FromStandardName(title)
		// Escaping the backslash isn't documented but seems to work
-		searchTitle := strings.Replace(title, `\`, `\\`, -1)
+		searchTitle = strings.Replace(searchTitle, `\`, `\\`, -1)
		searchTitle = strings.Replace(searchTitle, `'`, `\'`, -1)
-		// Convert / to ／ for search
-		searchTitle = strings.Replace(searchTitle, "/", "／", -1)

		var titleQuery bytes.Buffer
		_, _ = fmt.Fprintf(&titleQuery, "(name='%s'", searchTitle)
@@ -671,11 +673,9 @@ OUTER:
		return false, errors.Wrap(err, "couldn't list directory")
	}
	for _, item := range files.Files {
-		// Convert ／ to / for listing purposes
-		item.Name = strings.Replace(item.Name, "／", "/", -1)
+		item.Name = enc.ToStandardName(item.Name)
		// Check the case of items is correct since
		// the `=` operator is case insensitive.

		if title != "" && title != item.Name {
			found := false
			for _, stem := range stems {
@@ -1210,6 +1210,7 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin

// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
+	leaf = enc.FromStandardName(leaf)
	// fmt.Println("Making", path)
	// Define the metadata for the directory we are going to create.
	createInfo := &drive.File{
@@ -1645,6 +1646,7 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
		return nil, err
	}

+	leaf = enc.FromStandardName(leaf)
	// Define the metadata for the file we are going to create.
	createInfo := &drive.File{
		Name: leaf,
@@ -2316,6 +2318,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.

	// find the new path
	if change.File != nil {
+		change.File.Name = enc.ToStandardName(change.File.Name)
		changeType := fs.EntryDirectory
		if change.File.MimeType != driveFolderType {
			changeType = fs.EntryObject
@@ -39,11 +39,13 @@ import (
	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
	"github.com/pkg/errors"
+	"github.com/rclone/rclone/backend/dropbox/dbhash"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
+	"github.com/rclone/rclone/fs/encodings"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/oauthutil"
@@ -52,6 +54,8 @@ import (
	"golang.org/x/oauth2"
)

+const enc = encodings.Dropbox
+
// Constants
const (
	rcloneClientID = "5jcck7diasz0rqy"
@@ -102,10 +106,14 @@ var (
	// A regexp matching path names for files Dropbox ignores
	// See https://www.dropbox.com/en/help/145 - Ignored files
	ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
+
+	// DbHashType is the hash.Type for Dropbox
+	DbHashType hash.Type
)

// Register with Fs
func init() {
+	DbHashType = hash.RegisterHash("Dropbox", 64, dbhash.New)
	fs.Register(&fs.RegInfo{
		Name:        "dropbox",
		Description: "Dropbox",
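The 64-character hash registered here is Dropbox's content hash, implemented by the dbhash package. A hedged sketch of computing it for a local file, assuming only that dbhash.New returns a standard hash.Hash as its use with hash.RegisterHash above implies:

	package main

	import (
		"encoding/hex"
		"fmt"
		"io"
		"os"

		"github.com/rclone/rclone/backend/dropbox/dbhash"
	)

	func main() {
		f, err := os.Open("file.bin")
		if err != nil {
			panic(err)
		}
		defer f.Close()

		h := dbhash.New() // SHA-256 over per-block SHA-256s, per the Dropbox content hash spec
		if _, err := io.Copy(h, f); err != nil {
			panic(err)
		}
		fmt.Println(hex.EncodeToString(h.Sum(nil))) // 64 hex digits
	}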
@@ -372,14 +380,15 @@ func (f *Fs) setRoot(root string) {
// getMetadata gets the metadata for a file or directory
func (f *Fs) getMetadata(objPath string) (entry files.IsMetadata, notFound bool, err error) {
	err = f.pacer.Call(func() (bool, error) {
-		entry, err = f.srv.GetMetadata(&files.GetMetadataArg{Path: objPath})
+		entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
+			Path: enc.FromStandardPath(objPath),
+		})
		return shouldRetry(err)
	})
	if err != nil {
		switch e := err.(type) {
		case files.GetMetadataAPIError:
-			switch e.EndpointError.Path.Tag {
-			case files.LookupErrorNotFound:
+			if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
				notFound = true
				err = nil
			}
@@ -466,7 +475,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	for {
		if !started {
			arg := files.ListFolderArg{
-				Path:      root,
+				Path:      enc.FromStandardPath(root),
				Recursive: false,
			}
			if root == "/" {
@@ -479,8 +488,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
			if err != nil {
				switch e := err.(type) {
				case files.ListFolderAPIError:
-					switch e.EndpointError.Path.Tag {
-					case files.LookupErrorNotFound:
+					if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
						err = fs.ErrorDirNotFound
					}
				}
@@ -517,7 +525,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e

			// Only the last element is reliably cased in PathDisplay
			entryPath := metadata.PathDisplay
-			leaf := path.Base(entryPath)
+			leaf := enc.ToStandardName(path.Base(entryPath))
			remote := path.Join(dir, leaf)
			if folderInfo != nil {
				d := fs.NewDir(remote, time.Now())
@@ -575,7 +583,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {

	// create it
	arg2 := files.CreateFolderArg{
-		Path: root,
+		Path: enc.FromStandardPath(root),
	}
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.srv.CreateFolderV2(&arg2)
@@ -601,6 +609,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
		return errors.Wrap(err, "Rmdir")
	}

+	root = enc.FromStandardPath(root)
	// check directory empty
	arg := files.ListFolderArg{
		Path: root,
@@ -657,9 +666,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
	}

	// Copy
-	arg := files.RelocationArg{}
-	arg.FromPath = srcObj.remotePath()
-	arg.ToPath = dstObj.remotePath()
+	arg := files.RelocationArg{
+		RelocationPath: files.RelocationPath{
+			FromPath: enc.FromStandardPath(srcObj.remotePath()),
+			ToPath:   enc.FromStandardPath(dstObj.remotePath()),
+		},
+	}
	var err error
	var result *files.RelocationResult
	err = f.pacer.Call(func() (bool, error) {
@@ -691,7 +703,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
func (f *Fs) Purge(ctx context.Context) (err error) {
	// Let dropbox delete the filesystem tree
	err = f.pacer.Call(func() (bool, error) {
-		_, err = f.srv.DeleteV2(&files.DeleteArg{Path: f.slashRoot})
+		_, err = f.srv.DeleteV2(&files.DeleteArg{
+			Path: enc.FromStandardPath(f.slashRoot),
+		})
		return shouldRetry(err)
	})
	return err
@@ -720,9 +734,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
	}

	// Do the move
-	arg := files.RelocationArg{}
-	arg.FromPath = srcObj.remotePath()
-	arg.ToPath = dstObj.remotePath()
+	arg := files.RelocationArg{
+		RelocationPath: files.RelocationPath{
+			FromPath: enc.FromStandardPath(srcObj.remotePath()),
+			ToPath:   enc.FromStandardPath(dstObj.remotePath()),
+		},
+	}
	var err error
	var result *files.RelocationResult
	err = f.pacer.Call(func() (bool, error) {
@@ -747,7 +764,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
-	absPath := "/" + path.Join(f.Root(), remote)
+	absPath := enc.FromStandardPath(path.Join(f.slashRoot, remote))
	fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
	createArg := sharing.CreateSharedLinkWithSettingsArg{
		Path: absPath,
@@ -758,7 +775,8 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
		return shouldRetry(err)
	})

-	if err != nil && strings.Contains(err.Error(), sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
+	if err != nil && strings.Contains(err.Error(),
+		sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
		fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
		listArg := sharing.ListSharedLinksArg{
			Path: absPath,
@@ -820,9 +838,12 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
	// ...apparently not necessary

	// Do the move
-	arg := files.RelocationArg{}
-	arg.FromPath = srcPath
-	arg.ToPath = dstPath
+	arg := files.RelocationArg{
+		RelocationPath: files.RelocationPath{
+			FromPath: enc.FromStandardPath(srcPath),
+			ToPath:   enc.FromStandardPath(dstPath),
+		},
+	}
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.srv.MoveV2(&arg)
		return shouldRetry(err)
@@ -863,7 +884,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
-	return hash.Set(hash.Dropbox)
+	return hash.Set(DbHashType)
}

// ------------------------------------------------------------
@@ -888,7 +909,7 @@ func (o *Object) Remote() string {

// Hash returns the dropbox special hash
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
-	if t != hash.Dropbox {
+	if t != DbHashType {
		return "", hash.ErrUnsupported
	}
	err := o.readMetaData()
@@ -977,7 +998,10 @@ func (o *Object) Storable() bool {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	fs.FixRangeOption(options, o.bytes)
	headers := fs.OpenOptionHeaders(options)
-	arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers}
+	arg := files.DownloadArg{
+		Path:         enc.FromStandardPath(o.remotePath()),
+		ExtraHeaders: headers,
+	}
	err = o.fs.pacer.Call(func() (bool, error) {
		_, in, err = o.fs.srv.Download(&arg)
		return shouldRetry(err)
@@ -986,7 +1010,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	switch e := err.(type) {
	case files.DownloadAPIError:
		// Don't attempt to retry copyright violation errors
-		if e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
+		if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
			return nil, fserrors.NoRetryError(err)
		}
	}
@@ -1104,10 +1128,9 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	remote := o.remotePath()
	if ignoredFiles.MatchString(remote) {
-		fs.Logf(o, "File name disallowed - not uploading")
-		return nil
+		return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
	}
-	commitInfo := files.NewCommitInfo(o.remotePath())
+	commitInfo := files.NewCommitInfo(enc.FromStandardPath(o.remotePath()))
	commitInfo.Mode.Tag = "overwrite"
	// The Dropbox API only accepts timestamps in UTC with second precision.
	commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
@@ -1132,7 +1155,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
	err = o.fs.pacer.Call(func() (bool, error) {
-		_, err = o.fs.srv.DeleteV2(&files.DeleteArg{Path: o.remotePath()})
+		_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
+			Path: enc.FromStandardPath(o.remotePath()),
+		})
		return shouldRetry(err)
	})
	return err
@@ -107,6 +107,10 @@ func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesLi
if err != nil {
return nil, errors.Wrap(err, "couldn't list files")
}
for i := range filesList.Items {
item := &filesList.Items[i]
item.Filename = enc.ToStandardName(item.Filename)
}

return filesList, nil
}
@@ -131,6 +135,11 @@ func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *Fol
if err != nil {
return nil, errors.Wrap(err, "couldn't list folders")
}
foldersList.Name = enc.ToStandardName(foldersList.Name)
for i := range foldersList.SubFolders {
folder := &foldersList.SubFolders[i]
folder.Name = enc.ToStandardName(folder.Name)
}

// fs.Debugf(f, "Got FoldersList for id `%s`", directoryID)

@@ -166,7 +175,6 @@ func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, er
entries = make([]fs.DirEntry, len(files.Items)+len(folders.SubFolders))

for i, item := range files.Items {
item.Filename = restoreReservedChars(item.Filename)
entries[i] = f.newObjectFromFile(ctx, dir, item)
}

@@ -176,7 +184,6 @@ func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, er
return nil, err
}

folder.Name = restoreReservedChars(folder.Name)
fullPath := getRemote(dir, folder.Name)
folderID := strconv.Itoa(folder.ID)

@@ -206,7 +213,7 @@ func getRemote(dir, fileName string) string {
}

func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (response *MakeFolderResponse, err error) {
name := replaceReservedChars(leaf)
name := enc.FromStandardName(leaf)
// fs.Debugf(f, "Creating folder `%s` in id `%s`", name, directoryID)

request := MakeFolderRequest{
@@ -316,7 +323,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
// fs.Debugf(f, "Uploading File `%s`", fileName)

fileName = replaceReservedChars(fileName)
fileName = enc.FromStandardName(fileName)

if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")

@@ -13,6 +13,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
@@ -28,6 +29,8 @@ const (
decayConstant = 2 // bigger for slower decay, exponential
)

const enc = encodings.Fichier

func init() {
fs.Register(&fs.RegInfo{
Name: "fichier",
@@ -141,8 +144,7 @@ func (f *Fs) Features() *fs.Features {
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(name string, rootleaf string, config configmap.Mapper) (fs.Fs, error) {
root := replaceReservedChars(rootleaf)
func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(config, opt)
if err != nil {
@@ -346,7 +348,7 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
Date: time.Now().Format("2006-01-02 15:04:05"),
Filename: link.Filename,
Pass: 0,
Size: int(fileSize),
Size: fileSize,
URL: link.Download,
},
}, nil

@@ -43,7 +43,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {

// Size returns the size of the file
func (o *Object) Size() int64 {
return int64(o.file.Size)
return o.file.Size
}

// Fs returns read only access to the Fs that this object is part of
@@ -74,7 +74,7 @@ func (o *Object) SetModTime(context.Context, time.Time) error {

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
fs.FixRangeOption(options, int64(o.file.Size))
fs.FixRangeOption(options, o.file.Size)
downloadToken, err := o.fs.getDownloadToken(ctx, o.file.URL)

if err != nil {

@@ -1,71 +0,0 @@
/*
Translate file names for 1fichier

1Fichier reserved characters

The following characters are 1Fichier reserved characters, and can't
be used in 1Fichier folder and file names.

*/

package fichier

import (
"regexp"
"strings"
)

// charMap holds replacements for characters
//
// 1Fichier has a restricted set of characters compared to other cloud
// storage systems, so we to map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
'<': '＜', // FULLWIDTH LESS-THAN SIGN
'>': '＞', // FULLWIDTH GREATER-THAN SIGN
'"': '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
'\'': '＇', // FULLWIDTH APOSTROPHE
'$': '＄', // FULLWIDTH DOLLAR SIGN
'`': '｀', // FULLWIDTH GRAVE ACCENT
' ': '␠', // SYMBOL FOR SPACE
}
invCharMap map[rune]rune
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
)

func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}

// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// file names can't start with space either
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Replace reserved characters
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != ' ' {
return replacement
}
return c
}, in)
}

// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}
@@ -1,24 +0,0 @@
package fichier

import "testing"

func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{"\"'<>/\\$`", `＂＇＜＞/＼＄｀`},
{" leading space", "␠leading space"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}
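
The two files deleted above are superseded by the shared encodings.Fichier encoder. The substitution they implemented is a lossless round trip; a self-contained sketch of the mechanism, trimmed to three mappings but otherwise the same shape as the deleted code:

package main

import (
    "fmt"
    "strings"
)

// A trimmed copy of the deleted charMap idea: reserved characters map to
// FULLWIDTH equivalents, and invCharMap undoes the substitution.
var charMap = map[rune]rune{'<': '＜', '>': '＞', '$': '＄'}
var invCharMap = map[rune]rune{}

func init() {
    for k, v := range charMap {
        invCharMap[v] = k
    }
}

// mapRunes applies whichever direction of the mapping it is given.
func mapRunes(m map[rune]rune, s string) string {
    return strings.Map(func(c rune) rune {
        if r, ok := m[c]; ok {
            return r
        }
        return c
    }, s)
}

func main() {
    in := "price <$100>"
    encoded := mapRunes(charMap, in)                 // "price ＜＄100＞"
    fmt.Println(mapRunes(invCharMap, encoded) == in) // true: lossless
}
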
@@ -69,7 +69,7 @@ type SharedFolderResponse []SharedFile
type SharedFile struct {
Filename string `json:"filename"`
Link string `json:"link"`
Size int `json:"size"`
Size int64 `json:"size"`
}

// EndFileUploadResponse is the response structure of the corresponding request
@@ -93,7 +93,7 @@ type File struct {
Date string `json:"date"`
Filename string `json:"filename"`
Pass int `json:"pass"`
Size int `json:"size"`
Size int64 `json:"size"`
URL string `json:"url"`
}

@@ -17,11 +17,14 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
)

const enc = encodings.FTP

// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -62,6 +65,11 @@ func init() {
Help: "Do not verify the TLS certificate of the server",
Default: false,
Advanced: true,
}, {
Name: "disable_epsv",
Help: "Disable using EPSV even if server advertises support",
Default: false,
Advanced: true,
},
},
})
@@ -76,6 +84,7 @@ type Options struct {
TLS bool `config:"tls"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
}

// Fs represents a remote FTP server
@@ -141,6 +150,9 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
}
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
}
c, err := ftp.Dial(f.dialAddr, ftpConfig...)
if err != nil {
fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
@@ -295,10 +307,37 @@ func translateErrorDir(err error) error {
return err
}

// entryToStandard converts an incoming ftp.Entry to Standard encoding
func entryToStandard(entry *ftp.Entry) {
// Skip . and .. as we don't want these encoded
if entry.Name == "." || entry.Name == ".." {
return
}
entry.Name = enc.ToStandardName(entry.Name)
entry.Target = enc.ToStandardPath(entry.Target)
}

// dirFromStandardPath returns dir in encoded form.
func dirFromStandardPath(dir string) string {
// Skip . and .. as we don't want these encoded
if dir == "." || dir == ".." {
return dir
}
return enc.FromStandardPath(dir)
}

// findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
fullPath := path.Join(f.root, remote)
if fullPath == "" || fullPath == "." || fullPath == "/" {
// if root, assume exists and synthesize an entry
return &ftp.Entry{
Name: "",
Type: ftp.EntryTypeFolder,
Time: time.Now(),
}, nil
}
dir := path.Dir(fullPath)
base := path.Base(fullPath)

@@ -306,12 +345,13 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
if err != nil {
return nil, errors.Wrap(err, "findItem")
}
files, err := c.List(dir)
files, err := c.List(dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorFile(err)
}
for _, file := range files {
entryToStandard(file)
if file.Name == base {
return file, nil
}
@@ -366,7 +406,7 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer fs.Trace(dir, "curlevel=%d", curlevel)("")
// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
c, err := f.getFtpConnection()
if err != nil {
return nil, errors.Wrap(err, "list")
@@ -378,7 +418,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
resultchan := make(chan []*ftp.Entry, 1)
errchan := make(chan error, 1)
go func() {
result, err := c.List(path.Join(f.root, dir))
result, err := c.List(dirFromStandardPath(path.Join(f.root, dir)))
f.putFtpConnection(&c, err)
if err != nil {
errchan <- err
@@ -415,6 +455,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
for i := range files {
object := files[i]
entryToStandard(object)
newremote := path.Join(dir, object.Name)
switch object.Type {
case ftp.EntryTypeFolder:
@@ -484,19 +525,21 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
if err != nil {
return nil, errors.Wrap(err, "getInfo")
}
files, err := c.List(dir)
files, err := c.List(dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorFile(err)
}

for i := range files {
if files[i].Name == base {
file := files[i]
entryToStandard(file)
if file.Name == base {
info := &FileInfo{
Name: remote,
Size: files[i].Size,
ModTime: files[i].Time,
IsDir: files[i].Type == ftp.EntryTypeFolder,
Size: file.Size,
ModTime: file.Time,
IsDir: file.Type == ftp.EntryTypeFolder,
}
return info, nil
}
@@ -506,6 +549,7 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {

// mkdir makes the directory and parents using unrooted paths
func (f *Fs) mkdir(abspath string) error {
abspath = path.Clean(abspath)
if abspath == "." || abspath == "/" {
return nil
}
@@ -527,7 +571,7 @@ func (f *Fs) mkdir(abspath string) error {
if connErr != nil {
return errors.Wrap(connErr, "mkdir")
}
err = c.MakeDir(abspath)
err = c.MakeDir(dirFromStandardPath(abspath))
f.putFtpConnection(&c, err)
switch errX := err.(type) {
case *textproto.Error:
@@ -563,7 +607,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
if err != nil {
return errors.Wrap(translateErrorFile(err), "Rmdir")
}
err = c.RemoveDir(path.Join(f.root, dir))
err = c.RemoveDir(dirFromStandardPath(path.Join(f.root, dir)))
f.putFtpConnection(&c, err)
return translateErrorDir(err)
}
@@ -584,8 +628,8 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, errors.Wrap(err, "Move")
}
err = c.Rename(
path.Join(srcObj.fs.root, srcObj.remote),
path.Join(f.root, remote),
enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
enc.FromStandardPath(path.Join(f.root, remote)),
)
f.putFtpConnection(&c, err)
if err != nil {
@@ -638,8 +682,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return errors.Wrap(err, "DirMove")
}
err = c.Rename(
srcPath,
dstPath,
dirFromStandardPath(srcPath),
dirFromStandardPath(dstPath),
)
f.putFtpConnection(&c, err)
if err != nil {
@@ -765,7 +809,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
if err != nil {
return nil, errors.Wrap(err, "open")
}
fd, err := c.RetrFrom(path, uint64(offset))
fd, err := c.RetrFrom(enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
return nil, errors.Wrap(err, "open")
@@ -800,7 +844,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return errors.Wrap(err, "Update")
}
err = c.Stor(path, in)
err = c.Stor(enc.FromStandardPath(path), in)
if err != nil {
_ = c.Quit() // toss this connection to avoid sync errors
remove()
@@ -830,7 +874,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
if err != nil {
return errors.Wrap(err, "Remove")
}
err = c.Delete(path)
err = c.Delete(enc.FromStandardPath(path))
o.fs.putFtpConnection(&c, err)
}
return err

@@ -32,6 +32,7 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
@@ -68,6 +69,8 @@ var (
}
)

const enc = encodings.GoogleCloudStorage

// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -349,7 +352,8 @@ func parsePath(path string) (root string) {
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
return bucket.Split(path.Join(f.root, rootRelativePath))
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
return enc.FromStandardName(bucketName), enc.FromStandardPath(bucketPath)
}

// split returns bucket and bucketPath from the object
@@ -438,8 +442,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {

if f.rootBucket != "" && f.rootDirectory != "" {
// Check to see if the object exists
encodedDirectory := enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.Get(f.rootBucket, f.rootDirectory).Context(ctx).Do()
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
return shouldRetry(err)
})
if err == nil {
@@ -522,6 +527,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
if !strings.HasSuffix(remote, "/") {
continue
}
remote = enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
@@ -537,11 +543,12 @@
}
}
for _, object := range objects.Items {
if !strings.HasPrefix(object.Name, prefix) {
remote := enc.ToStandardPath(object.Name)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", object.Name)
continue
}
remote := object.Name[len(prefix):]
remote = remote[len(prefix):]
isDirectory := strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
@@ -613,7 +620,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
return nil, err
}
for _, bucket := range buckets.Items {
d := fs.NewDir(bucket.Name, time.Time{})
d := fs.NewDir(enc.ToStandardName(bucket.Name), time.Time{})
entries = append(entries, d)
}
if buckets.NextPageToken == "" {

@@ -13,6 +13,7 @@ import (
"path"
"strconv"
"strings"
"sync"
"time"

"github.com/pkg/errors"
@@ -77,6 +78,26 @@ Note that this may cause rclone to confuse genuine HTML files with
directories.`,
Default: false,
Advanced: true,
}, {
Name: "no_head",
Help: `Don't use HEAD requests to find file sizes in dir listing

If your site is being very slow to load then you can try this option.
Normally rclone does a HEAD request for each potential file in a
directory listing to:

- find its size
- check it really exists
- check to see if it is a directory

If you set this option, rclone will not do the HEAD request. This will mean

- directory listings are much quicker
- rclone won't have the times or sizes of any files
- some files that don't exist may be in the listing
`,
Default: false,
Advanced: true,
}},
}
fs.Register(fsi)
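
With no_head registered on the http backend, the option should be reachable in the usual two ways. The flag spelling below is an assumption derived from rclone's backend-option naming convention, not something shown in this diff:

rclone lsd --http-url https://example.com --http-no-head :http:

# or persistently, in rclone.conf:
[slowsite]
type = http
url = https://example.com
no_head = true
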
@@ -86,6 +107,7 @@ directories.`,
type Options struct {
Endpoint string `config:"url"`
NoSlash bool `config:"no_slash"`
NoHead bool `config:"no_head"`
Headers fs.CommaSepList `config:"headers"`
}

@@ -415,30 +437,49 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if err != nil {
return nil, errors.Wrapf(err, "error listing %q", dir)
}
var (
entriesMu sync.Mutex // to protect entries
wg sync.WaitGroup
in = make(chan string, fs.Config.Checkers)
)
add := func(entry fs.DirEntry) {
entriesMu.Lock()
entries = append(entries, entry)
entriesMu.Unlock()
}
for i := 0; i < fs.Config.Checkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for remote := range in {
file := &Object{
fs: f,
remote: remote,
}
switch err := file.stat(ctx); err {
case nil:
add(file)
case fs.ErrorNotAFile:
// ...found a directory not a file
add(fs.NewDir(remote, timeUnset))
default:
fs.Debugf(remote, "skipping because of error: %v", err)
}
}
}()
}
for _, name := range names {
isDir := name[len(name)-1] == '/'
name = strings.TrimRight(name, "/")
remote := path.Join(dir, name)
if isDir {
dir := fs.NewDir(remote, timeUnset)
entries = append(entries, dir)
add(fs.NewDir(remote, timeUnset))
} else {
file := &Object{
fs: f,
remote: remote,
}
switch err = file.stat(ctx); err {
case nil:
entries = append(entries, file)
case fs.ErrorNotAFile:
// ...found a directory not a file
dir := fs.NewDir(remote, timeUnset)
entries = append(entries, dir)
default:
fs.Debugf(remote, "skipping because of error: %v", err)
}
in <- remote
}
}
close(in)
wg.Wait()
return entries, nil
}
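
The List rewrite above replaces the serial per-name stat with a fixed pool of fs.Config.Checkers goroutines fed from a channel, appending results under a mutex. The same shape in miniature, self-contained, with illustrative names rather than rclone's:

package main

import (
    "fmt"
    "strings"
    "sync"
)

// statAll runs fn over each name with at most n concurrent workers,
// collecting results under a mutex - the structure used by List above.
func statAll(n int, names []string, fn func(string) string) []string {
    var (
        mu      sync.Mutex
        results []string
        wg      sync.WaitGroup
        in      = make(chan string, n)
    )
    for i := 0; i < n; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for name := range in {
                r := fn(name) // e.g. one HEAD request per file
                mu.Lock()
                results = append(results, r)
                mu.Unlock()
            }
        }()
    }
    for _, name := range names {
        in <- name
    }
    close(in)
    wg.Wait()
    return results
}

func main() {
    out := statAll(4, []string{"a", "b", "c"}, strings.ToUpper)
    fmt.Println(len(out), "stat results")
}
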
@@ -496,6 +537,12 @@ func (o *Object) url() string {

// stat updates the info field in the Object
func (o *Object) stat(ctx context.Context) error {
if o.fs.opt.NoHead {
o.size = -1
o.modTime = timeUnset
o.contentType = fs.MimeType(ctx, o)
return nil
}
url := o.url()
req, err := http.NewRequest("HEAD", url, nil)
if err != nil {

@@ -26,6 +26,7 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
@@ -36,6 +37,8 @@ import (
"golang.org/x/oauth2"
)

const enc = encodings.JottaCloud

// Globals
const (
minSleep = 10 * time.Millisecond
@@ -460,7 +463,7 @@ func urlPathEscape(in string) string {

// filePathRaw returns an unescaped file path (f.root, file)
func (f *Fs) filePathRaw(file string) string {
return path.Join(f.endpointURL, replaceReservedChars(path.Join(f.root, file)))
return path.Join(f.endpointURL, enc.FromStandardPath(path.Join(f.root, file)))
}

// filePath returns a escaped file path (f.root, file)
@@ -673,7 +676,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if item.Deleted {
continue
}
remote := path.Join(dir, restoreReservedChars(item.Name))
remote := path.Join(dir, enc.ToStandardName(item.Name))
d := fs.NewDir(remote, time.Time(item.ModifiedAt))
entries = append(entries, d)
}
@@ -683,7 +686,7 @@
if item.Deleted || item.State != "COMPLETED" {
continue
}
remote := path.Join(dir, restoreReservedChars(item.Name))
remote := path.Join(dir, enc.ToStandardName(item.Name))
o, err := f.newObjectWithInfo(ctx, remote, item)
if err != nil {
continue
@@ -708,7 +711,7 @@ func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolde
if folder.Deleted {
return nil
}
folderPath := restoreReservedChars(path.Join(folder.Path, folder.Name))
folderPath := enc.ToStandardPath(path.Join(folder.Path, folder.Name))
folderPathLength := len(folderPath)
var remoteDir string
if folderPathLength > pathPrefixLength {
@@ -726,7 +729,7 @@
if file.Deleted || file.State != "COMPLETED" {
continue
}
remoteFile := path.Join(remoteDir, restoreReservedChars(file.Name))
remoteFile := path.Join(remoteDir, enc.ToStandardName(file.Name))
o, err := f.newObjectWithInfo(ctx, remoteFile, file)
if err != nil {
return err
@@ -897,7 +900,7 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
Parameters: url.Values{},
}

opts.Parameters.Set(method, "/"+path.Join(f.endpointURL, replaceReservedChars(path.Join(f.root, dest))))
opts.Parameters.Set(method, "/"+path.Join(f.endpointURL, enc.FromStandardPath(path.Join(f.root, dest))))

var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
@@ -1004,7 +1007,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return fs.ErrorDirExists
}

_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, replaceReservedChars(srcPath))+"/", dstRemote)
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, enc.FromStandardPath(srcPath))+"/", dstRemote)

if err != nil {
return errors.Wrap(err, "couldn't move directory")
@@ -1295,7 +1298,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Created: fileDate,
Modified: fileDate,
Md5: md5String,
Path: path.Join(o.fs.opt.Mountpoint, replaceReservedChars(path.Join(o.fs.root, o.remote))),
Path: path.Join(o.fs.opt.Mountpoint, enc.FromStandardPath(path.Join(o.fs.root, o.remote))),
}

// send it

@@ -1,77 +0,0 @@
/*
Translate file names for JottaCloud adapted from OneDrive

The following characters are JottaCloud reserved characters, and can't
be used in JottaCloud folder and file names.

jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~"

*/

package jottacloud

import (
"regexp"
"strings"
)

// charMap holds replacements for characters
//
// Onedrive has a restricted set of characters compared to other cloud
// storage systems, so we to map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
'*': '＊', // FULLWIDTH ASTERISK
'<': '＜', // FULLWIDTH LESS-THAN SIGN
'>': '＞', // FULLWIDTH GREATER-THAN SIGN
'?': '？', // FULLWIDTH QUESTION MARK
':': '：', // FULLWIDTH COLON
';': '；', // FULLWIDTH SEMICOLON
'|': '｜', // FULLWIDTH VERTICAL LINE
'"': '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
' ': '␠', // SYMBOL FOR SPACE
}
invCharMap map[rune]rune
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
fixEndingWithSpace = regexp.MustCompile(` (/|$)`)
)

func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}

// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// Filenames can't start with space
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Filenames can't end with space
in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != ' ' {
return replacement
}
return c
}, in)
}

// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}
@@ -1,28 +0,0 @@
package jottacloud

import "testing"

func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{`\*<>?:;|"`, `＼＊＜＞？：；｜＂`},
{`\*<>?:;|"\*<>?:;|"`, `＼＊＜＞？：；｜＂＼＊＜＞？：；｜＂`},
{" leading space", "␠leading space"},
{"trailing space ", "trailing space␠"},
{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
{"trailing space /trailing space /trailing space ", "trailing space␠/trailing space␠/trailing space␠"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}
@@ -15,12 +15,15 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/hash"

httpclient "github.com/koofr/go-httpclient"
koofrclient "github.com/koofr/go-koofrclient"
)

const enc = encodings.Koofr

// Register Fs with rclone
func init() {
fs.Register(&fs.RegInfo{
@@ -242,7 +245,7 @@ func (f *Fs) Hashes() hash.Set {

// fullPath constructs a full, absolute path from a Fs root relative path,
func (f *Fs) fullPath(part string) string {
return path.Join("/", f.root, part)
return enc.FromStandardPath(path.Join("/", f.root, part))
}

// NewFs constructs a new filesystem given a root path and configuration options
@@ -293,7 +296,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
}
return nil, errors.New("Failed to find mount " + opt.MountID)
}
rootFile, err := f.client.FilesInfo(f.mountID, "/"+f.root)
rootFile, err := f.client.FilesInfo(f.mountID, enc.FromStandardPath("/"+f.root))
if err == nil && rootFile.Type != "dir" {
f.root = dir(f.root)
err = fs.ErrorIsFile
@@ -311,13 +314,14 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
entries = make([]fs.DirEntry, len(files))
for i, file := range files {
remote := path.Join(dir, enc.ToStandardName(file.Name))
if file.Type == "dir" {
entries[i] = fs.NewDir(path.Join(dir, file.Name), time.Unix(0, 0))
entries[i] = fs.NewDir(remote, time.Unix(0, 0))
} else {
entries[i] = &Object{
fs: f,
info: file,
remote: path.Join(dir, file.Name),
remote: remote,
}
}
}

backend/local/encode_darwin.go (new file)
@@ -0,0 +1,9 @@
//+build darwin

package local

import (
"github.com/rclone/rclone/fs/encodings"
)

const enc = encodings.LocalMacOS
backend/local/encode_other.go (new file)
@@ -0,0 +1,9 @@
//+build !windows,!darwin

package local

import (
"github.com/rclone/rclone/fs/encodings"
)

const enc = encodings.LocalUnix
backend/local/encode_windows.go (new file)
@@ -0,0 +1,9 @@
//+build windows

package local

import (
"github.com/rclone/rclone/fs/encodings"
)

const enc = encodings.LocalWindows
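
The three new files above select the local encoder at compile time: one build-tagged file per platform, each defining the same enc constant. The same technique in miniature, with a hypothetical package and constant (the sibling file for other platforms carries the inverse tag, exactly as encode_other.go does):

// sep_windows.go - compiled only on Windows because of the build tag;
// a sibling sep_other.go declares the same constant under //+build !windows.
//+build windows

package sep

// PathSep is resolved at compile time; callers simply use sep.PathSep.
const PathSep = `\`
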
@@ -142,19 +142,19 @@ type Fs struct {
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
wmu sync.Mutex // used for locking access to 'warned'.
warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string

// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
dirNames *mapper // directory name mapping
objectHashesMu sync.Mutex // global lock for Object.hashes
}

// Object represents a local filesystem object
type Object struct {
fs *Fs // The Fs this object is part of
remote string // The remote path - properly UTF-8 encoded - for rclone
path string // The local path - may not be properly UTF-8 encoded - for OS
remote string // The remote path (encoded path)
path string // The local path (OS path)
size int64 // file metadata - always present
mode os.FileMode
modTime time.Time
@@ -183,14 +183,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}

f := &Fs{
name: name,
opt: *opt,
warned: make(map[string]struct{}),
dev: devUnset,
lstat: os.Lstat,
dirNames: newMapper(),
name: name,
opt: *opt,
warned: make(map[string]struct{}),
dev: devUnset,
lstat: os.Lstat,
}
f.root = f.cleanPath(root)
f.root = cleanRootPath(root, f.opt.NoUNC)
f.features = (&fs.Features{
CaseInsensitive: f.caseInsensitive(),
CanHaveEmptyDirectories: true,
@@ -235,12 +234,12 @@ func (f *Fs) Name() string {

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
return enc.ToStandardPath(filepath.ToSlash(f.root))
}

// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Local file system at %s", f.root)
return fmt.Sprintf("Local file system at %s", f.Root())
}

// Features returns the optional features of this Fs
@@ -268,33 +267,27 @@ func (f *Fs) caseInsensitive() bool {
// and returns a new path, removing the suffix as needed,
// It also returns whether this is a translated link at all
//
// for regular files, dstPath is returned unchanged
func translateLink(remote, dstPath string) (newDstPath string, isTranslatedLink bool) {
// for regular files, localPath is returned unchanged
func translateLink(remote, localPath string) (newLocalPath string, isTranslatedLink bool) {
isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
newDstPath = strings.TrimSuffix(dstPath, linkSuffix)
return newDstPath, isTranslatedLink
newLocalPath = strings.TrimSuffix(localPath, linkSuffix)
return newLocalPath, isTranslatedLink
}

// newObject makes a half completed Object
//
// if dstPath is empty then it is made from remote
func (f *Fs) newObject(remote, dstPath string) *Object {
func (f *Fs) newObject(remote string) *Object {
translatedLink := false

if dstPath == "" {
dstPath = f.cleanPath(filepath.Join(f.root, remote))
}
remote = f.cleanRemote(remote)
localPath := f.localPath(remote)

if f.opt.TranslateSymlinks {
// Possibly receive a new name for dstPath
dstPath, translatedLink = translateLink(remote, dstPath)
// Possibly receive a new name for localPath
localPath, translatedLink = translateLink(remote, localPath)
}

return &Object{
fs: f,
remote: remote,
path: dstPath,
path: localPath,
translatedLink: translatedLink,
}
}
@@ -302,8 +295,8 @@
// Return an Object from a path
//
// May return nil if an error occurred
func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Object, error) {
o := f.newObject(remote, dstPath)
func (f *Fs) newObjectWithInfo(remote string, info os.FileInfo) (fs.Object, error) {
o := f.newObject(remote)
if info != nil {
o.setMetadata(info)
} else {
@@ -332,7 +325,7 @@
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, "", nil)
return f.newObjectWithInfo(remote, nil)
}

// List the objects and directories in dir into entries. The
@@ -345,10 +338,7 @@
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {

dir = f.dirNames.Load(dir)
fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
remote := f.cleanRemote(dir)
fsDirPath := f.localPath(dir)
_, err = os.Stat(fsDirPath)
if err != nil {
return nil, fs.ErrorDirNotFound
@@ -410,11 +400,11 @@
for _, fi := range fis {
name := fi.Name()
mode := fi.Mode()
newRemote := path.Join(remote, name)
newPath := filepath.Join(fsDirPath, name)
newRemote := f.cleanRemote(dir, name)
// Follow symlinks if required
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
fi, err = os.Stat(newPath)
localPath := filepath.Join(fsDirPath, name)
fi, err = os.Stat(localPath)
if os.IsNotExist(err) {
// Skip bad symlinks
err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
@@ -431,7 +421,7 @@
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
d := fs.NewDir(f.dirNames.Save(newRemote, f.cleanRemote(newRemote)), fi.ModTime())
d := fs.NewDir(newRemote, fi.ModTime())
entries = append(entries, d)
}
} else {
@@ -439,7 +429,7 @@
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
newRemote += linkSuffix
}
fso, err := f.newObjectWithInfo(newRemote, newPath, fi)
fso, err := f.newObjectWithInfo(newRemote, fi)
if err != nil {
return nil, err
}
@@ -452,67 +442,28 @@
return entries, nil
}

// cleanRemote makes string a valid UTF-8 string for remote strings.
//
// Any invalid UTF-8 characters will be replaced with utf8.RuneError
// It also normalises the UTF-8 and converts the slashes if necessary.
func (f *Fs) cleanRemote(name string) string {
if !utf8.ValidString(name) {
f.wmu.Lock()
if _, ok := f.warned[name]; !ok {
fs.Logf(f, "Replacing invalid UTF-8 characters in %q", name)
f.warned[name] = struct{}{}
func (f *Fs) cleanRemote(dir, filename string) (remote string) {
remote = path.Join(dir, enc.ToStandardName(filename))

if !utf8.ValidString(filename) {
f.warnedMu.Lock()
if _, ok := f.warned[remote]; !ok {
fs.Logf(f, "Replacing invalid UTF-8 characters in %q", remote)
f.warned[remote] = struct{}{}
}
f.wmu.Unlock()
name = string([]rune(name))
f.warnedMu.Unlock()
}
name = filepath.ToSlash(name)
return name
return
}

// mapper maps raw to cleaned directory names
type mapper struct {
mu sync.RWMutex // mutex to protect the below
m map[string]string // map of un-normalised directory names
}

func newMapper() *mapper {
return &mapper{
m: make(map[string]string),
}
}

// Lookup a directory name to make a local name (reverses
// cleanDirName)
//
// FIXME this is temporary before we make a proper Directory object
func (m *mapper) Load(in string) string {
m.mu.RLock()
out, ok := m.m[in]
m.mu.RUnlock()
if ok {
return out
}
return in
}

// Cleans a directory name recording if it needed to be altered
//
// FIXME this is temporary before we make a proper Directory object
func (m *mapper) Save(in, out string) string {
if in != out {
m.mu.Lock()
m.m[out] = in
m.mu.Unlock()
}
return out
func (f *Fs) localPath(name string) string {
return filepath.Join(f.root, filepath.FromSlash(enc.FromStandardPath(name)))
}

// Put the Object to the local filesystem
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote()
// Temporary Object under construction - info filled in by Update()
o := f.newObject(remote, "")
o := f.newObject(src.Remote())
err := o.Update(ctx, in, src, options...)
if err != nil {
return nil, err
@@ -528,13 +479,13 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
root := f.cleanPath(filepath.Join(f.root, dir))
err := os.MkdirAll(root, 0777)
localPath := f.localPath(dir)
err := os.MkdirAll(localPath, 0777)
if err != nil {
return err
}
if dir == "" {
fi, err := f.lstat(root)
fi, err := f.lstat(localPath)
if err != nil {
return err
}
@@ -547,8 +498,7 @@
//
// If it isn't empty it will return an error
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
root := f.cleanPath(filepath.Join(f.root, dir))
return os.Remove(root)
return os.Remove(f.localPath(dir))
}

// Precision of the file system
@@ -644,7 +594,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

// Temporary Object under construction
dstObj := f.newObject(remote, "")
dstObj := f.newObject(remote)

// Check it is a file if it exists
err := dstObj.lstat()
@@ -701,8 +651,8 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
srcPath := f.cleanPath(filepath.Join(srcFs.root, srcRemote))
dstPath := f.cleanPath(filepath.Join(f.root, dstRemote))
srcPath := srcFs.localPath(srcRemote)
dstPath := f.localPath(dstRemote)

// Check if destination exists
_, err := os.Lstat(dstPath)
@@ -736,7 +686,7 @@

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Supported
return hash.Supported()
}

// ------------------------------------------------------------
@@ -836,13 +786,6 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {

// Storable returns a boolean showing if this object is storable
func (o *Object) Storable() bool {
// Check for control characters in the remote name and show non storable
for _, c := range o.Remote() {
if c >= 0x00 && c < 0x20 || c == 0x7F {
fs.Logf(o.fs, "Can't store file with control characters: %q", o.Remote())
return false
}
}
mode := o.mode
if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks {
if !o.fs.opt.SkipSymlinks {
@@ -1087,7 +1030,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
// Temporary Object under construction
o := f.newObject(remote, "")
o := f.newObject(remote)

err := o.mkdirAll()
if err != nil {
@@ -1139,49 +1082,32 @@ func (o *Object) Remove(ctx context.Context) error {
return remove(o.path)
}

// cleanPathFragment cleans an OS path fragment which is part of a
// bigger path and not necessarily absolute
func cleanPathFragment(s string) string {
if s == "" {
return s
}
s = filepath.Clean(s)
func cleanRootPath(s string, noUNC bool) string {
if runtime.GOOS == "windows" {
s = strings.Replace(s, `/`, `\`, -1)
}
return s
}
s = filepath.ToSlash(s)
vol := filepath.VolumeName(s)
s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s)

// cleanPath cleans and makes absolute the path passed in and returns
// an OS path.
//
// The input might be in OS form or rclone form or a mixture, but the
// output is in OS form.
//
// On windows it makes the path UNC also and replaces any characters
// Windows can't deal with with their replacements.
func (f *Fs) cleanPath(s string) string {
s = cleanPathFragment(s)
if runtime.GOOS == "windows" {
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
if !f.opt.NoUNC {
if !noUNC {
// Convert to UNC
s = uncPath(s)
}
s = cleanWindowsName(f, s)
} else {
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
return s
}
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
s = enc.FromStandardPath(s)
return s
}

@@ -1190,63 +1116,21 @@ var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)

// uncPath converts an absolute Windows path
// to a UNC long path.
func uncPath(s string) string {
// UNC can NOT use "/", so convert all to "\"
s = strings.Replace(s, `/`, `\`, -1)

func uncPath(l string) string {
// If prefix is "\\", we already have a UNC path or server.
if strings.HasPrefix(s, `\\`) {
if strings.HasPrefix(l, `\\`) {
// If already long path, just keep it
if strings.HasPrefix(s, `\\?\`) {
return s
if strings.HasPrefix(l, `\\?\`) {
return l
}

// Trim "\\" from path and add UNC prefix.
return `\\?\UNC\` + strings.TrimPrefix(s, `\\`)
return `\\?\UNC\` + strings.TrimPrefix(l, `\\`)
}
if isAbsWinDrive.MatchString(s) {
return `\\?\` + s
if isAbsWinDrive.MatchString(l) {
return `\\?\` + l
}
return s
}

// cleanWindowsName will clean invalid Windows characters replacing them with _
func cleanWindowsName(f *Fs, name string) string {
original := name
var name2 string
if strings.HasPrefix(name, `\\?\`) {
name2 = `\\?\`
name = strings.TrimPrefix(name, `\\?\`)
}
if strings.HasPrefix(name, `//?/`) {
name2 = `//?/`
name = strings.TrimPrefix(name, `//?/`)
}
// Colon is allowed as part of a drive name X:\
colonAt := strings.Index(name, ":")
if colonAt > 0 && colonAt < 3 && len(name) > colonAt+1 {
// Copy to name2, which is unfiltered
name2 += name[0 : colonAt+1]
name = name[colonAt+1:]
}

name2 += strings.Map(func(r rune) rune {
switch r {
case '<', '>', '"', '|', '?', '*', ':':
return '_'
}
return r
}, name)

if name2 != original && f != nil {
f.wmu.Lock()
if _, ok := f.warned[name]; !ok {
fs.Logf(f, "Replacing invalid characters in %q to %q", name, name2)
f.warned[name] = struct{}{}
}
f.wmu.Unlock()
}
return name2
return l
||||
// Check the interfaces are satisfied
|
||||
@@ -25,19 +25,6 @@ func TestMain(m *testing.M) {
fstest.TestMain(m)
}
func TestMapper(t *testing.T) {
m := newMapper()
assert.Equal(t, m.m, map[string]string{})
assert.Equal(t, "potato", m.Save("potato", "potato"))
assert.Equal(t, m.m, map[string]string{})
assert.Equal(t, "-r'áö", m.Save("-r?'a´o¨", "-r'áö"))
assert.Equal(t, m.m, map[string]string{
"-r'áö": "-r?'a´o¨",
})
assert.Equal(t, "potato", m.Load("potato"))
assert.Equal(t, "-r?'a´o¨", m.Load("-r'áö"))
}
// Test copy with source file that's updating
func TestUpdatingCheck(t *testing.T) {
r := fstest.NewRun(t)
@@ -57,7 +44,7 @@ func TestUpdatingCheck(t *testing.T) {
require.NoError(t, err)
o := &Object{size: fi.Size(), modTime: fi.ModTime(), fs: &Fs{}}
wrappedFd := readers.NewLimitedReadCloser(fd, -1)
hash, err := hash.NewMultiHasherTypes(hash.Supported)
hash, err := hash.NewMultiHasherTypes(hash.Supported())
require.NoError(t, err)
in := localOpenFile{
o: o,
@@ -1,29 +1,26 @@
package local
import (
"runtime"
"testing"
)
var uncTestPaths = []string{
"C:\\Ba*d\\P|a?t<h>\\Windows\\Folder",
"C:/Ba*d/P|a?t<h>/Windows\\Folder",
"C:\\Windows\\Folder",
"\\\\?\\C:\\Windows\\Folder",
"//?/C:/Windows/Folder",
"\\\\?\\UNC\\server\\share\\Desktop",
"\\\\?\\unC\\server\\share\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"\\\\server\\share\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"C:\\Desktop\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"C:\\AbsoluteToRoot\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path\\Very Long path",
"\\\\server\\share\\Desktop",
"\\\\?\\UNC\\\\share\\folder\\Desktop",
"\\\\server\\share",
`C:\Ba*d\P|a?t<h>\Windows\Folder`,
`C:\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\UNC\server\share\Desktop`,
`\\?\unC\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\server\share\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`C:\Desktop\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`C:\AbsoluteToRoot\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path\Very Long path`,
`\\server\share\Desktop`,
`\\?\UNC\\share\folder\Desktop`,
`\\server\share`,
}

var uncTestPathsResults = []string{
`\\?\C:\Ba*d\P|a?t<h>\Windows\Folder`,
`\\?\C:\Ba*d\P|a?t<h>\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\C:\Windows\Folder`,
`\\?\UNC\server\share\Desktop`,
@@ -51,38 +48,23 @@ func TestUncPaths(t *testing.T) {
}
}
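The expected results above pin down the conversion the test exercises: a drive-letter path gains the `\\?\` extended-length prefix and a `\\server\share` UNC path becomes `\\?\UNC\server\share`. A minimal sketch of that rule (a hypothetical helper, not the backend's actual implementation, assuming `strings` is imported):

func toExtendedPath(p string) string {
	if strings.HasPrefix(p, `\\?\`) {
		return p // already extended-length
	}
	if strings.HasPrefix(p, `\\`) {
		return `\\?\UNC` + p[1:] // \\server\share -> \\?\UNC\server\share
	}
	return `\\?\` + p // C:\dir -> \\?\C:\dir
}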
var utf8Tests = [][2]string{
{"ABC", "ABC"},
{string([]byte{0x80}), "�"},
{string([]byte{'a', 0x80, 'b'}), "a�b"},
}

func TestCleanRemote(t *testing.T) {
f := &Fs{}
f.warned = make(map[string]struct{})
for _, test := range utf8Tests {
got := f.cleanRemote(test[0])
expect := test[1]
if got != expect {
t.Fatalf("got %q, expected %q", got, expect)
}
}
}

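The table above specifies what cleanRemote must do: any byte that is not valid UTF-8 is replaced with the Unicode replacement character U+FFFD (`�`). A standalone sketch of that behaviour, assuming `unicode/utf8` is imported (not the backend's actual code):

func cleanUTF8(name string) string {
	if utf8.ValidString(name) {
		return name
	}
	// Converting through []rune maps each invalid byte to utf8.RuneError.
	return string([]rune(name))
}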
// Test Windows character replacements
var testsWindows = [][2]string{
{`c:\temp`, `c:\temp`},
{`\\?\UNC\theserver\dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
{`//?/UNC/theserver/dir\file.txt`, `//?/UNC/theserver/dir\file.txt`},
{"c:/temp", "c:/temp"},
{"/temp/file.txt", "/temp/file.txt"},
{`!\"#¤%&/()=;:*^?+-`, "!\\_#¤%&/()=;__^_+-"},
{`<>"|?*:&\<>"|?*:&\<>"|?*:&`, "_______&\\_______&\\_______&"},
{`//?/UNC/theserver/dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
{`c:/temp`, `c:\temp`},
{`/temp/file.txt`, `\temp\file.txt`},
{`c:\!\"#¤%&/()=;:*^?+-`, `c:\!\"#¤%&\()=;:*^?+-`},
{`c:\<>"|?*:&\<>"|?*:&\<>"|?*:&`, `c:\<>"|?*:&\<>"|?*:&\<>"|?*:&`},
}

func TestCleanWindows(t *testing.T) {
if runtime.GOOS != "windows" {
t.Skipf("windows only")
}
for _, test := range testsWindows {
got := cleanWindowsName(nil, test[0])
got := cleanRootPath(test[0], true)
expect := test[1]
if got != expect {
t.Fatalf("got %q, expected %q", got, expect)

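The cases in the first half of the table show that only the name parts are cleaned: reserved characters such as `<>"|?*:` become underscores while drive letters, UNC prefixes and path separators survive. A sketch of that per-fragment mapping as inferred from the table (a hypothetical helper, assuming `strings` is imported):

func cleanWindowsFragment(name string) string {
	return strings.Map(func(r rune) rune {
		switch r {
		case '<', '>', '"', '|', '?', '*', ':':
			return '_' // reserved on Windows
		}
		return r
	}, name)
}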
@@ -27,6 +27,7 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
@@ -41,6 +42,8 @@ import (
"golang.org/x/oauth2"
)

const enc = encodings.Mailru

// Global constants
const (
minSleepPacer = 10 * time.Millisecond
@@ -59,6 +62,9 @@ var (
ErrorDirAlreadyExists = errors.New("directory already exists")
ErrorDirSourceNotExists = errors.New("directory source does not exist")
ErrorInvalidName = errors.New("invalid characters in object name")

// MrHashType is the hash.Type for Mailru
MrHashType hash.Type
)

// Description of how to authorize
@@ -74,6 +80,7 @@ var oauthConfig = &oauth2.Config{

// Register with Fs
func init() {
MrHashType = hash.RegisterHash("MailruHash", 40, mrhash.New)
fs.Register(&fs.RegInfo{
Name: "mailru",
Description: "Mail.ru Cloud",
@@ -217,11 +224,11 @@ var retryErrorCodes = []int{
// deserve to be retried. It returns the err as a convenience.
// Retries password authorization (once) in a special case of access denied.
func shouldRetry(res *http.Response, err error, f *Fs, opts *rest.Opts) (bool, error) {
if res.StatusCode == 403 && f.opt.Password != "" && !f.passFailed {
if res != nil && res.StatusCode == 403 && f.opt.Password != "" && !f.passFailed {
reAuthErr := f.reAuthorize(opts, err)
return reAuthErr == nil, err // return an original error
}
if f.quirks.retry400 && res.StatusCode == 400 {
if res != nil && res.StatusCode == 400 && f.quirks.retry400 {
return true, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
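Both rewritten conditions now check `res != nil` before touching `res.StatusCode`: on a transport-level failure the error is non-nil but there is no response at all, and the old code would panic. The general pattern, as a sketch:

// Never read StatusCode unless the request actually produced a response.
func statusIs(res *http.Response, code int) bool {
	return res != nil && res.StatusCode == code
}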
@@ -280,7 +287,7 @@ type Fs struct {

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
fs.Debugf(nil, ">>> NewFs %q %q", name, root)
// fs.Debugf(nil, ">>> NewFs %q %q", name, root)
ctx := context.Background() // Note: NewFs does not pass context!

// Parse config into Options struct
@@ -515,7 +522,7 @@ func (f *Fs) accessToken() (string, error) {

// absPath converts root-relative remote to absolute home path
func (f *Fs) absPath(remote string) string {
return "/" + path.Join(f.root, strings.Trim(remote, "/"))
return path.Join("/", f.root, remote)
}

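path.Join cleans its result, so the explicit leading "/" concatenation and the strings.Trim become unnecessary: redundant slashes collapse and the "/" first argument roots the result. For example:

path.Join("/", "backup", "dir/file.txt")    // "/backup/dir/file.txt"
path.Join("/", "backup/", "/dir//file.txt") // "/backup/dir/file.txt"
path.Join("/", "", "")                      // "/"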
// relPath converts absolute home path to root-relative remote
@@ -600,7 +607,7 @@ func (f *Fs) readItemMetaData(ctx context.Context, path string) (entry fs.DirEnt
Path: "/api/m1/file",
Parameters: url.Values{
"access_token": {token},
"home": {path},
"home": {enc.FromStandardPath(path)},
"offset": {"0"},
"limit": {strconv.Itoa(maxInt32)},
},
@@ -635,7 +642,7 @@ func (f *Fs) readItemMetaData(ctx context.Context, path string) (entry fs.DirEnt
// =0 - for an empty directory
// >0 - for a non-empty directory
func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.DirEntry, dirSize int, err error) {
remote, err := f.relPath(item.Home)
remote, err := f.relPath(enc.ToStandardPath(item.Home))
if err != nil {
return nil, -1, err
}
@@ -668,7 +675,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
// dir should be "" to list the root, and should not have trailing slashes.
// This should return ErrDirNotFound if the directory isn't found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
fs.Debugf(f, ">>> List: %q", dir)
// fs.Debugf(f, ">>> List: %q", dir)

if f.quirks.binlist {
entries, err = f.listBin(ctx, f.absPath(dir), 1)
@@ -682,7 +689,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
names = append(names, entry.Remote())
}
sort.Strings(names)
fs.Debugf(f, "List(%q): %v", dir, names)
// fs.Debugf(f, "List(%q): %v", dir, names)
}

return
@@ -701,7 +708,7 @@ func (f *Fs) listM1(ctx context.Context, dirPath string, offset int, limit int)
params.Set("limit", strconv.Itoa(limit))

data := url.Values{}
data.Set("home", dirPath)
data.Set("home", enc.FromStandardPath(dirPath))

opts := rest.Opts{
Method: "POST",
@@ -749,7 +756,7 @@ func (f *Fs) listBin(ctx context.Context, dirPath string, depth int) (entries fs

req := api.NewBinWriter()
req.WritePu16(api.OperationFolderList)
req.WriteString(dirPath)
req.WriteString(enc.FromStandardPath(dirPath))
req.WritePu32(int64(depth))
req.WritePu32(int64(options))
req.WritePu32(0)
@@ -885,7 +892,7 @@ func (t *treeState) NextRecord() (fs.DirEntry, error) {
if (head & 4096) != 0 {
t.dunnoNodeID = r.ReadNBytes(api.DunnoNodeIDLength)
}
name := string(r.ReadBytesByLength())
name := enc.FromStandardPath(string(r.ReadBytesByLength()))
t.dunno1 = int(r.ReadULong())
t.dunno2 = 0
t.dunno3 = 0
@@ -1019,12 +1026,12 @@ func (rev *treeRevision) Read(data *api.BinReader) error {

// CreateDir makes a directory (parent must exist)
func (f *Fs) CreateDir(ctx context.Context, path string) error {
fs.Debugf(f, ">>> CreateDir %q", path)
// fs.Debugf(f, ">>> CreateDir %q", path)

req := api.NewBinWriter()
req.WritePu16(api.OperationCreateFolder)
req.WritePu16(0) // revision
req.WriteString(path)
req.WriteString(enc.FromStandardPath(path))
req.WritePu32(0)

token, err := f.accessToken()
@@ -1081,7 +1088,7 @@ func (f *Fs) CreateDir(ctx context.Context, path string) error {
// already exists. As a workaround, users can add string "atomicmkdir" in the
// hidden `quirks` parameter or in the `--mailru-quirks` command-line option.
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
fs.Debugf(f, ">>> Mkdir %q", dir)
// fs.Debugf(f, ">>> Mkdir %q", dir)
err := f.mkDirs(ctx, f.absPath(dir))
if err == ErrorDirAlreadyExists && !f.quirks.atomicmkdir {
return nil
@@ -1142,7 +1149,7 @@ func (f *Fs) mkParentDirs(ctx context.Context, path string) error {
// Rmdir deletes a directory.
// Returns an error if it isn't empty.
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
fs.Debugf(f, ">>> Rmdir %q", dir)
// fs.Debugf(f, ">>> Rmdir %q", dir)
return f.purgeWithCheck(ctx, dir, true, "rmdir")
}

@@ -1150,7 +1157,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// Optional interface: Only implement this if you have a way of deleting
// all the files quicker than just running Remove() on the result of List()
func (f *Fs) Purge(ctx context.Context) error {
fs.Debugf(f, ">>> Purge")
// fs.Debugf(f, ">>> Purge")
return f.purgeWithCheck(ctx, "", false, "purge")
}

@@ -1179,7 +1186,7 @@ func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) error {
return err
}

data := url.Values{"home": {path}}
data := url.Values{"home": {enc.FromStandardPath(path)}}
opts := rest.Opts{
Method: "POST",
Path: "/api/m1/file/remove",
@@ -1212,7 +1219,7 @@ func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) error {
// Will only be called if src.Fs().Name() == f.Name()
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(f, ">>> Copy %q %q", src.Remote(), remote)
// fs.Debugf(f, ">>> Copy %q %q", src.Remote(), remote)

srcObj, ok := src.(*Object)
if !ok {
@@ -1228,7 +1235,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
srcPath := srcObj.absPath()
dstPath := f.absPath(remote)
overwrite := false
fs.Debugf(f, "copy %q -> %q\n", srcPath, dstPath)
// fs.Debugf(f, "copy %q -> %q\n", srcPath, dstPath)

err := f.mkParentDirs(ctx, dstPath)
if err != nil {
@@ -1236,8 +1243,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

data := url.Values{}
data.Set("home", srcPath)
data.Set("folder", parentDir(dstPath))
data.Set("home", enc.FromStandardPath(srcPath))
data.Set("folder", enc.FromStandardPath(parentDir(dstPath)))
data.Set("email", f.opt.Username)
data.Set("x-email", f.opt.Username)

@@ -1275,9 +1282,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fmt.Errorf("copy failed with code %d", response.Status)
}

tmpPath := response.Body
tmpPath := enc.ToStandardPath(response.Body)
if tmpPath != dstPath {
fs.Debugf(f, "rename temporary file %q -> %q\n", tmpPath, dstPath)
// fs.Debugf(f, "rename temporary file %q -> %q\n", tmpPath, dstPath)
err = f.moveItemBin(ctx, tmpPath, dstPath, "rename temporary file")
if err != nil {
_ = f.delete(ctx, tmpPath, false) // ignore error
@@ -1307,7 +1314,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Will only be called if src.Fs().Name() == f.Name()
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(f, ">>> Move %q %q", src.Remote(), remote)
// fs.Debugf(f, ">>> Move %q %q", src.Remote(), remote)

srcObj, ok := src.(*Object)
if !ok {
@@ -1350,9 +1357,9 @@ func (f *Fs) moveItemBin(ctx context.Context, srcPath, dstPath, opName string) e
req := api.NewBinWriter()
req.WritePu16(api.OperationRename)
req.WritePu32(0) // old revision
req.WriteString(srcPath)
req.WriteString(enc.FromStandardPath(srcPath))
req.WritePu32(0) // new revision
req.WriteString(dstPath)
req.WriteString(enc.FromStandardPath(dstPath))
req.WritePu32(0) // dunno

opts := rest.Opts{
@@ -1393,7 +1400,7 @@ func (f *Fs) moveItemBin(ctx context.Context, srcPath, dstPath, opName string) e
// If it isn't possible then return fs.ErrorCantDirMove
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
fs.Debugf(f, ">>> DirMove %q %q", srcRemote, dstRemote)
// fs.Debugf(f, ">>> DirMove %q %q", srcRemote, dstRemote)

srcFs, ok := src.(*Fs)
if !ok {
@@ -1407,7 +1414,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
srcPath := srcFs.absPath(srcRemote)
dstPath := f.absPath(dstRemote)
fs.Debugf(srcFs, "DirMove [%s]%q --> [%s]%q\n", srcRemote, srcPath, dstRemote, dstPath)
// fs.Debugf(srcFs, "DirMove [%s]%q --> [%s]%q\n", srcRemote, srcPath, dstRemote, dstPath)

// Refuse to move to or from the root
if len(srcPath) <= len(srcFs.root) || len(dstPath) <= len(f.root) {
@@ -1435,7 +1442,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
fs.Debugf(f, ">>> PublicLink %q", remote)
// fs.Debugf(f, ">>> PublicLink %q", remote)

token, err := f.accessToken()
if err != nil {
@@ -1443,7 +1450,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
}

data := url.Values{}
data.Set("home", f.absPath(remote))
data.Set("home", enc.FromStandardPath(f.absPath(remote)))
data.Set("email", f.opt.Username)
data.Set("x-email", f.opt.Username)

@@ -1477,7 +1484,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er

// CleanUp permanently deletes all trashed files/folders
func (f *Fs) CleanUp(ctx context.Context) error {
fs.Debugf(f, ">>> CleanUp")
// fs.Debugf(f, ">>> CleanUp")

token, err := f.accessToken()
if err != nil {
@@ -1517,7 +1524,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {

// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
fs.Debugf(f, ">>> About")
// fs.Debugf(f, ">>> About")

token, err := f.accessToken()
if err != nil {
@@ -1561,7 +1568,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
size: src.Size(),
modTime: src.ModTime(ctx),
}
fs.Debugf(f, ">>> Put: %q %d '%v'", o.remote, o.size, o.modTime)
// fs.Debugf(f, ">>> Put: %q %d '%v'", o.remote, o.size, o.modTime)
return o, o.Update(ctx, in, src, options...)
}

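The edits in this file follow one symmetric rule: every path sent to the API is wrapped in enc.FromStandardPath, and every path received from the API is unwrapped with enc.ToStandardPath, so the rest of rclone only ever sees the standard encoding. Condensed from the hunks above:

// Outbound: encode just before the value reaches the wire.
data.Set("home", enc.FromStandardPath(dirPath))
// Inbound: decode as soon as the value leaves the wire.
remote, err := f.relPath(enc.ToStandardPath(item.Home))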
@@ -1581,23 +1588,28 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}

var (
fileBuf []byte
fileHash []byte
newHash []byte
fileBuf []byte
fileHash []byte
newHash []byte
trySpeedup bool
)

// Request hash from source
if srcHash, err := src.Hash(ctx, hash.Mailru); err == nil && srcHash != "" {
fileHash, _ = mrhash.DecodeString(srcHash)
}

// Try speedup method if it's globally enabled and source hash is available
trySpeedup := o.fs.opt.SpeedupEnable
if trySpeedup && fileHash != nil {
if o.putByHash(ctx, fileHash, src, "source") {
return nil
// Don't disturb the source if file fits in hash.
// Skip an extra speedup request if file fits in hash.
if size > mrhash.Size {
// Request hash from source.
if srcHash, err := src.Hash(ctx, MrHashType); err == nil && srcHash != "" {
fileHash, _ = mrhash.DecodeString(srcHash)
}

// Try speedup if it's globally enabled and source hash is available.
trySpeedup = o.fs.opt.SpeedupEnable
if trySpeedup && fileHash != nil {
if o.putByHash(ctx, fileHash, src, "source") {
return nil
}
trySpeedup = false // speedup failed, force upload
}
trySpeedup = false // speedup failed, force upload
}

// Need to calculate hash, check whether file is still eligible for speedup
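The restructured block only attempts speedup when the file is larger than a mailru hash (a file that "fits in the hash" would cost an extra round trip for nothing), and falls back to a normal upload when putByHash fails. The control flow, condensed from the hunk above:

if size > mrhash.Size {
	if srcHash, err := src.Hash(ctx, MrHashType); err == nil && srcHash != "" {
		fileHash, _ = mrhash.DecodeString(srcHash)
	}
	trySpeedup = o.fs.opt.SpeedupEnable
	if trySpeedup && fileHash != nil {
		if o.putByHash(ctx, fileHash, src, "source") {
			return nil // the server already had the content
		}
		trySpeedup = false // speedup failed, force upload
	}
}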
@@ -1757,7 +1769,7 @@ func makeTempFile(ctx context.Context, tmpFs fs.Fs, wrapIn io.Reader, src fs.Obj
hashType := hash.SHA1

// Calculate Mailru and spool verification hashes in transit
hashSet := hash.NewHashSet(hash.Mailru, hashType)
hashSet := hash.NewHashSet(MrHashType, hashType)
hasher, err := hash.NewMultiHasherTypes(hashSet)
if err != nil {
return nil, nil, err
@@ -1779,7 +1791,7 @@ func makeTempFile(ctx context.Context, tmpFs fs.Fs, wrapIn io.Reader, src fs.Obj
return nil, nil, mrhash.ErrorInvalidHash
}

mrHash, err = mrhash.DecodeString(sums[hash.Mailru])
mrHash, err = mrhash.DecodeString(sums[MrHashType])
return
}

@@ -1883,7 +1895,7 @@ type Object struct {
// NewObject finds an Object at the remote.
// If object can't be found it fails with fs.ErrorObjectNotFound
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
fs.Debugf(f, ">>> NewObject %q", remote)
// fs.Debugf(f, ">>> NewObject %q", remote)
o := &Object{
fs: f,
remote: remote,
@@ -1967,7 +1979,7 @@ func (o *Object) Size() int64 {
// Hash returns the MD5 or SHA1 sum of an object
// returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t == hash.Mailru {
if t == MrHashType {
return hex.EncodeToString(o.mrHash), nil
}
return "", hash.ErrUnsupported
@@ -1982,7 +1994,7 @@ func (o *Object) Storable() bool {
//
// Commits the datastore
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
fs.Debugf(o, ">>> SetModTime [%v]", modTime)
// fs.Debugf(o, ">>> SetModTime [%v]", modTime)
o.modTime = modTime
return o.addFileMetaData(ctx, true)
}
@@ -2003,7 +2015,7 @@ func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {
req := api.NewBinWriter()
req.WritePu16(api.OperationAddFile)
req.WritePu16(0) // revision
req.WriteString(o.absPath())
req.WriteString(enc.FromStandardPath(o.absPath()))
req.WritePu64(o.size)
req.WritePu64(o.modTime.Unix())
req.WritePu32(0)
@@ -2055,7 +2067,7 @@ func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
fs.Debugf(o, ">>> Remove")
// fs.Debugf(o, ">>> Remove")
return o.fs.delete(ctx, o.absPath(), false)
}

@@ -2088,7 +2100,7 @@ func getTransferRange(size int64, options ...fs.OpenOption) (start int64, end in

// Open an object for read and download its content
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.Debugf(o, ">>> Open")
// fs.Debugf(o, ">>> Open")

token, err := o.fs.accessToken()
if err != nil {
@@ -2101,7 +2113,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
opts := rest.Opts{
Method: "GET",
Options: options,
Path: url.PathEscape(strings.TrimLeft(o.absPath(), "/")),
Path: url.PathEscape(strings.TrimLeft(enc.FromStandardPath(o.absPath()), "/")),
Parameters: url.Values{
"client_id": {api.OAuthClientID},
"token": {token},
@@ -2349,7 +2361,7 @@ func (f *Fs) Precision() time.Duration {

// Hashes returns the supported hash sets
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.Mailru)
return hash.Set(MrHashType)
}

// Features returns the optional features of this Fs

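A recurring pattern in this changeset is replacing a predeclared constant (hash.Mailru here, hash.QuickXorHash in the OneDrive file below) with a hash.Type registered at init time, which keeps backend-specific hashes out of the core fs/hash package:

// Registered once in init(); MrHashType then replaces hash.Mailru everywhere.
MrHashType = hash.RegisterHash("MailruHash", 40, mrhash.New)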
@@ -29,6 +29,7 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/pacer"
@@ -36,6 +37,8 @@ import (
mega "github.com/t3rm1n4l/go-mega"
)

const enc = encodings.Mega

const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
@@ -245,14 +248,15 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}

// splitNodePath splits nodePath into / separated parts, returning nil if it
// should refer to the root
// should refer to the root.
// It also encodes the parts into backend specific encoding
func splitNodePath(nodePath string) (parts []string) {
nodePath = path.Clean(nodePath)
parts = strings.Split(nodePath, "/")
if len(parts) == 1 && (parts[0] == "." || parts[0] == "/") {
if nodePath == "." || nodePath == "/" {
return nil
}
return parts
nodePath = enc.FromStandardPath(nodePath)
return strings.Split(nodePath, "/")
}

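After the rewrite, splitNodePath cleans the path first, returns nil only for "." and "/", and encodes the whole path before splitting. Expected behaviour, sketched from the new body:

splitNodePath("/")        // nil - refers to the root
splitNodePath("./x/../y") // path.Clean gives "y", so []string{"y"}
splitNodePath("a/b")      // []string{"a", "b"}, after enc.FromStandardPath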
// findNode looks up the node for the path of the name given from the root given
@@ -418,7 +422,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
errors := 0
// similar to f.deleteNode(trash) but with HardDelete as true
for _, item := range items {
fs.Debugf(f, "Deleting trash %q", item.GetName())
fs.Debugf(f, "Deleting trash %q", enc.ToStandardName(item.GetName()))
deleteErr := f.pacer.Call(func() (bool, error) {
err := f.srv.Delete(item, true)
return shouldRetry(err)
@@ -500,7 +504,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
var iErr error
_, err = f.list(ctx, dirNode, func(info *mega.Node) bool {
remote := path.Join(dir, info.GetName())
remote := path.Join(dir, enc.ToStandardName(info.GetName()))
switch info.GetType() {
case mega.FOLDER, mega.ROOT, mega.INBOX, mega.TRASH:
d := fs.NewDir(remote, info.GetTimeStamp()).SetID(info.GetHash())
@@ -722,7 +726,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
if srcLeaf != dstLeaf {
//log.Printf("rename %q to %q", srcLeaf, dstLeaf)
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Rename(info, dstLeaf)
err = f.srv.Rename(info, enc.FromStandardName(dstLeaf))
return shouldRetry(err)
})
if err != nil {
@@ -871,13 +875,13 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
}
// move them into place
for _, info := range infos {
fs.Infof(srcDir, "merging %q", info.GetName())
fs.Infof(srcDir, "merging %q", enc.ToStandardName(info.GetName()))
err = f.pacer.Call(func() (bool, error) {
err = f.srv.Move(info, dstDirNode)
return shouldRetry(err)
})
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.GetName(), srcDir)
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", enc.ToStandardName(info.GetName()), srcDir)
}
}
// rmdir (into trash) the now empty source directory
@@ -1120,7 +1124,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

var u *mega.Upload
err = o.fs.pacer.Call(func() (bool, error) {
u, err = o.fs.srv.NewUpload(dirNode, leaf, size)
u, err = o.fs.srv.NewUpload(dirNode, enc.FromStandardName(leaf), size)
return shouldRetry(err)
})
if err != nil {

@@ -15,17 +15,18 @@ import (
"strings"
"time"

"github.com/rclone/rclone/lib/atexit"

"github.com/pkg/errors"
"github.com/rclone/rclone/backend/onedrive/api"
"github.com/rclone/rclone/backend/onedrive/quickxorhash"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
@@ -34,6 +35,8 @@ import (
"golang.org/x/oauth2"
)

const enc = encodings.OneDrive

const (
rcloneClientID = "b15665d9-eda6-4092-8539-0eec376afd59"
rcloneEncryptedClientSecret = "_JUdzh3LnKNqSPcf4Wu5fgMFIQOI8glZu_akYgR8yf6egowNBg-R"
@@ -63,10 +66,14 @@ var (
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}

// QuickXorHashType is the hash.Type for OneDrive
QuickXorHashType hash.Type
)

// Register with Fs
func init() {
QuickXorHashType = hash.RegisterHash("QuickXorHash", 40, quickxorhash.New)
fs.Register(&fs.RegInfo{
Name: "onedrive",
Description: "Microsoft OneDrive",
@@ -218,9 +225,9 @@ func init() {
Help: "Microsoft App Client Secret\nLeave blank normally.",
}, {
Name: "chunk_size",
Help: `Chunk size to upload files with - must be multiple of 320k.
Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).

Above this size files will be chunked - must be multiple of 320k. Note
Above this size files will be chunked - must be multiple of 320k (327,680 bytes). Note
that the chunks will be buffered into memory.`,
Default: defaultChunkSize,
Advanced: true,
@@ -345,7 +352,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// "shared with me" folders in OneDrive Personal (See #2536, #2778)
// This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
opts := newOptsCall(normalizedID, "GET", ":/"+withTrailingColon(rest.URLPathEscape(replaceReservedChars(relPath))))
opts := newOptsCall(normalizedID, "GET", ":/"+withTrailingColon(rest.URLPathEscape(enc.FromStandardPath(relPath))))
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(resp, err)
@@ -368,7 +375,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
} else {
opts = rest.Opts{
Method: "GET",
Path: "/root:/" + rest.URLPathEscape(replaceReservedChars(path)),
Path: "/root:/" + rest.URLPathEscape(enc.FromStandardPath(path)),
}
}
err = f.pacer.Call(func() (bool, error) {
@@ -616,7 +623,7 @@ func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, e
var info *api.Item
opts := newOptsCall(dirID, "POST", "/children")
mkdir := api.CreateItemRequest{
Name: replaceReservedChars(leaf),
Name: enc.FromStandardName(leaf),
ConflictBehavior: "fail",
}
err = f.pacer.Call(func() (bool, error) {
@@ -676,7 +683,7 @@ OUTER:
if item.Deleted != nil {
continue
}
item.Name = restoreReservedChars(item.GetName())
item.Name = enc.ToStandardName(item.GetName())
if fn(item) {
found = true
break OUTER
@@ -913,8 +920,8 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}

srcPath := srcObj.fs.rootSlash() + srcObj.remote
dstPath := f.rootSlash() + remote
srcPath := srcObj.rootPath()
dstPath := f.rootPath(remote)
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
@@ -932,7 +939,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,

id, dstDriveID, _ := parseNormalizedID(directoryID)

replacedLeaf := replaceReservedChars(leaf)
replacedLeaf := enc.FromStandardName(leaf)
copyReq := api.CopyItemRequest{
Name: &replacedLeaf,
ParentReference: api.ItemReference{
@@ -1016,7 +1023,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
opts := newOptsCall(srcObj.id, "PATCH", "")

move := api.MoveItemRequest{
Name: replaceReservedChars(leaf),
Name: enc.FromStandardName(leaf),
ParentReference: &api.ItemReference{
DriveID: dstDriveID,
ID: id,
@@ -1131,7 +1138,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Do the move
opts := newOptsCall(srcID, "PATCH", "")
move := api.MoveItemRequest{
Name: replaceReservedChars(leaf),
Name: enc.FromStandardName(leaf),
ParentReference: &api.ItemReference{
DriveID: dstDriveID,
ID: parsedDstDirID,
@@ -1192,12 +1199,12 @@ func (f *Fs) Hashes() hash.Set {
if f.driveType == driveTypePersonal {
return hash.Set(hash.SHA1)
}
return hash.Set(hash.QuickXorHash)
return hash.Set(QuickXorHashType)
}

// PublicLink returns a link for downloading without account.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
info, _, err := f.readMetaDataForPath(ctx, f.srvPath(remote))
info, _, err := f.readMetaDataForPath(ctx, f.rootPath(remote))
if err != nil {
return "", err
}
@@ -1241,9 +1248,19 @@ func (o *Object) Remote() string {
return o.remote
}

// rootPath returns a path for use in server given a remote
func (f *Fs) rootPath(remote string) string {
return f.rootSlash() + remote
}

// rootPath returns a path for use in local functions
func (o *Object) rootPath() string {
return o.fs.rootPath(o.remote)
}

// srvPath returns a path for use in server given a remote
func (f *Fs) srvPath(remote string) string {
return replaceReservedChars(f.rootSlash() + remote)
return enc.FromStandardPath(f.rootSlash() + remote)
}

// srvPath returns a path for use in server
@@ -1258,7 +1275,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
return o.sha1, nil
}
} else {
if t == hash.QuickXorHash {
if t == QuickXorHashType {
return o.quickxorhash, nil
}
}
@@ -1320,7 +1337,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
if o.hasMetaData {
return nil
}
info, _, err := o.fs.readMetaDataForPath(ctx, o.srvPath())
info, _, err := o.fs.readMetaDataForPath(ctx, o.rootPath())
if err != nil {
if apiErr, ok := err.(*api.Error); ok {
if apiErr.ErrorInfo.Code == "itemNotFound" {
@@ -1355,7 +1372,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
opts = rest.Opts{
Method: "PATCH",
RootURL: rootURL,
Path: "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(leaf)),
Path: "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(enc.FromStandardName(leaf))),
}
} else {
opts = rest.Opts{
@@ -1429,7 +1446,8 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
opts = rest.Opts{
Method: "POST",
RootURL: rootURL,
Path: "/" + drive + "/items/" + id + ":/" + rest.URLPathEscape(replaceReservedChars(leaf)) + ":/createUploadSession",
Path: fmt.Sprintf("/%s/items/%s:/%s:/createUploadSession",
drive, id, rest.URLPathEscape(enc.FromStandardName(leaf))),
}
} else {
opts = rest.Opts{
@@ -1581,7 +1599,7 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
opts = rest.Opts{
Method: "PUT",
RootURL: rootURL,
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(leaf) + ":/content",
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(enc.FromStandardName(leaf)) + ":/content",
ContentLength: &size,
Body: in,
}

@@ -1,91 +0,0 @@
/*
Translate file names for one drive

OneDrive reserved characters

The following characters are OneDrive reserved characters, and can't
be used in OneDrive folder and file names.

onedrive-reserved = "/" / "\" / "*" / "<" / ">" / "?" / ":" / "|"
onedrive-business-reserved
= "/" / "\" / "*" / "<" / ">" / "?" / ":" / "|" / "#" / "%"

Note: Folder names can't end with a period (.).

Note: OneDrive for Business file or folder names cannot begin with a
tilde ('~').

*/

package onedrive

import (
"regexp"
"strings"
)

// charMap holds replacements for characters
//
// Onedrive has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
'*': '＊', // FULLWIDTH ASTERISK
'<': '＜', // FULLWIDTH LESS-THAN SIGN
'>': '＞', // FULLWIDTH GREATER-THAN SIGN
'?': '？', // FULLWIDTH QUESTION MARK
':': '：', // FULLWIDTH COLON
'|': '｜', // FULLWIDTH VERTICAL LINE
'#': '＃', // FULLWIDTH NUMBER SIGN
'%': '％', // FULLWIDTH PERCENT SIGN
'"': '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
'.': '．', // FULLWIDTH FULL STOP
'~': '～', // FULLWIDTH TILDE
' ': '␠', // SYMBOL FOR SPACE
}
invCharMap map[rune]rune
fixEndingInPeriod = regexp.MustCompile(`\.(/|$)`)
fixStartingWithTilde = regexp.MustCompile(`(/|^)~`)
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
)

func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}

// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// Folder names can't end with a period '.'
in = fixEndingInPeriod.ReplaceAllString(in, string(charMap['.'])+"$1")
// OneDrive for Business file or folder names cannot begin with a tilde '~'
in = fixStartingWithTilde.ReplaceAllString(in, "$1"+string(charMap['~']))
// Apparently file names can't start with space either
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Replace reserved characters
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != '.' && c != '~' && c != ' ' {
return replacement
}
return c
}, in)
}

// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}
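The file removed above mapped each reserved ASCII character to its FULLWIDTH Unicode lookalike, and the test deleted below checks the one property that matters: restoreReservedChars is the exact inverse of replaceReservedChars. The invariant, roughly:

// Round-trip invariant the deleted test verifies for every input:
if restoreReservedChars(replaceReservedChars(in)) != in {
	t.Errorf("mapping is not reversible for %q", in)
}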
@@ -1,30 +0,0 @@
package onedrive

import "testing"

func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{`\*<>?:|#%".~`, `＼＊＜＞？：｜＃％＂.~`},
{`\*<>?:|#%".~/\*<>?:|#%".~`, `＼＊＜＞？：｜＃％＂.~/＼＊＜＞？：｜＃％＂.~`},
{" leading space", "␠leading space"},
{"~leading tilde", "～leading tilde"},
{"trailing dot.", "trailing dot．"},
{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
{"~leading tilde/~leading tilde/~leading tilde", "～leading tilde/～leading tilde/～leading tilde"},
{"trailing dot./trailing dot./trailing dot.", "trailing dot．/trailing dot．/trailing dot．"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}
@@ -16,6 +16,7 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
@@ -25,6 +26,8 @@ import (
"github.com/rclone/rclone/lib/rest"
)

const enc = encodings.OpenDrive

const (
defaultEndpoint = "https://dev.opendrive.com/api/v1"
minSleep = 10 * time.Millisecond
@@ -585,7 +588,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
fs: f,
remote: remote,
}
return o, leaf, directoryID, nil
return o, enc.FromStandardName(leaf), directoryID, nil
}

// readMetaDataForPath reads the metadata from the path
@@ -636,7 +639,11 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
var resp *http.Response
response := createFileResponse{}
err := o.fs.pacer.Call(func() (bool, error) {
createFileData := createFile{SessionID: o.fs.session.SessionID, FolderID: directoryID, Name: replaceReservedChars(leaf)}
createFileData := createFile{
SessionID: o.fs.session.SessionID,
FolderID: directoryID,
Name: leaf,
}
opts := rest.Opts{
Method: "POST",
Path: "/upload/create_file.json",
@@ -683,7 +690,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
err = f.pacer.Call(func() (bool, error) {
createDirData := createFolder{
SessionID: f.session.SessionID,
FolderName: replaceReservedChars(leaf),
FolderName: enc.FromStandardName(leaf),
FolderSubParent: pathID,
FolderIsPublic: 0,
FolderPublicUpl: 0,
@@ -729,8 +736,8 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
return "", false, errors.Wrap(err, "failed to get folder list")
}

leaf = enc.FromStandardName(leaf)
for _, folder := range folderList.Folders {
folder.Name = restoreReservedChars(folder.Name)
// fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID)

if leaf == folder.Name {
@@ -777,7 +784,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}

for _, folder := range folderList.Folders {
folder.Name = restoreReservedChars(folder.Name)
folder.Name = enc.ToStandardName(folder.Name)
// fs.Debugf(nil, "Folder: %s (%s)", folder.Name, folder.FolderID)
remote := path.Join(dir, folder.Name)
// cache the directory ID for later lookups
@@ -788,7 +795,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}

for _, file := range folderList.Files {
file.Name = restoreReservedChars(file.Name)
file.Name = enc.ToStandardName(file.Name)
// fs.Debugf(nil, "File: %s (%s)", file.Name, file.FileID)
remote := path.Join(dir, file.Name)
o, err := f.newObjectWithInfo(ctx, remote, &file)
@@ -851,7 +858,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
NoResponse: true,
Path: "/file/filesettings.json",
}
update := modTimeFile{SessionID: o.fs.session.SessionID, FileID: o.id, FileModificationTime: strconv.FormatInt(modTime.Unix(), 10)}
update := modTimeFile{
SessionID: o.fs.session.SessionID,
FileID: o.id,
FileModificationTime: strconv.FormatInt(modTime.Unix(), 10),
}
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, nil)
return o.fs.shouldRetry(resp, err)
@@ -1038,7 +1049,8 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
opts := rest.Opts{
Method: "GET",
Path: "/folder/itembyname.json/" + o.fs.session.SessionID + "/" + directoryID + "?name=" + url.QueryEscape(replaceReservedChars(leaf)),
Path: fmt.Sprintf("/folder/itembyname.json/%s/%s?name=%s",
o.fs.session.SessionID, directoryID, url.QueryEscape(enc.FromStandardName(leaf))),
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &folderList)
return o.fs.shouldRetry(resp, err)

@@ -1,78 +0,0 @@
/*
Translate file names for OpenDrive

OpenDrive reserved characters

The following characters are OpenDrive reserved characters, and can't
be used in OpenDrive folder and file names.

\ / : * ? " < > |

OpenDrive files and folders can't have leading or trailing spaces also.

*/

package opendrive

import (
"regexp"
"strings"
)

// charMap holds replacements for characters
//
// OpenDrive has a restricted set of characters compared to other cloud
// storage systems, so we map these to the FULLWIDTH unicode
// equivalents
//
// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
var (
charMap = map[rune]rune{
'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
':': '：', // FULLWIDTH COLON
'*': '＊', // FULLWIDTH ASTERISK
'?': '？', // FULLWIDTH QUESTION MARK
'"': '＂', // FULLWIDTH QUOTATION MARK
'<': '＜', // FULLWIDTH LESS-THAN SIGN
'>': '＞', // FULLWIDTH GREATER-THAN SIGN
'|': '｜', // FULLWIDTH VERTICAL LINE
' ': '␠', // SYMBOL FOR SPACE
}
fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
fixEndingWithSpace = regexp.MustCompile(` (/|$)`)
invCharMap map[rune]rune
)

func init() {
// Create inverse charMap
invCharMap = make(map[rune]rune, len(charMap))
for k, v := range charMap {
invCharMap[v] = k
}
}

// replaceReservedChars takes a path and substitutes any reserved
// characters in it
func replaceReservedChars(in string) string {
// Filenames can't start with space
in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
// Filenames can't end with space
in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
return strings.Map(func(c rune) rune {
if replacement, ok := charMap[c]; ok && c != ' ' {
return replacement
}
return c
}, in)
}

// restoreReservedChars takes a path and undoes any substitutions
// made by replaceReservedChars
func restoreReservedChars(in string) string {
return strings.Map(func(c rune) rune {
if replacement, ok := invCharMap[c]; ok {
return replacement
}
return c
}, in)
}
@@ -1,28 +0,0 @@
package opendrive

import "testing"

func TestReplace(t *testing.T) {
for _, test := range []struct {
in string
out string
}{
{"", ""},
{"abc 123", "abc 123"},
{`\*<>?:|#%".~`, `＼＊＜＞？：｜#%＂.~`},
{`\*<>?:|#%".~/\*<>?:|#%".~`, `＼＊＜＞？：｜#%＂.~/＼＊＜＞？：｜#%＂.~`},
{" leading space", "␠leading space"},
{" path/ leading spaces", "␠path/␠ leading spaces"},
{"trailing space ", "trailing space␠"},
{"trailing spaces /path ", "trailing spaces ␠/path␠"},
} {
got := replaceReservedChars(test.in)
if got != test.out {
t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
}
got2 := restoreReservedChars(got)
if got2 != test.in {
t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
}
}
}
@@ -26,6 +26,7 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
@@ -35,6 +36,8 @@ import (
"golang.org/x/oauth2"
)

const enc = encodings.Pcloud

const (
rcloneClientID = "DnONSzyJXpm"
rcloneEncryptedClientSecret = "ej1OIF39VOQQ0PXaSdK9ztkLw3tdLNscW2157TKNQdQKkICR4uU7aFg4eFM"
@@ -175,21 +178,6 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return doRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// substitute reserved characters for pcloud
//
// Generally all characters are allowed in filenames, except the NULL
// byte, forward and backslash (/,\ and \0)
func replaceReservedChars(x string) string {
// Backslash for FULLWIDTH REVERSE SOLIDUS
return strings.Replace(x, "\\", "＼", -1)
}

// restore reserved characters for pcloud
func restoreReservedChars(x string) string {
// FULLWIDTH REVERSE SOLIDUS for Backslash
return strings.Replace(x, "＼", "\\", -1)
}

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
@@ -354,7 +342,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
Path: "/createfolder",
Parameters: url.Values{},
}
opts.Parameters.Set("name", replaceReservedChars(leaf))
opts.Parameters.Set("name", enc.FromStandardName(leaf))
opts.Parameters.Set("folderid", dirIDtoNumber(pathID))
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
@@ -430,7 +418,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
continue
}
}
item.Name = restoreReservedChars(item.Name)
item.Name = enc.ToStandardName(item.Name)
if fn(item) {
found = true
break
@@ -622,7 +610,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
Parameters: url.Values{},
}
opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
opts.Parameters.Set("toname", replaceReservedChars(leaf))
opts.Parameters.Set("toname", enc.FromStandardName(leaf))
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
opts.Parameters.Set("mtime", fmt.Sprintf("%d", srcObj.modTime.Unix()))
var resp *http.Response
@@ -701,7 +689,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
Parameters: url.Values{},
}
opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
opts.Parameters.Set("toname", replaceReservedChars(leaf))
opts.Parameters.Set("toname", enc.FromStandardName(leaf))
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
var resp *http.Response
var result api.ItemResult
@@ -798,7 +786,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
Parameters: url.Values{},
}
opts.Parameters.Set("folderid", dirIDtoNumber(srcID))
opts.Parameters.Set("toname", replaceReservedChars(leaf))
opts.Parameters.Set("toname", enc.FromStandardName(leaf))
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
var resp *http.Response
var result api.ItemResult
@@ -1078,7 +1066,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Parameters: url.Values{},
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
}
leaf = replaceReservedChars(leaf)
leaf = enc.FromStandardName(leaf)
opts.Parameters.Set("filename", leaf)
opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
opts.Parameters.Set("nopartial", "1")

backend/premiumizeme/premiumizeme.go
@@ -2,9 +2,7 @@
// object storage system.
package premiumizeme

- /* FIXME
- escaping needs fixing
-
/*
Run of rclone info
stringNeedsEscaping = []rune{
0x00, 0x0A, 0x0D, 0x22, 0x2F, 0x5C, 0xBF, 0xFE
@@ -36,6 +34,7 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
+ "github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
@@ -47,6 +46,8 @@ import (
"golang.org/x/oauth2"
)

+ const enc = encodings.PremiumizeMe
+
const (
rcloneClientID = "658922194"
rcloneEncryptedClientSecret = "B5YIvQoRIhcpAYs8HYeyjb9gK-ftmZEbqdh_gNfc4RgO9Q"
@@ -170,24 +171,6 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

- // substitute reserved characters
- func replaceReservedChars(x string) string {
- // Backslash for FULLWIDTH REVERSE SOLIDUS
- x = strings.Replace(x, "\\", "＼", -1)
- // Double quote for FULLWIDTH QUOTATION MARK
- x = strings.Replace(x, `"`, "＂", -1)
- return x
- }
-
- // restore reserved characters
- func restoreReservedChars(x string) string {
- // FULLWIDTH QUOTATION MARK for Double quote
- x = strings.Replace(x, "＂", `"`, -1)
- // FULLWIDTH REVERSE SOLIDUS for Backslash
- x = strings.Replace(x, "＼", "\\", -1)
- return x
- }
-
// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(ctx context.Context, path string, directoriesOnly bool, filesOnly bool) (info *api.Item, err error) {
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
@@ -381,7 +364,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
Path: "/folder/create",
Parameters: f.baseParams(),
MultipartParams: url.Values{
- "name": {replaceReservedChars(leaf)},
+ "name": {enc.FromStandardName(leaf)},
"parent_id": {pathID},
},
}
@@ -446,7 +429,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
continue
}
- item.Name = restoreReservedChars(item.Name)
+ item.Name = enc.ToStandardName(item.Name)
if fn(item) {
found = true
break
@@ -654,8 +637,8 @@ func (f *Fs) Purge(ctx context.Context) error {
// between directories and a separate one to rename them. We try to
// call the minimum number of API calls.
func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDirectoryID, newDirectoryID string) (err error) {
- newLeaf = replaceReservedChars(newLeaf)
- oldLeaf = replaceReservedChars(oldLeaf)
+ newLeaf = enc.FromStandardName(newLeaf)
+ oldLeaf = enc.FromStandardName(oldLeaf)
doRenameLeaf := oldLeaf != newLeaf
doMove := oldDirectoryID != newDirectoryID

@@ -686,7 +669,7 @@ func (f *Fs) move(ctx context.Context, isFile bool, id, oldLeaf, newLeaf, oldDir
} else {
opts.MultipartParams.Set("folders[]", id)
}
- //replacedLeaf := replaceReservedChars(leaf)
+ //replacedLeaf := enc.FromStandardName(leaf)
var resp *http.Response
var result api.Response
err = f.pacer.Call(func() (bool, error) {
@@ -908,7 +891,7 @@ func (o *Object) Remote() string {

// srvPath returns a path for use in server
func (o *Object) srvPath() string {
- return replaceReservedChars(o.fs.rootSlash() + o.remote)
+ return enc.FromStandardPath(o.fs.rootSlash() + o.remote)
}

// Hash returns the SHA-1 of an object returning a lowercase hex string
@@ -1023,7 +1006,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
- leaf = replaceReservedChars(leaf)
+ leaf = enc.FromStandardName(leaf)

var resp *http.Response
var info api.FolderUploadinfoResponse

backend/putio/fs.go
@@ -145,7 +145,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
var entry putio.File
err = f.pacer.Call(func() (bool, error) {
// fs.Debugf(f, "creating folder. part: %s, parentID: %d", leaf, parentID)
- entry, err = f.client.Files.CreateFolder(ctx, leaf, parentID)
+ entry, err = f.client.Files.CreateFolder(ctx, enc.FromStandardName(leaf), parentID)
return shouldRetry(err)
})
return itoa(entry.ID), err
@@ -172,11 +172,11 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
return
}
for _, child := range children {
- if child.Name == leaf {
+ if enc.ToStandardName(child.Name) == leaf {
found = true
pathIDOut = itoa(child.ID)
if !child.IsDir() {
- err = fs.ErrorNotAFile
+ err = fs.ErrorIsFile
}
return
}
@@ -214,7 +214,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return
}
for _, child := range children {
- remote := path.Join(dir, child.Name)
+ remote := path.Join(dir, enc.ToStandardName(child.Name))
// fs.Debugf(f, "child: %s", remote)
if child.IsDir() {
f.dirCache.Put(remote, itoa(child.ID))
@@ -292,7 +292,7 @@ func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("tus-resumable", "1.0.0")
req.Header.Set("upload-length", strconv.FormatInt(size, 10))
- b64name := base64.StdEncoding.EncodeToString([]byte(name))
+ b64name := base64.StdEncoding.EncodeToString([]byte(enc.FromStandardName(name)))
b64true := base64.StdEncoding.EncodeToString([]byte("true"))
b64parentID := base64.StdEncoding.EncodeToString([]byte(parentID))
b64modifiedAt := base64.StdEncoding.EncodeToString([]byte(modTime.Format(time.RFC3339)))
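createUpload speaks the tus resumable-upload protocol: the tus-resumable and upload-length headers above, plus metadata fields whose values must be base64 encoded. The b64* values are normally joined into a single upload-metadata header of comma-separated "key base64(value)" pairs; a sketch of that assembly (the exact key set put.io expects isn't visible in this hunk, so the keys here are illustrative):

    package main

    import (
    	"encoding/base64"
    	"fmt"
    	"strings"
    )

    // tusMetadata builds a tus "upload-metadata" header value.
    func tusMetadata(pairs [][2]string) string {
    	parts := make([]string, 0, len(pairs))
    	for _, p := range pairs {
    		parts = append(parts, p[0]+" "+base64.StdEncoding.EncodeToString([]byte(p[1])))
    	}
    	return strings.Join(parts, ",")
    }

    func main() {
    	fmt.Println(tusMetadata([][2]string{
    		{"name", "héllo.txt"}, // already encoded via enc.FromStandardName above
    		{"parent_id", "12345"},
    	}))
    }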
@@ -505,7 +505,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Objec
params := url.Values{}
params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
params.Set("parent_id", directoryID)
- params.Set("name", leaf)
+ params.Set("name", enc.FromStandardName(leaf))
req, err := f.client.NewRequest(ctx, "POST", "/v2/files/copy", strings.NewReader(params.Encode()))
if err != nil {
return false, err
@@ -544,7 +544,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Objec
params := url.Values{}
params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
params.Set("parent_id", directoryID)
- params.Set("name", leaf)
+ params.Set("name", enc.FromStandardName(leaf))
req, err := f.client.NewRequest(ctx, "POST", "/v2/files/move", strings.NewReader(params.Encode()))
if err != nil {
return false, err
@@ -633,7 +633,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
params := url.Values{}
params.Set("file_id", srcID)
params.Set("parent_id", dstDirectoryID)
- params.Set("name", leaf)
+ params.Set("name", enc.FromStandardName(leaf))
req, err := f.client.NewRequest(ctx, "POST", "/v2/files/move", strings.NewReader(params.Encode()))
if err != nil {
return false, err

backend/putio/object.go
@@ -137,7 +137,7 @@ func (o *Object) readEntry(ctx context.Context) (f *putio.File, err error) {
}
err = o.fs.pacer.Call(func() (bool, error) {
// fs.Debugf(o, "requesting child. directoryID: %s, name: %s", directoryID, leaf)
- req, err := o.fs.client.NewRequest(ctx, "GET", "/v2/files/"+directoryID+"/child?name="+url.PathEscape(leaf), nil)
+ req, err := o.fs.client.NewRequest(ctx, "GET", "/v2/files/"+directoryID+"/child?name="+url.QueryEscape(enc.FromStandardName(leaf)), nil)
if err != nil {
return false, err
}
@@ -147,6 +147,12 @@ func (o *Object) readEntry(ctx context.Context) (f *putio.File, err error) {
}
return shouldRetry(err)
})
if err != nil {
return nil, err
}
+ if resp.File.IsDir() {
+ return nil, fs.ErrorNotAFile
+ }
return &resp.File, err
}
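readEntry's switch from url.PathEscape to url.QueryEscape matters because the leaf is spliced into a query parameter, not a path segment: PathEscape leaves query-significant characters such as & and + untouched. A quick illustration:

    package main

    import (
    	"fmt"
    	"net/url"
    )

    func main() {
    	leaf := "a&b +c.txt"
    	fmt.Println(url.PathEscape(leaf))  // a&b%20+c.txt - & and + pass through
    	fmt.Println(url.QueryEscape(leaf)) // a%26b+%2Bc.txt - safe after ?name=
    }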

backend/putio/putio.go
@@ -8,11 +8,25 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
+ "github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
"golang.org/x/oauth2"
)

/*
// TestPutio
stringNeedsEscaping = []rune{
'/', '\x00'
}
maxFileLength = 255
canWriteUnnormalized = true
canReadUnnormalized = true
canReadRenormalized = true
canStream = false
*/

+ const enc = encodings.Putio
+
// Constants
const (
rcloneClientID = "4131"

backend/qingstor/qingstor.go
@@ -20,6 +20,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
+ "github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
@@ -29,6 +30,8 @@ import (
qs "github.com/yunify/qingstor-sdk-go/v3/service"
)

+ const enc = encodings.QingStor
+
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -184,7 +187,8 @@ func parsePath(path string) (root string) {
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
- return bucket.Split(path.Join(f.root, rootRelativePath))
+ bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
+ return enc.FromStandardName(bucketName), enc.FromStandardPath(bucketPath)
}

// split returns bucket and bucketPath from the object
@@ -353,7 +357,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, err
}
- _, err = bucketInit.HeadObject(f.rootDirectory, &qs.HeadObjectInput{})
+ encodedDirectory := enc.FromStandardPath(f.rootDirectory)
+ _, err = bucketInit.HeadObject(encodedDirectory, &qs.HeadObjectInput{})
if err == nil {
newRoot := path.Dir(f.root)
if newRoot == "." {
@@ -550,6 +555,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
continue
}
remote := *commonPrefix
+ remote = enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
@@ -569,12 +575,13 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}

for _, object := range resp.Keys {
- key := qs.StringValue(object.Key)
- if !strings.HasPrefix(key, prefix) {
- fs.Logf(f, "Odd name received %q", key)
+ remote := qs.StringValue(object.Key)
+ remote = enc.ToStandardPath(remote)
+ if !strings.HasPrefix(remote, prefix) {
+ fs.Logf(f, "Odd name received %q", remote)
continue
}
- remote := key[len(prefix):]
+ remote = remote[len(prefix):]
if addBucket {
remote = path.Join(bucket, remote)
}
@@ -646,7 +653,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
}

for _, bucket := range resp.Buckets {
- d := fs.NewDir(qs.StringValue(bucket.Name), qs.TimeValue(bucket.Created))
+ d := fs.NewDir(enc.ToStandardName(qs.StringValue(bucket.Name)), qs.TimeValue(bucket.Created))
entries = append(entries, d)
}
return entries, nil

backend/s3/s3.go (291 changed lines)
@@ -17,11 +17,14 @@ import (
"context"
"encoding/base64"
"encoding/hex"
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"path"
"regexp"
"strconv"
"strings"
"time"

@@ -41,6 +44,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
+ "github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
@@ -50,6 +54,8 @@ import (
"github.com/rclone/rclone/lib/rest"
)

+ const enc = encodings.S3
+
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -748,6 +754,17 @@ Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,
See: [AWS S3 Transfer acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration-examples.html)`,
Default: false,
Advanced: true,
}, {
Name: "leave_parts_on_error",
Provider: "AWS",
Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.

It should be set to true for resuming uploads across different sessions.

WARNING: Storing parts of an incomplete multipart upload counts towards space usage on S3 and will add additional costs if not cleaned up.
`,
Default: false,
Advanced: true,
}},
})
}
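The new leave_parts_on_error option flows through Options.LeavePartsOnError (next hunk) into the s3manager uploader in Update further down. A minimal sketch of the aws-sdk-go knob it toggles; the session setup here is illustrative, not rclone's:

    package main

    import (
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3/s3manager"
    )

    func main() {
    	ses := session.Must(session.NewSession())
    	_ = s3manager.NewUploader(ses, func(u *s3manager.Uploader) {
    		// false (the old hard-coded value): a failed multipart upload is
    		// aborted and its parts deleted. true: parts stay on S3 so a later
    		// run can resume, but incur storage costs until cleaned up.
    		u.LeavePartsOnError = true
    	})
    }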
@@ -788,6 +805,7 @@ type Options struct {
ForcePathStyle bool `config:"force_path_style"`
V2Auth bool `config:"v2_auth"`
UseAccelerateEndpoint bool `config:"use_accelerate_endpoint"`
+ LeavePartsOnError bool `config:"leave_parts_on_error"`
}

// Fs represents a remote s3 server
@@ -818,6 +836,7 @@ type Object struct {
lastModified time.Time // Last modified
meta map[string]*string // The object metadata if known - may be nil
mimeType string // MimeType of object - may be ""
+ storageClass string // eg GLACIER
}

// ------------------------------------------------------------
@@ -898,7 +917,8 @@ func parsePath(path string) (root string) {
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
- return bucket.Split(path.Join(f.root, rootRelativePath))
+ bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
+ return enc.FromStandardName(bucketName), enc.FromStandardPath(bucketPath)
}

// split returns bucket and bucketPath from the object
@@ -1089,12 +1109,15 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
+ SetTier: true,
+ GetTier: true,
}).Fill(f)
if f.rootBucket != "" && f.rootDirectory != "" {
// Check to see if the object exists
+ encodedDirectory := enc.FromStandardPath(f.rootDirectory)
req := s3.HeadObjectInput{
Bucket: &f.rootBucket,
- Key: &f.rootDirectory,
+ Key: &encodedDirectory,
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.c.HeadObject(&req)
@@ -1132,6 +1155,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Obje
}
o.etag = aws.StringValue(info.ETag)
o.bytes = aws.Int64Value(info.Size)
+ o.storageClass = aws.StringValue(info.StorageClass)
} else {
err := o.readMetaData(ctx) // reads info and meta, returning an error
if err != nil {
@@ -1214,6 +1238,22 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
delimiter = "/"
}
var marker *string
// URL encode the listings so we can use control characters in object names
// See: https://github.com/aws/aws-sdk-go/issues/1914
//
// However this doesn't work perfectly under Ceph (and hence DigitalOcean/Dreamhost) because
// it doesn't encode CommonPrefixes.
// See: https://tracker.ceph.com/issues/41870
//
// This does not work under IBM COS also: See https://github.com/rclone/rclone/issues/3345
// though maybe it does on some versions.
//
// This does work with minio but was only added relatively recently
// https://github.com/minio/minio/pull/7265
//
// So we enable only on providers we know support it properly, all others can retry when an
// XML Syntax error is detected.
var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba")
for {
// FIXME need to implement ALL loop
req := s3.ListObjectsInput{
@@ -1223,10 +1263,26 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
MaxKeys: &maxKeys,
Marker: marker,
}
+ if urlEncodeListings {
+ req.EncodingType = aws.String(s3.EncodingTypeUrl)
+ }
var resp *s3.ListObjectsOutput
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.ListObjectsWithContext(ctx, &req)
if err != nil && !urlEncodeListings {
if awsErr, ok := err.(awserr.RequestFailure); ok {
if origErr := awsErr.OrigErr(); origErr != nil {
if _, ok := origErr.(*xml.SyntaxError); ok {
// Retry the listing with URL encoding as there were characters that XML can't encode
urlEncodeListings = true
req.EncodingType = aws.String(s3.EncodingTypeUrl)
fs.Debugf(f, "Retrying listing because of characters which can't be XML encoded")
return true, err
}
}
}
}
return f.shouldRetry(err)
})
if err != nil {
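When EncodingType=url was sent, keys and common prefixes come back percent-encoded, so the decode hunks below run them through url.QueryUnescape before the prefix checks. For example:

    package main

    import (
    	"fmt"
    	"net/url"
    )

    func main() {
    	// an object key containing a newline, as a URL-encoded listing returns it
    	key, err := url.QueryUnescape("dir/file%0Aname")
    	fmt.Printf("%q %v\n", key, err) // "dir/file\nname" <nil>
    }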
@@ -1255,6 +1311,14 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
continue
}
remote := *commonPrefix.Prefix
+ if urlEncodeListings {
+ remote, err = url.QueryUnescape(remote)
+ if err != nil {
+ fs.Logf(f, "failed to URL decode %q in listing common prefix: %v", *commonPrefix.Prefix, err)
+ continue
+ }
+ }
+ remote = enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
@@ -1274,6 +1338,14 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
for _, object := range resp.Contents {
remote := aws.StringValue(object.Key)
+ if urlEncodeListings {
+ remote, err = url.QueryUnescape(remote)
+ if err != nil {
+ fs.Logf(f, "failed to URL decode %q in listing: %v", aws.StringValue(object.Key), err)
+ continue
+ }
+ }
+ remote = enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
@@ -1358,7 +1430,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
return nil, err
}
for _, bucket := range resp.Buckets {
- bucketName := aws.StringValue(bucket.Name)
+ bucketName := enc.ToStandardName(aws.StringValue(bucket.Name))
f.cache.MarkOK(bucketName)
d := fs.NewDir(bucketName, aws.TimeValue(bucket.CreationDate))
entries = append(entries, d)
@@ -1550,6 +1622,132 @@ func pathEscape(s string) string {
return strings.Replace(rest.URLPathEscape(s), "+", "%2B", -1)
}

// copy does a server side copy
//
// It adds the boiler plate to the req passed in and calls the s3
// method
func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, srcSize int64) error {
req.Bucket = &dstBucket
req.ACL = &f.opt.ACL
req.Key = &dstPath
source := pathEscape(path.Join(srcBucket, srcPath))
req.CopySource = &source
if f.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &f.opt.ServerSideEncryption
}
if f.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &f.opt.SSEKMSKeyID
}
if req.StorageClass == nil && f.opt.StorageClass != "" {
req.StorageClass = &f.opt.StorageClass
}

if srcSize >= int64(f.opt.UploadCutoff) {
return f.copyMultipart(ctx, req, dstBucket, dstPath, srcBucket, srcPath, srcSize)
}
return f.pacer.Call(func() (bool, error) {
_, err := f.c.CopyObjectWithContext(ctx, req)
return f.shouldRetry(err)
})
}

func calculateRange(partSize, partIndex, numParts, totalSize int64) string {
start := partIndex * partSize
var ends string
if partIndex == numParts-1 {
if totalSize >= 0 {
ends = strconv.FormatInt(totalSize, 10)
}
} else {
ends = strconv.FormatInt(start+partSize-1, 10)
}
return fmt.Sprintf("bytes=%v-%v", start, ends)
}
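A worked example of the CopySourceRange values calculateRange produces, reproducing the function verbatim so the snippet runs standalone. Note that for the final part the range ends at totalSize, exactly as the code above is written:

    package main

    import (
    	"fmt"
    	"strconv"
    )

    func calculateRange(partSize, partIndex, numParts, totalSize int64) string {
    	start := partIndex * partSize
    	var ends string
    	if partIndex == numParts-1 {
    		if totalSize >= 0 {
    			ends = strconv.FormatInt(totalSize, 10)
    		}
    	} else {
    		ends = strconv.FormatInt(start+partSize-1, 10)
    	}
    	return fmt.Sprintf("bytes=%v-%v", start, ends)
    }

    func main() {
    	var partSize int64 = 5 * 1024 * 1024   // 5 MiB chunk size
    	var totalSize int64 = 12 * 1024 * 1024 // 12 MiB source object
    	numParts := (totalSize-1)/partSize + 1 // = 3, as computed in copyMultipart below
    	for i := int64(0); i < numParts; i++ {
    		fmt.Println(calculateRange(partSize, i, numParts, totalSize))
    	}
    	// bytes=0-5242879
    	// bytes=5242880-10485759
    	// bytes=10485760-12582912
    }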

func (f *Fs) copyMultipart(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, srcSize int64) (err error) {
var cout *s3.CreateMultipartUploadOutput
if err := f.pacer.Call(func() (bool, error) {
var err error
cout, err = f.c.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
Bucket: &dstBucket,
Key: &dstPath,
})
return f.shouldRetry(err)
}); err != nil {
return err
}
uid := cout.UploadId

defer func() {
if err != nil {
// We can try to abort the upload, but ignore the error.
_ = f.pacer.Call(func() (bool, error) {
_, err := f.c.AbortMultipartUploadWithContext(ctx, &s3.AbortMultipartUploadInput{
Bucket: &dstBucket,
Key: &dstPath,
UploadId: uid,
RequestPayer: req.RequestPayer,
})
return f.shouldRetry(err)
})
}
}()

partSize := int64(f.opt.ChunkSize)
numParts := (srcSize-1)/partSize + 1

var parts []*s3.CompletedPart
for partNum := int64(1); partNum <= numParts; partNum++ {
if err := f.pacer.Call(func() (bool, error) {
partNum := partNum
uploadPartReq := &s3.UploadPartCopyInput{
Bucket: &dstBucket,
Key: &dstPath,
PartNumber: &partNum,
UploadId: uid,
CopySourceRange: aws.String(calculateRange(partSize, partNum-1, numParts, srcSize)),
// Args copy from req
CopySource: req.CopySource,
CopySourceIfMatch: req.CopySourceIfMatch,
CopySourceIfModifiedSince: req.CopySourceIfModifiedSince,
CopySourceIfNoneMatch: req.CopySourceIfNoneMatch,
CopySourceIfUnmodifiedSince: req.CopySourceIfUnmodifiedSince,
CopySourceSSECustomerAlgorithm: req.CopySourceSSECustomerAlgorithm,
CopySourceSSECustomerKey: req.CopySourceSSECustomerKey,
CopySourceSSECustomerKeyMD5: req.CopySourceSSECustomerKeyMD5,
RequestPayer: req.RequestPayer,
SSECustomerAlgorithm: req.SSECustomerAlgorithm,
SSECustomerKey: req.SSECustomerKey,
SSECustomerKeyMD5: req.SSECustomerKeyMD5,
}
uout, err := f.c.UploadPartCopyWithContext(ctx, uploadPartReq)
if err != nil {
return f.shouldRetry(err)
}
parts = append(parts, &s3.CompletedPart{
PartNumber: &partNum,
ETag: uout.CopyPartResult.ETag,
})
return false, nil
}); err != nil {
return err
}
}

return f.pacer.Call(func() (bool, error) {
_, err := f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
Bucket: &dstBucket,
Key: &dstPath,
MultipartUpload: &s3.CompletedMultipartUpload{
Parts: parts,
},
RequestPayer: req.RequestPayer,
UploadId: uid,
})
return f.shouldRetry(err)
})
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
@@ -1571,27 +1769,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantCopy
}
srcBucket, srcPath := srcObj.split()
- source := pathEscape(path.Join(srcBucket, srcPath))
req := s3.CopyObjectInput{
- Bucket: &dstBucket,
- ACL: &f.opt.ACL,
- Key: &dstPath,
- CopySource: &source,
MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
}
- if f.opt.ServerSideEncryption != "" {
- req.ServerSideEncryption = &f.opt.ServerSideEncryption
- }
- if f.opt.SSEKMSKeyID != "" {
- req.SSEKMSKeyId = &f.opt.SSEKMSKeyID
- }
- if f.opt.StorageClass != "" {
- req.StorageClass = &f.opt.StorageClass
- }
- err = f.pacer.Call(func() (bool, error) {
- _, err = f.c.CopyObjectWithContext(ctx, &req)
- return f.shouldRetry(err)
- })
+ err = f.copy(ctx, &req, dstBucket, dstPath, srcBucket, srcPath, srcObj.Size())
if err != nil {
return nil, err
}
@@ -1691,6 +1872,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
o.etag = aws.StringValue(resp.ETag)
o.bytes = size
o.meta = resp.Metadata
+ o.storageClass = aws.StringValue(resp.StorageClass)
if resp.LastModified == nil {
fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
o.lastModified = time.Now()
@@ -1741,39 +1923,19 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return nil
}

- // Guess the content type
- mimeType := fs.MimeType(ctx, o)
+ // Can't update metadata here, so return this error to force a recopy
+ if o.storageClass == "GLACIER" || o.storageClass == "DEEP_ARCHIVE" {
+ return fs.ErrorCantSetModTime
+ }

// Copy the object to itself to update the metadata
bucket, bucketPath := o.split()
- sourceKey := path.Join(bucket, bucketPath)
- directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
req := s3.CopyObjectInput{
- Bucket: &bucket,
- ACL: &o.fs.opt.ACL,
- Key: &bucketPath,
- ContentType: &mimeType,
- CopySource: aws.String(pathEscape(sourceKey)),
+ ContentType: aws.String(fs.MimeType(ctx, o)), // Guess the content type
Metadata: o.meta,
- MetadataDirective: &directive,
+ MetadataDirective: aws.String(s3.MetadataDirectiveReplace), // replace metadata with that passed in
}
- if o.fs.opt.ServerSideEncryption != "" {
- req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
- }
- if o.fs.opt.SSEKMSKeyID != "" {
- req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
- }
- if o.fs.opt.StorageClass == "GLACIER" || o.fs.opt.StorageClass == "DEEP_ARCHIVE" {
- return fs.ErrorCantSetModTime
- }
- if o.fs.opt.StorageClass != "" {
- req.StorageClass = &o.fs.opt.StorageClass
- }
- err = o.fs.pacer.Call(func() (bool, error) {
- _, err := o.fs.c.CopyObjectWithContext(ctx, &req)
- return o.fs.shouldRetry(err)
- })
- return err
+ return o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o.bytes)
}

// Storable returns a boolean indicating if this object is storable
@@ -1832,7 +1994,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if multipart {
uploader = s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
u.Concurrency = o.fs.opt.UploadConcurrency
- u.LeavePartsOnError = false
+ u.LeavePartsOnError = o.fs.opt.LeavePartsOnError
u.S3 = o.fs.c
u.PartSize = int64(o.fs.opt.ChunkSize)

@@ -1932,6 +2094,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errors.Wrap(err, "s3 upload: sign request")
}

+ if o.fs.opt.V2Auth && headers == nil {
+ headers = putObj.HTTPRequest.Header
+ }
+
// Set request to nil if empty so as not to make chunked encoding
if size == 0 {
in = nil
@@ -1998,6 +2164,31 @@ func (o *Object) MimeType(ctx context.Context) string {
return o.mimeType
}

// SetTier performs changing storage class
func (o *Object) SetTier(tier string) (err error) {
ctx := context.TODO()
tier = strings.ToUpper(tier)
bucket, bucketPath := o.split()
req := s3.CopyObjectInput{
MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
StorageClass: aws.String(tier),
}
err = o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o.bytes)
if err != nil {
return err
}
o.storageClass = tier
return err
}

// GetTier returns storage class as string
func (o *Object) GetTier() string {
if o.storageClass == "" {
return "STANDARD"
}
return o.storageClass
}
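SetTier works by server-side copying the object onto itself with MetadataDirective=COPY and the new StorageClass, then caching the result; GetTier just reads the cache with a STANDARD fallback. Callers discover support through the fs interfaces asserted just below — a sketch, assuming o is an fs.Object obtained from an S3 remote:

    package example

    import (
    	"fmt"
    	"log"

    	"github.com/rclone/rclone/fs"
    )

    // changeTier moves an object to another storage class if the backend
    // supports it, then reads the class back.
    func changeTier(o fs.Object) {
    	if do, ok := o.(fs.SetTierer); ok {
    		if err := do.SetTier("STANDARD_IA"); err != nil {
    			log.Fatal(err)
    		}
    	}
    	if do, ok := o.(fs.GetTierer); ok {
    		fmt.Println(do.GetTier()) // STANDARD_IA
    	}
    }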

// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
@@ -2006,4 +2197,6 @@ var (
_ fs.ListRer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
+ _ fs.GetTierer = &Object{}
+ _ fs.SetTierer = &Object{}
)

backend/s3/s3_test.go
@@ -11,8 +11,9 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
- RemoteName: "TestS3:",
- NilObject: (*Object)(nil),
+ RemoteName: "TestS3:",
+ NilObject: (*Object)(nil),
+ TiersToTest: []string{"STANDARD", "STANDARD_IA"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
},

backend/sftp/sftp.go
@@ -103,9 +103,14 @@ when the ssh-agent contains many keys.`,
Default: false,
Help: "Disable the execution of SSH commands to determine if remote file hashing is available.\nLeave blank or set to false to enable hashing (recommended), set to true to disable hashing.",
}, {
- Name: "ask_password",
- Default: false,
- Help: "Allow asking for SFTP password when needed.",
+ Name: "ask_password",
+ Default: false,
+ Help: `Allow asking for SFTP password when needed.
+
+ If this is set and no password is supplied then rclone will:
+ - ask for a password
+ - not contact the ssh agent
+ `,
Advanced: true,
}, {
Name: "path_override",
@@ -364,7 +369,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {

keyFile := env.ShellExpand(opt.KeyFile)
// Add ssh agent-auth if no password or file specified
- if (opt.Pass == "" && keyFile == "") || opt.KeyUseAgent {
+ if (opt.Pass == "" && keyFile == "" && !opt.AskPassword) || opt.KeyUseAgent {
sshAgentClient, _, err := sshagent.New()
if err != nil {
return nil, errors.Wrap(err, "couldn't connect to ssh-agent")
@@ -945,6 +950,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
escapedPath = shellEscape(path.Join(o.fs.opt.PathOverride, o.remote))
}
err = session.Run(hashCmd + " " + escapedPath)
+ fs.Debugf(nil, "sftp cmd = %s", escapedPath)
if err != nil {
_ = session.Close()
fs.Debugf(o, "Failed to calculate %v hash: %v (%s)", r, err, bytes.TrimSpace(stderr.Bytes()))
@@ -952,7 +958,10 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
}

_ = session.Close()
- str := parseHash(stdout.Bytes())
+ b := stdout.Bytes()
+ fs.Debugf(nil, "sftp output = %q", b)
+ str := parseHash(b)
+ fs.Debugf(nil, "sftp hash = %q", str)
if r == hash.MD5 {
o.md5sum = &str
} else if r == hash.SHA1 {
@@ -961,7 +970,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
return str, nil
}

- var shellEscapeRegex = regexp.MustCompile(`[^A-Za-z0-9_.,:/@\n-]`)
+ var shellEscapeRegex = regexp.MustCompile("[^A-Za-z0-9_.,:/\\@\u0080-\uFFFFFFFF\n-]")

// Escape a string s.t. it cannot cause unintended behavior
// when sending it to a shell.
@@ -974,7 +983,9 @@ func shellEscape(str string) string {
// an invocation of md5sum/sha1sum to a hash string
// as expected by the rest of this application
func parseHash(bytes []byte) string {
- return strings.Split(string(bytes), " ")[0] // Split at hash / filename separator
+ // For strings with backslash *sum writes a leading \
+ // https://unix.stackexchange.com/q/313733/94054
+ return strings.Split(strings.TrimLeft(string(bytes), "\\"), " ")[0] // Split at hash / filename separator
}
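parseHash now also strips the leading backslash that GNU md5sum/sha1sum prepend to the whole line when the filename needed escaping — the hash itself is unchanged. A small demonstration:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // parseHash as in the hunk above.
    func parseHash(b []byte) string {
    	return strings.Split(strings.TrimLeft(string(b), "\\"), " ")[0]
    }

    func main() {
    	fmt.Println(parseHash([]byte("d41d8cd98f00b204e9800998ecf8427e  plain.txt\n")))
    	// with a backslash in the name *sum writes a leading \ and escapes the name
    	fmt.Println(parseHash([]byte("\\d41d8cd98f00b204e9800998ecf8427e  dir\\\\file.txt\n")))
    }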

// Parses the byte array output from the SSH session

backend/sharefile/api/types.go (new file, 152 lines)
@@ -0,0 +1,152 @@
// Package api contains definitions for using the sharefile API
package api

import (
"fmt"
"time"

"github.com/pkg/errors"
)

// ListRequestSelect should be used in $select for Items/Children
const ListRequestSelect = "odata.count,FileCount,Name,FileName,CreationDate,IsHidden,FileSizeBytes,odata.type,Id,Hash,ClientModifiedDate"

// ListResponse is returned from the Items/Children call
type ListResponse struct {
OdataCount int `json:"odata.count"`
Value []Item `json:"value"`
}

// Item Types
const (
ItemTypeFolder = "ShareFile.Api.Models.Folder"
ItemTypeFile = "ShareFile.Api.Models.File"
)

// Item refers to a file or folder
type Item struct {
FileCount int32 `json:"FileCount,omitempty"`
Name string `json:"Name,omitempty"`
FileName string `json:"FileName,omitempty"`
CreatedAt time.Time `json:"CreationDate,omitempty"`
ModifiedAt time.Time `json:"ClientModifiedDate,omitempty"`
IsHidden bool `json:"IsHidden,omitempty"`
Size int64 `json:"FileSizeBytes,omitempty"`
Type string `json:"odata.type,omitempty"`
ID string `json:"Id,omitempty"`
Hash string `json:"Hash,omitempty"`
}

// Error is an odata error return
type Error struct {
Code string `json:"code"`
Message struct {
Lang string `json:"lang"`
Value string `json:"value"`
} `json:"message"`
Reason string `json:"reason"`
}

// Satisfy error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s: %s: %s", e.Message.Value, e.Code, e.Reason)
}

// Check Error satisfies error interface
var _ error = &Error{}

// DownloadSpecification is the response to /Items/Download
type DownloadSpecification struct {
Token string `json:"DownloadToken"`
URL string `json:"DownloadUrl"`
Metadata string `json:"odata.metadata"`
Type string `json:"odata.type"`
}

// UploadRequest is set to /Items/Upload2 to receive an UploadSpecification
type UploadRequest struct {
Method string `json:"method"` // Upload method: one of: standard, streamed or threaded
Raw bool `json:"raw"` // Raw post if true or MIME upload if false
Filename string `json:"fileName"` // Uploaded item file name.
Filesize *int64 `json:"fileSize,omitempty"` // Uploaded item file size.
Overwrite bool `json:"overwrite"` // Indicates whether items with the same name will be overwritten or not.
CreatedDate time.Time `json:"ClientCreatedDate"` // Created Date of this Item.
ModifiedDate time.Time `json:"ClientModifiedDate"` // Modified Date of this Item.
BatchID string `json:"batchId,omitempty"` // Indicates part of a batch. Batched uploads do not send notification until the whole batch is completed.
BatchLast *bool `json:"batchLast,omitempty"` // Indicates this is the last in a batch. Upload notifications for the whole batch are sent after this upload.
CanResume *bool `json:"canResume,omitempty"` // Indicates uploader supports resume.
StartOver *bool `json:"startOver,omitempty"` // Indicates uploader wants to restart the file - i.e., ignore previous failed upload attempts.
Tool string `json:"tool,omitempty"` // Identifies the uploader tool.
Title string `json:"title,omitempty"` // Item Title
Details string `json:"details,omitempty"` // Item description
IsSend *bool `json:"isSend,omitempty"` // Indicates that this upload is part of a Send operation
SendGUID string `json:"sendGuid,omitempty"` // Used if IsSend is true. Specifies which Send operation this upload is part of.
OpID string `json:"opid,omitempty"` // Used for Asynchronous copy/move operations - called by Zones to push files to other Zones
ThreadCount *int `json:"threadCount,omitempty"` // Specifies the number of threads the threaded uploader will use. Only used if method is threaded, ignored otherwise
Notify *bool `json:"notify,omitempty"` // Indicates whether users will be notified of this upload - based on folder preferences
ExpirationDays *int `json:"expirationDays,omitempty"` // File expiration days
BaseFileID string `json:"baseFileId,omitempty"` // Used to check conflict in file during File Upload.
}

// UploadSpecification is returned from /Items/Upload
type UploadSpecification struct {
Method string `json:"Method"` // The Upload method that must be used for this upload
PrepareURI string `json:"PrepareUri"` // If provided, clients must issue a request to this Uri before uploading any data.
ChunkURI string `json:"ChunkUri"` // Specifies the URI the client must send the file data to
FinishURI string `json:"FinishUri"` // If provided, specifies the final call the client must perform to finish the upload process
ProgressData string `json:"ProgressData"` // Allows the client to check progress of standard uploads
IsResume bool `json:"IsResume"` // Specifies a Resumable upload is supported.
ResumeIndex int64 `json:"ResumeIndex"` // Specifies the initial index for resuming, if IsResume is true.
ResumeOffset int64 `json:"ResumeOffset"` // Specifies the initial file offset by bytes, if IsResume is true
ResumeFileHash string `json:"ResumeFileHash"` // Specifies the MD5 hash of the first ResumeOffset bytes of the partial file found at the server
MaxNumberOfThreads int `json:"MaxNumberOfThreads"` // Specifies the max number of chunks that can be sent simultaneously for threaded uploads
}

// UploadFinishResponse is returned from calling UploadSpecification.FinishURI
type UploadFinishResponse struct {
Error bool `json:"error"`
ErrorMessage string `json:"errorMessage"`
ErrorCode int `json:"errorCode"`
Value []struct {
UploadID string `json:"uploadid"`
ParentID string `json:"parentid"`
ID string `json:"id"`
StreamID string `json:"streamid"`
FileName string `json:"filename"`
DisplayName string `json:"displayname"`
Size int `json:"size"`
Md5 string `json:"md5"`
} `json:"value"`
}

// ID returns the ID of the first response if available
func (finish *UploadFinishResponse) ID() (string, error) {
if finish.Error {
return "", errors.Errorf("upload failed: %s (%d)", finish.ErrorMessage, finish.ErrorCode)
}
if len(finish.Value) == 0 {
return "", errors.New("upload failed: no results returned")
}
return finish.Value[0].ID, nil
}

// Parent is the ID of the parent folder
type Parent struct {
ID string `json:"Id,omitempty"`
}

// Zone is where the data is stored
type Zone struct {
ID string `json:"Id,omitempty"`
}

// UpdateItemRequest is sent to PATCH /v3/Items(id)
type UpdateItemRequest struct {
Name string `json:"Name,omitempty"`
FileName string `json:"FileName,omitempty"`
Description string `json:"Description,omitempty"`
ExpirationDate *time.Time `json:"ExpirationDate,omitempty"`
Parent *Parent `json:"Parent,omitempty"`
Zone *Zone `json:"Zone,omitempty"`
ModifiedAt *time.Time `json:"ClientModifiedDate,omitempty"`
}

backend/sharefile/generate_tzdata.go (new file, 22 lines)
@@ -0,0 +1,22 @@
// +build ignore

package main

import (
"log"
"net/http"

"github.com/shurcooL/vfsgen"
)

func main() {
var AssetDir http.FileSystem = http.Dir("./tzdata")
err := vfsgen.Generate(AssetDir, vfsgen.Options{
PackageName: "sharefile",
BuildTags: "!dev",
VariableName: "tzdata",
})
if err != nil {
log.Fatalln(err)
}
}

backend/sharefile/sharefile.go (new file, 1518 lines) - diff suppressed because it is too large

backend/sharefile/sharefile_test.go (new file, 34 lines)
@@ -0,0 +1,34 @@
// Test filesystem interface
package sharefile

import (
"testing"

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestSharefile:",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
CeilChunkSize: fstests.NextPowerOfTwo,
},
})
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}

func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}

var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)

backend/sharefile/tzdata_vfsdata.go (new file, 193 lines, generated) - diff suppressed because one or more lines are too long

backend/sharefile/update-timezone.sh (new executable file, 18 lines)
@@ -0,0 +1,18 @@
#!/bin/bash

set -e

# Extract just the America/New_York timezone from the Go zoneinfo.zip
tzinfo=$(go env GOROOT)/lib/time/zoneinfo.zip

rm -rf tzdata
mkdir tzdata
cd tzdata
unzip ${tzinfo} America/New_York

cd ..
# Make the embedded assets
go run generate_tzdata.go

# tidy up
rm -rf tzdata
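The script above, together with generate_tzdata.go, embeds a single zoneinfo file into the sharefile package, presumably so timestamps can be interpreted in America/New_York even on hosts without a tz database (that motivation is an inference; the script doesn't state it). A sketch of how such an embedded zone can be consumed — the tzdata variable name comes from the vfsgen options above, and reading it back this way is an assumption about sharefile.go, whose diff is suppressed:

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"log"
    	"net/http"
    	"time"
    )

    // loadEmbeddedZone turns a zoneinfo file served from an embedded
    // http.FileSystem (as vfsgen generates) into a *time.Location.
    func loadEmbeddedZone(fsys http.FileSystem, name string) (*time.Location, error) {
    	f, err := fsys.Open("/" + name)
    	if err != nil {
    		return nil, err
    	}
    	defer f.Close()
    	data, err := ioutil.ReadAll(f)
    	if err != nil {
    		return nil, err
    	}
    	return time.LoadLocationFromTZData(name, data)
    }

    func main() {
    	var tzdata http.FileSystem = http.Dir("./tzdata") // stand-in for the generated variable
    	loc, err := loadEmbeddedZone(tzdata, "America/New_York")
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(time.Date(2019, 9, 1, 12, 0, 0, 0, loc))
    }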
backend/sharefile/upload.go (new file, 261 lines)
@@ -0,0 +1,261 @@
// Upload large files for sharefile
//
// Docs - https://api.sharefile.com/rest/docs/resource.aspx?name=Items#Upload_File

package sharefile

import (
"bytes"
"context"
"crypto/md5"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"strings"
"sync"

"github.com/pkg/errors"
"github.com/rclone/rclone/backend/sharefile/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
)

// largeUpload is used to control the upload of large files which need chunking
type largeUpload struct {
ctx context.Context
f *Fs // parent Fs
o *Object // object being uploaded
in io.Reader // read the data from here
wrap accounting.WrapFn // account parts being transferred
size int64 // total size
parts int64 // calculated number of parts, if known
info *api.UploadSpecification // where to post chunks etc
threads int // number of threads to use in upload
streamed bool // set if using streamed upload
}

// newLargeUpload starts an upload of object o from in with metadata in src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, info *api.UploadSpecification) (up *largeUpload, err error) {
size := src.Size()
parts := int64(-1)
if size >= 0 {
parts = size / int64(o.fs.opt.ChunkSize)
if size%int64(o.fs.opt.ChunkSize) != 0 {
parts++
}
}

var streamed bool
switch strings.ToLower(info.Method) {
case "streamed":
streamed = true
case "threaded":
streamed = false
default:
return nil, errors.Errorf("can't use method %q with newLargeUpload", info.Method)
}

threads := fs.Config.Transfers
if threads > info.MaxNumberOfThreads {
threads = info.MaxNumberOfThreads
}

// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
in, wrap := accounting.UnWrap(in)
up = &largeUpload{
ctx: ctx,
f: f,
o: o,
in: in,
wrap: wrap,
size: size,
threads: threads,
info: info,
parts: parts,
streamed: streamed,
}
return up, nil
}

// parse the api.UploadFinishResponse in respBody
func (up *largeUpload) parseUploadFinishResponse(respBody []byte) (err error) {
var finish api.UploadFinishResponse
err = json.Unmarshal(respBody, &finish)
if err != nil {
// Sometimes the unmarshal fails in which case return the body
return errors.Errorf("upload: bad response: %q", bytes.TrimSpace(respBody))
}
return up.o.checkUploadResponse(up.ctx, &finish)
}

// Transfer a chunk
func (up *largeUpload) transferChunk(ctx context.Context, part int64, offset int64, body []byte, fileHash string) error {
md5sumRaw := md5.Sum(body)
md5sum := hex.EncodeToString(md5sumRaw[:])
size := int64(len(body))

// Add some more parameters to the ChunkURI
u := up.info.ChunkURI
u += fmt.Sprintf("&index=%d&byteOffset=%d&hash=%s&fmt=json",
part, offset, md5sum,
)
if fileHash != "" {
u += fmt.Sprintf("&finish=true&fileSize=%d&fileHash=%s",
offset+int64(len(body)),
fileHash,
)
}
opts := rest.Opts{
Method: "POST",
RootURL: u,
ContentLength: &size,
}
var respBody []byte
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
opts.Body = up.wrap(bytes.NewReader(body))
resp, err := up.f.srv.Call(ctx, &opts)
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
} else {
respBody, err = rest.ReadBody(resp)
}
// retry all errors now that the multipart upload has started
return err != nil, err
})
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
return err
}
// If last chunk and using "streamed" transfer, get the response back now
if up.streamed && fileHash != "" {
return up.parseUploadFinishResponse(respBody)
}
fs.Debugf(up.o, "Done sending chunk %d", part)
return nil
}
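transferChunk relies on the server-issued ChunkUri already carrying a query string, which is why it appends with & rather than ?; the final chunk additionally signals completion with finish, the total fileSize, and the whole-file MD5. An illustrative final-chunk request (all values made up):

    POST <ChunkUri>&index=2&byteOffset=16777216&hash=<md5-of-chunk>&fmt=json&finish=true&fileSize=25165824&fileHash=<md5-of-file>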
|
||||
// finish closes off the large upload and reads the metadata
|
||||
func (up *largeUpload) finish(ctx context.Context) error {
|
||||
fs.Debugf(up.o, "Finishing large file upload")
|
||||
// For a streamed transfer we will already have read the info
|
||||
if up.streamed {
|
||||
return nil
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
RootURL: up.info.FinishURI,
|
||||
}
|
||||
var respBody []byte
|
||||
err := up.f.pacer.Call(func() (bool, error) {
|
||||
resp, err := up.f.srv.Call(ctx, &opts)
|
||||
if err != nil {
|
||||
return shouldRetry(resp, err)
|
||||
}
|
||||
respBody, err = rest.ReadBody(resp)
|
||||
// retry all errors now that the multipart upload has started
|
||||
return err != nil, err
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return up.parseUploadFinishResponse(respBody)
|
||||
}
|
||||
|
||||
// Upload uploads the chunks from the input
|
||||
func (up *largeUpload) Upload(ctx context.Context) error {
|
||||
if up.parts >= 0 {
|
||||
fs.Debugf(up.o, "Starting upload of large file in %d chunks", up.parts)
|
||||
} else {
|
||||
fs.Debugf(up.o, "Starting streaming upload of large file")
|
||||
}
|
||||
var (
|
||||
offset int64
|
||||
errs = make(chan error, 1)
|
||||
wg sync.WaitGroup
|
||||
err error
|
||||
wholeFileHash = md5.New()
|
||||
eof = false
|
||||
)
|
||||
outer:
|
||||
for part := int64(0); !eof; part++ {
|
||||
// Check any errors
|
||||
select {
|
||||
case err = <-errs:
|
||||
break outer
|
||||
default:
|
||||
}
|
||||
|
||||
// Get a block of memory
|
||||
buf := up.f.getUploadBlock()
|
||||
|
||||
// Read the chunk
|
||||
var n int
|
||||
n, err = readers.ReadFill(up.in, buf)
|
||||
if err == io.EOF {
|
||||
eof = true
|
||||
buf = buf[:n]
|
||||
err = nil
|
||||
} else if err != nil {
|
||||
up.f.putUploadBlock(buf)
|
||||
break outer
|
||||
}
|
||||
|
||||
// Hash it
|
||||
_, _ = io.Copy(wholeFileHash, bytes.NewBuffer(buf))
|
||||
|
||||
// Get file hash if was last chunk
|
||||
fileHash := ""
|
||||
if eof {
|
||||
fileHash = hex.EncodeToString(wholeFileHash.Sum(nil))
|
||||
}
|
||||
|
||||
// Transfer the chunk
|
||||
wg.Add(1)
|
||||
transferChunk := func(part, offset int64, buf []byte, fileHash string) {
|
||||
defer wg.Done()
|
||||
defer up.f.putUploadBlock(buf)
|
||||
err := up.transferChunk(ctx, part, offset, buf, fileHash)
|
||||
if err != nil {
|
||||
select {
|
||||
case errs <- err:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
if up.streamed {
|
||||
transferChunk(part, offset, buf, fileHash) // streamed
|
||||
} else {
|
||||
go transferChunk(part, offset, buf, fileHash) // multithreaded
|
||||
}
|
||||
|
||||
offset += int64(n)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// check size read is correct
|
||||
if eof && err == nil && up.size >= 0 && up.size != offset {
|
||||
err = errors.Errorf("upload: short read: read %d bytes expected %d", up.size, offset)
|
||||
}
|
||||
|
||||
// read any errors
|
||||
if err == nil {
|
||||
select {
|
||||
case err = <-errs:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// finish regardless of errors
|
||||
finishErr := up.finish(ctx)
|
||||
if err == nil {
|
||||
err = finishErr
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/encodings"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
@@ -60,6 +61,8 @@ copy operations.`,
|
||||
Advanced: true,
|
||||
}}
|
||||
|
||||
const enc = encodings.Swift
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
@@ -320,7 +323,8 @@ func parsePath(path string) (root string) {
|
||||
// split returns container and containerPath from the rootRelativePath
|
||||
// relative to f.root
|
||||
func (f *Fs) split(rootRelativePath string) (container, containerPath string) {
|
||||
return bucket.Split(path.Join(f.root, rootRelativePath))
|
||||
container, containerPath = bucket.Split(path.Join(f.root, rootRelativePath))
|
||||
return enc.FromStandardName(container), enc.FromStandardPath(containerPath)
|
||||
}
|
||||
|
||||
// split returns container and containerPath from the object
|
||||
@@ -441,9 +445,10 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
|
||||
// Check to see if the object exists - ignoring directory markers
|
||||
var info swift.Object
|
||||
var err error
|
||||
encodedDirectory := enc.FromStandardPath(f.rootDirectory)
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var rxHeaders swift.Headers
|
||||
info, rxHeaders, err = f.c.Object(f.rootContainer, f.rootDirectory)
|
||||
info, rxHeaders, err = f.c.Object(f.rootContainer, encodedDirectory)
|
||||
return shouldRetryHeaders(rxHeaders, err)
|
||||
})
|
||||
if err == nil && info.ContentType != directoryMarkerContentType {
|
||||
@@ -553,17 +558,18 @@ func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer
|
||||
if !recurse {
|
||||
isDirectory = strings.HasSuffix(object.Name, "/")
|
||||
}
|
||||
if !strings.HasPrefix(object.Name, prefix) {
|
||||
fs.Logf(f, "Odd name received %q", object.Name)
|
||||
remote := enc.ToStandardPath(object.Name)
|
||||
if !strings.HasPrefix(remote, prefix) {
|
||||
fs.Logf(f, "Odd name received %q", remote)
|
||||
continue
|
||||
}
|
||||
if object.Name == prefix {
|
||||
if remote == prefix {
|
||||
// If we have zero length directory markers ending in / then swift
|
||||
// will return them in the listing for the directory which causes
|
||||
// duplicate directories. Ignore them here.
|
||||
continue
|
||||
}
|
||||
remote := object.Name[len(prefix):]
|
||||
remote = remote[len(prefix):]
|
||||
if addContainer {
|
||||
remote = path.Join(container, remote)
|
||||
}
|
||||
@@ -635,7 +641,7 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
|
||||
}
|
||||
for _, container := range containers {
|
||||
f.cache.MarkOK(container.Name)
|
||||
d := fs.NewDir(container.Name, time.Time{}).SetSize(container.Bytes).SetItems(container.Count)
|
||||
d := fs.NewDir(enc.ToStandardName(container.Name), time.Time{}).SetSize(container.Bytes).SetItems(container.Count)
|
||||
entries = append(entries, d)
|
||||
}
|
||||
return entries, nil
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
	"bytes"
	"context"
	"encoding/xml"
	"fmt"
	"html/template"
	"net/http"
	"net/http/cookiejar"
@@ -32,13 +33,13 @@ type CookieResponse struct {
	FedAuth http.Cookie
}

// SuccessResponse hold a response from the sharepoint webdav
type SuccessResponse struct {
// SharepointSuccessResponse holds a response from a successful microsoft login
type SharepointSuccessResponse struct {
	XMLName xml.Name            `xml:"Envelope"`
	Succ    SuccessResponseBody `xml:"Body"`
	Body    SuccessResponseBody `xml:"Body"`
}

// SuccessResponseBody is the body of a success response, it holds the token
// SuccessResponseBody is the body of a successful response, it holds the token
type SuccessResponseBody struct {
	XMLName xml.Name
	Type    string `xml:"RequestSecurityTokenResponse>TokenType"`
@@ -47,6 +48,24 @@ type SuccessResponseBody struct {
	Token string `xml:"RequestSecurityTokenResponse>RequestedSecurityToken>BinarySecurityToken"`
}

// SharepointError holds an error response from a microsoft login
type SharepointError struct {
	XMLName xml.Name          `xml:"Envelope"`
	Body    ErrorResponseBody `xml:"Body"`
}

func (e *SharepointError) Error() string {
	return fmt.Sprintf("%s: %s (%s)", e.Body.FaultCode, e.Body.Reason, e.Body.Detail)
}

// ErrorResponseBody contains the body of an erroneous response
type ErrorResponseBody struct {
	XMLName   xml.Name
	FaultCode string `xml:"Fault>Code>Subcode>Value"`
	Reason    string `xml:"Fault>Reason>Text"`
	Detail    string `xml:"Fault>Detail>error>internalerror>text"`
}

// reqString is a template that gets populated with the user data in order to retrieve a "BinarySecurityToken"
const reqString = `<s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope"
xmlns:a="http://www.w3.org/2005/08/addressing"
@@ -100,7 +119,7 @@ func (ca *CookieAuth) Cookies(ctx context.Context) (*CookieResponse, error) {
	return ca.getSPCookie(tokenResp)
}

func (ca *CookieAuth) getSPCookie(conf *SuccessResponse) (*CookieResponse, error) {
func (ca *CookieAuth) getSPCookie(conf *SharepointSuccessResponse) (*CookieResponse, error) {
	spRoot, err := url.Parse(ca.endpoint)
	if err != nil {
		return nil, errors.Wrap(err, "Error while constructing endpoint URL")
@@ -123,7 +142,7 @@ func (ca *CookieAuth) getSPCookie(conf *SuccessResponse) (*CookieResponse, error
	}

	// Send the previously acquired Token as a Post parameter
	if _, err = client.Post(u.String(), "text/xml", strings.NewReader(conf.Succ.Token)); err != nil {
	if _, err = client.Post(u.String(), "text/xml", strings.NewReader(conf.Body.Token)); err != nil {
		return nil, errors.Wrap(err, "Error while grabbing cookies from endpoint: %v")
	}

@@ -141,7 +160,7 @@ func (ca *CookieAuth) getSPCookie(conf *SuccessResponse) (*CookieResponse, error
	return &cookieResponse, nil
}

func (ca *CookieAuth) getSPToken(ctx context.Context) (conf *SuccessResponse, err error) {
func (ca *CookieAuth) getSPToken(ctx context.Context) (conf *SharepointSuccessResponse, err error) {
	reqData := map[string]interface{}{
		"Username": ca.user,
		"Password": ca.pass,
@@ -177,12 +196,21 @@ func (ca *CookieAuth) getSPToken(ctx context.Context) (conf *SuccessResponse, er
	}
	s := respBuf.Bytes()

	conf = &SuccessResponse{}
	conf = &SharepointSuccessResponse{}
	err = xml.Unmarshal(s, conf)
	if err != nil {
		// FIXME: Try to parse with FailedResponse struct (check for server error code)
		return nil, errors.Wrap(err, "Error while reading endpoint response")
	if conf.Body.Token == "" {
		// xml Unmarshal won't fail if the response doesn't contain a token
		// However, the token will be empty
		sErr := &SharepointError{}

		errSErr := xml.Unmarshal(s, sErr)
		if errSErr == nil {
			return nil, sErr
		}
	}

	if err != nil {
		return nil, errors.Wrap(err, "Error while reading endpoint response")
	}
	return
}
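Why the double Unmarshal in getSPToken works: encoding/xml happily decodes a SOAP fault envelope into the success struct too, it just leaves Body.Token empty, and that empty token is what triggers the second parse with the error struct. A standalone sketch with trimmed-down copies of the structs from this hunk (not the backend's full types):

package main

import (
	"encoding/xml"
	"fmt"
)

type SharepointSuccessResponse struct {
	XMLName xml.Name `xml:"Envelope"`
	Body    struct {
		Token string `xml:"RequestSecurityTokenResponse>RequestedSecurityToken>BinarySecurityToken"`
	} `xml:"Body"`
}

type SharepointError struct {
	XMLName xml.Name `xml:"Envelope"`
	Body    struct {
		Reason string `xml:"Fault>Reason>Text"`
	} `xml:"Body"`
}

func (e *SharepointError) Error() string { return e.Body.Reason }

// A minimal SOAP fault: no token anywhere, but still a valid Envelope.
const fault = `<Envelope><Body><Fault><Reason><Text>bad credentials</Text></Reason></Fault></Body></Envelope>`

func main() {
	conf := &SharepointSuccessResponse{}
	// Unmarshal succeeds even though this is a fault...
	if err := xml.Unmarshal([]byte(fault), conf); err == nil && conf.Body.Token == "" {
		// ...so an empty token is the signal to re-parse as an error.
		sErr := &SharepointError{}
		if xml.Unmarshal([]byte(fault), sErr) == nil {
			fmt.Println("login failed:", sErr) // login failed: bad credentials
		}
	}
}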
@@ -20,6 +20,7 @@ import (
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/encodings"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/oauthutil"
@@ -29,6 +30,8 @@ import (
	"golang.org/x/oauth2"
)

const enc = encodings.Yandex

//oAuth
const (
	rcloneClientID = "ac39b43b9eba4cae8ffb788c06d816a8"
@@ -207,7 +210,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, options *api.
		Parameters: url.Values{},
	}

	opts.Parameters.Set("path", path)
	opts.Parameters.Set("path", enc.FromStandardPath(path))

	if options.SortMode != nil {
		opts.Parameters.Set("sort", options.SortMode.String())
@@ -234,6 +237,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, options *api.
		return nil, err
	}

	info.Name = enc.ToStandardName(info.Name)
	return &info, nil
}

@@ -360,6 +364,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
	if info.ResourceType == "dir" {
		//list all subdirs
		for _, element := range info.Embedded.Items {
			element.Name = enc.ToStandardName(element.Name)
			remote := path.Join(dir, element.Name)
			entry, err := f.itemToDirEntry(ctx, remote, &element)
			if err != nil {
@@ -458,14 +463,18 @@ func (f *Fs) CreateDir(ctx context.Context, path string) (err error) {
		NoResponse: true,
	}

	opts.Parameters.Set("path", path)
	// If creating a directory with a : use (undocumented) disk: prefix
	if strings.IndexRune(path, ':') >= 0 {
		path = "disk:" + path
	}
	opts.Parameters.Set("path", enc.FromStandardPath(path))

	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
		return shouldRetry(resp, err)
	})
	if err != nil {
		//fmt.Printf("CreateDir Error: %s\n", err.Error())
		// fmt.Printf("CreateDir %q Error: %s\n", path, err.Error())
		return err
	}
	// fmt.Printf("...Id %q\n", *info.Id)
@@ -572,7 +581,7 @@ func (f *Fs) delete(ctx context.Context, path string, hardDelete bool) (err erro
		Parameters: url.Values{},
	}

	opts.Parameters.Set("path", path)
	opts.Parameters.Set("path", enc.FromStandardPath(path))
	opts.Parameters.Set("permanently", strconv.FormatBool(hardDelete))

	var resp *http.Response
@@ -644,8 +653,8 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dst string, overwrite
		Parameters: url.Values{},
	}

	opts.Parameters.Set("from", src)
	opts.Parameters.Set("path", dst)
	opts.Parameters.Set("from", enc.FromStandardPath(src))
	opts.Parameters.Set("path", enc.FromStandardPath(dst))
	opts.Parameters.Set("overwrite", strconv.FormatBool(overwrite))

	var resp *http.Response
@@ -794,12 +803,12 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
	}
	opts := rest.Opts{
		Method:     "PUT",
		Path:       path,
		Path:       enc.FromStandardPath(path),
		Parameters: url.Values{},
		NoResponse: true,
	}

	opts.Parameters.Set("path", f.filePath(remote))
	opts.Parameters.Set("path", enc.FromStandardPath(f.filePath(remote)))

	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
@@ -985,7 +994,7 @@ func (o *Object) setCustomProperty(ctx context.Context, property string, value s
		NoResponse: true,
	}

	opts.Parameters.Set("path", o.filePath())
	opts.Parameters.Set("path", enc.FromStandardPath(o.filePath()))
	rcm := map[string]interface{}{
		property: value,
	}
@@ -1022,7 +1031,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
		Parameters: url.Values{},
	}

	opts.Parameters.Set("path", o.filePath())
	opts.Parameters.Set("path", enc.FromStandardPath(o.filePath()))

	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &dl)
@@ -1059,7 +1068,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeT
		Parameters: url.Values{},
	}

	opts.Parameters.Set("path", o.filePath())
	opts.Parameters.Set("path", enc.FromStandardPath(o.filePath()))
	opts.Parameters.Set("overwrite", strconv.FormatBool(overwrite))

	err = o.fs.pacer.Call(func() (bool, error) {
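The CreateDir hunk above combines two fixes: the encoder call, and the (undocumented) disk: prefix for paths containing a colon, applied before encoding. A hypothetical helper mirroring just the prefix rule from that hunk:

package main

import (
	"fmt"
	"strings"
)

// apiPath is a hypothetical name for the check CreateDir now performs inline.
func apiPath(path string) string {
	if strings.IndexRune(path, ':') >= 0 {
		return "disk:" + path // otherwise Yandex misparses everything before the ':'
	}
	return path
}

func main() {
	fmt.Println(apiPath("backup/2019:10")) // disk:backup/2019:10
	fmt.Println(apiPath("backup/plain"))   // backup/plain
}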
@@ -9,6 +9,7 @@ package main

import (
	"archive/tar"
	"compress/bzip2"
	"compress/gzip"
	"encoding/json"
	"flag"
@@ -349,6 +350,8 @@ func untar(srcFile, fileName, extractDir string) {
			log.Fatalf("Couldn't open gzip: %v", err)
		}
		in = gzf
	} else if srcExt == ".bz2" {
		in = bzip2.NewReader(f)
	}

	tarReader := tar.NewReader(in)

@@ -31,6 +31,8 @@ docs = [
    "b2.md",
    "box.md",
    "cache.md",
    "chunker.md",
    "sharefile.md",
    "crypt.md",
    "dropbox.md",
    "ftp.md",

@@ -299,6 +299,9 @@ func (fsys *FS) Open(path string, flags int) (errc int, fh uint64) {
		return translateError(err), fhUnset
	}

	// FIXME add support for unknown length files setting direct_io
	// See: https://github.com/billziss-gh/cgofuse/issues/38

	return 0, fsys.openHandle(handle)
}
@@ -4,7 +4,9 @@ import (
	"context"
	"os"

	"github.com/rclone/rclone/backend/dropbox/dbhash"
	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/spf13/cobra"
)
@@ -26,7 +28,8 @@ The output is in the same format as md5sum and sha1sum.
		cmd.CheckArgs(1, 1, command, args)
		fsrc := cmd.NewFsSrc(args)
		cmd.Run(false, false, command, func() error {
			return operations.DropboxHashSum(context.Background(), fsrc, os.Stdout)
			dbHashType := hash.RegisterHash("Dropbox", 64, dbhash.New)
			return operations.HashLister(context.Background(), dbHashType, fsrc, os.Stdout)
		})
	},
}
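The dbhashsum change swaps a dedicated operations.DropboxHashSum for the generic lister over a hash type registered at run time. A sketch of that pattern, assuming hash.RegisterHash and hash.Supported() behave exactly as this diff uses them:

package main

import (
	"fmt"

	"github.com/rclone/rclone/backend/dropbox/dbhash"
	"github.com/rclone/rclone/fs/hash"
)

func main() {
	// Register the Dropbox hash: name, output width, constructor.
	dbHashType := hash.RegisterHash("Dropbox", 64, dbhash.New)
	fmt.Println("registered:", dbHashType)

	// Supported is now a function (see the hashsum hunk just below),
	// so registered hashes show up in the listing dynamically.
	for _, ht := range hash.Supported().Array() {
		fmt.Println(" *", ht)
	}
}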
@@ -41,7 +41,7 @@ Then
		cmd.CheckArgs(0, 2, command, args)
		if len(args) == 0 {
			fmt.Printf("Supported hashes are:\n")
			for _, ht := range hash.Supported.Array() {
			for _, ht := range hash.Supported().Array() {
				fmt.Printf(" * %v\n", ht)
			}
			return nil

@@ -316,7 +316,11 @@ func showBackend(name string) {
			optionsType = "advanced"
		for _, opt := range opts {
			done[opt.Name] = struct{}{}
			fmt.Printf("#### --%s\n\n", opt.FlagName(backend.Prefix))
			shortOpt := ""
			if opt.ShortOpt != "" {
				shortOpt = fmt.Sprintf(" / -%s", opt.ShortOpt)
			}
			fmt.Printf("#### --%s%s\n\n", opt.FlagName(backend.Prefix), shortOpt)
			fmt.Printf("%s\n\n", opt.Help)
			fmt.Printf("- Config: %s\n", opt.Name)
			fmt.Printf("- Env Var: %s\n", opt.EnvVarName(backend.Prefix))
cmd/info/info.go (222 changed lines)
@@ -6,15 +6,21 @@ package info
import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/cmd/info/internal"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/object"
@@ -22,28 +28,24 @@ import (
	"github.com/spf13/cobra"
)

type position int

const (
	positionMiddle position = 1 << iota
	positionLeft
	positionRight
	positionNone position = 0
	positionAll  position = positionRight<<1 - 1
)

var (
	writeJSON          string
	checkNormalization bool
	checkControl       bool
	checkLength        bool
	checkStreaming     bool
	positionList       = []position{positionMiddle, positionLeft, positionRight}
	uploadWait         time.Duration
	positionLeftRe   = regexp.MustCompile(`(?s)^(.*)-position-left-([[:xdigit:]]+)$`)
	positionMiddleRe = regexp.MustCompile(`(?s)^position-middle-([[:xdigit:]]+)-(.*)-$`)
	positionRightRe  = regexp.MustCompile(`(?s)^position-right-([[:xdigit:]]+)-(.*)$`)
)

func init() {
	cmd.Root.AddCommand(commandDefintion)
	commandDefintion.Flags().StringVarP(&writeJSON, "write-json", "", "", "Write results to file.")
	commandDefintion.Flags().BoolVarP(&checkNormalization, "check-normalization", "", true, "Check UTF-8 Normalization.")
	commandDefintion.Flags().BoolVarP(&checkControl, "check-control", "", true, "Check control characters.")
	commandDefintion.Flags().DurationVarP(&uploadWait, "upload-wait", "", 0, "Wait after writing a file.")
	commandDefintion.Flags().BoolVarP(&checkLength, "check-length", "", true, "Check max filename length.")
	commandDefintion.Flags().BoolVarP(&checkStreaming, "check-streaming", "", true, "Check uploads with indeterminate file size.")
}
@@ -72,7 +74,8 @@ type results struct {
	ctx context.Context
	f   fs.Fs
	mu  sync.Mutex
	stringNeedsEscaping map[string]position
	stringNeedsEscaping map[string]internal.Position
	controlResults      map[string]internal.ControlResult
	maxFileLength       int
	canWriteUnnormalized bool
	canReadUnnormalized  bool
@@ -84,7 +87,8 @@ func newResults(ctx context.Context, f fs.Fs) *results {
	return &results{
		ctx: ctx,
		f:   f,
		stringNeedsEscaping: make(map[string]position),
		stringNeedsEscaping: make(map[string]internal.Position),
		controlResults:      make(map[string]internal.ControlResult),
	}
}

@@ -94,12 +98,14 @@ func (r *results) Print() {
	if checkControl {
		escape := []string{}
		for c, needsEscape := range r.stringNeedsEscaping {
			if needsEscape != positionNone {
				escape = append(escape, fmt.Sprintf("0x%02X", c))
			if needsEscape != internal.PositionNone {
				k := strconv.Quote(c)
				k = k[1 : len(k)-1]
				escape = append(escape, fmt.Sprintf("'%s'", k))
			}
		}
		sort.Strings(escape)
		fmt.Printf("stringNeedsEscaping = []byte{\n")
		fmt.Printf("stringNeedsEscaping = []rune{\n")
		fmt.Printf("\t%s\n", strings.Join(escape, ", "))
		fmt.Printf("}\n")
	}
@@ -116,11 +122,53 @@ func (r *results) Print() {
	}
}

// WriteJSON writes the results to a JSON file when requested
func (r *results) WriteJSON() {
	if writeJSON == "" {
		return
	}

	report := internal.InfoReport{
		Remote: r.f.Name(),
	}
	if checkControl {
		report.ControlCharacters = &r.controlResults
	}
	if checkLength {
		report.MaxFileLength = &r.maxFileLength
	}
	if checkNormalization {
		report.CanWriteUnnormalized = &r.canWriteUnnormalized
		report.CanReadUnnormalized = &r.canReadUnnormalized
		report.CanReadRenormalized = &r.canReadRenormalized
	}
	if checkStreaming {
		report.CanStream = &r.canStream
	}

	if f, err := os.Create(writeJSON); err != nil {
		fs.Errorf(r.f, "Creating JSON file failed: %s", err)
	} else {
		defer fs.CheckClose(f, &err)
		enc := json.NewEncoder(f)
		enc.SetIndent("", "  ")
		err := enc.Encode(report)
		if err != nil {
			fs.Errorf(r.f, "Writing JSON file failed: %s", err)
		}
	}
	fs.Infof(r.f, "Wrote JSON file: %s", writeJSON)
}

// writeFile writes a file with some random contents
func (r *results) writeFile(path string) (fs.Object, error) {
	contents := random.String(50)
	src := object.NewStaticObjectInfo(path, time.Now(), int64(len(contents)), true, nil, r.f)
	return r.f.Put(r.ctx, bytes.NewBufferString(contents), src)
	obj, err := r.f.Put(r.ctx, bytes.NewBufferString(contents), src)
	if uploadWait > 0 {
		time.Sleep(uploadWait)
	}
	return obj, err
}

// check whether normalization is enforced and check whether it is
@@ -144,45 +192,55 @@ func (r *results) checkUTF8Normalization() {
	}
}

func (r *results) checkStringPositions(s string) {
func (r *results) checkStringPositions(k, s string) {
	fs.Infof(r.f, "Writing position file 0x%0X", s)
	positionError := positionNone
	positionError := internal.PositionNone
	res := internal.ControlResult{
		Text:       s,
		WriteError: make(map[internal.Position]string, 3),
		GetError:   make(map[internal.Position]string, 3),
		InList:     make(map[internal.Position]internal.Presence, 3),
	}

	for _, pos := range positionList {
	for _, pos := range internal.PositionList {
		path := ""
		switch pos {
		case positionMiddle:
		case internal.PositionMiddle:
			path = fmt.Sprintf("position-middle-%0X-%s-", s, s)
		case positionLeft:
		case internal.PositionLeft:
			path = fmt.Sprintf("%s-position-left-%0X", s, s)
		case positionRight:
		case internal.PositionRight:
			path = fmt.Sprintf("position-right-%0X-%s", s, s)
		default:
			panic("invalid position: " + pos.String())
		}
		_, writeErr := r.writeFile(path)
		if writeErr != nil {
			fs.Infof(r.f, "Writing %s position file 0x%0X Error: %s", pos.String(), s, writeErr)
		_, writeError := r.writeFile(path)
		if writeError != nil {
			res.WriteError[pos] = writeError.Error()
			fs.Infof(r.f, "Writing %s position file 0x%0X Error: %s", pos.String(), s, writeError)
		} else {
			fs.Infof(r.f, "Writing %s position file 0x%0X OK", pos.String(), s)
		}
		obj, getErr := r.f.NewObject(r.ctx, path)
		if getErr != nil {
			res.GetError[pos] = getErr.Error()
			fs.Infof(r.f, "Getting %s position file 0x%0X Error: %s", pos.String(), s, getErr)
		} else {
			if obj.Size() != 50 {
				res.GetError[pos] = fmt.Sprintf("invalid size %d", obj.Size())
				fs.Infof(r.f, "Getting %s position file 0x%0X Invalid Size: %d", pos.String(), s, obj.Size())
			} else {
				fs.Infof(r.f, "Getting %s position file 0x%0X OK", pos.String(), s)
			}
		}
		if writeErr != nil || getErr != nil {
		if writeError != nil || getErr != nil {
			positionError += pos
		}
	}

	r.mu.Lock()
	r.stringNeedsEscaping[s] = positionError
	r.stringNeedsEscaping[k] = positionError
	r.controlResults[k] = res
	r.mu.Unlock()
}

@@ -199,30 +257,97 @@ func (r *results) checkControls() {
		s := string(i)
		if i == 0 || i == '/' {
			// We're not even going to check NULL or /
			r.stringNeedsEscaping[s] = positionAll
			r.stringNeedsEscaping[s] = internal.PositionAll
			continue
		}
		wg.Add(1)
		go func(s string) {
			defer wg.Done()
			token := <-tokens
			r.checkStringPositions(s)
			k := s
			r.checkStringPositions(k, s)
			tokens <- token
		}(s)
	}
for _, s := range []string{"\", "\xBF", "\xFE"} {
|
||||
for _, s := range []string{"\", "\u00A0", "\xBF", "\xFE"} {
|
||||
		wg.Add(1)
		go func(s string) {
			defer wg.Done()
			token := <-tokens
			r.checkStringPositions(s)
			k := s
			r.checkStringPositions(k, s)
			tokens <- token
		}(s)
	}
	wg.Wait()
	r.checkControlsList()
	fs.Infof(r.f, "Done trying to create control character file names")
}

func (r *results) checkControlsList() {
	l, err := r.f.List(context.TODO(), "")
	if err != nil {
		fs.Errorf(r.f, "Listing control character file names failed: %s", err)
		return
	}

	namesMap := make(map[string]struct{}, len(l))
	for _, s := range l {
		namesMap[path.Base(s.Remote())] = struct{}{}
	}

	for path := range namesMap {
		var pos internal.Position
		var hex, value string
		if g := positionLeftRe.FindStringSubmatch(path); g != nil {
			pos, hex, value = internal.PositionLeft, g[2], g[1]
		} else if g := positionMiddleRe.FindStringSubmatch(path); g != nil {
			pos, hex, value = internal.PositionMiddle, g[1], g[2]
		} else if g := positionRightRe.FindStringSubmatch(path); g != nil {
			pos, hex, value = internal.PositionRight, g[1], g[2]
		} else {
			fs.Infof(r.f, "Unknown path %q", path)
			continue
		}
		var hexValue []byte
		for ; len(hex) >= 2; hex = hex[2:] {
			if b, err := strconv.ParseUint(hex[:2], 16, 8); err != nil {
				fs.Infof(r.f, "Invalid path %q: %s", path, err)
				continue
			} else {
				hexValue = append(hexValue, byte(b))
			}
		}
		if hex != "" {
			fs.Infof(r.f, "Invalid path %q", path)
			continue
		}

		hexStr := string(hexValue)
		k := hexStr
		switch r.controlResults[k].InList[pos] {
		case internal.Absent:
			if hexStr == value {
				r.controlResults[k].InList[pos] = internal.Present
			} else {
				r.controlResults[k].InList[pos] = internal.Renamed
			}
		case internal.Present:
			r.controlResults[k].InList[pos] = internal.Multiple
		case internal.Renamed:
			r.controlResults[k].InList[pos] = internal.Multiple
		}
		delete(namesMap, path)
	}

	if len(namesMap) > 0 {
		fs.Infof(r.f, "Found additional control character file names:")
		for name := range namesMap {
			fs.Infof(r.f, "%q", name)
		}
	}
}

// find the max file name size we can use
func (r *results) findMaxLength() {
	const maxLen = 16 * 1024
@@ -314,37 +439,6 @@ func readInfo(ctx context.Context, f fs.Fs) error {
		r.checkStreaming()
	}
	r.Print()
	r.WriteJSON()
	return nil
}

func (e position) String() string {
	switch e {
	case positionNone:
		return "none"
	case positionAll:
		return "all"
	}
	var buf bytes.Buffer
	if e&positionMiddle != 0 {
		buf.WriteString("middle")
		e &= ^positionMiddle
	}
	if e&positionLeft != 0 {
		if buf.Len() != 0 {
			buf.WriteRune(',')
		}
		buf.WriteString("left")
		e &= ^positionLeft
	}
	if e&positionRight != 0 {
		if buf.Len() != 0 {
			buf.WriteRune(',')
		}
		buf.WriteString("right")
		e &= ^positionRight
	}
	if e != positionNone {
		panic("invalid position")
	}
	return buf.String()
}
cmd/info/internal/build_csv/main.go (158 lines, new file)
@@ -0,0 +1,158 @@
package main

import (
	"encoding/csv"
	"encoding/json"
	"flag"
	"fmt"
	"io"
	"log"
	"os"
	"sort"
	"strconv"

	"github.com/rclone/rclone/cmd/info/internal"
)

func main() {
	fOut := flag.String("o", "out.csv", "Output file")
	flag.Parse()

	args := flag.Args()
	remotes := make([]internal.InfoReport, 0, len(args))
	for _, fn := range args {
		f, err := os.Open(fn)
		if err != nil {
			log.Fatalf("Unable to open %q: %s", fn, err)
		}
		var remote internal.InfoReport
		dec := json.NewDecoder(f)
		err = dec.Decode(&remote)
		if err != nil {
			log.Fatalf("Unable to decode %q: %s", fn, err)
		}
		if remote.ControlCharacters == nil {
			log.Printf("Skipping remote %s: no ControlCharacters", remote.Remote)
		} else {
			remotes = append(remotes, remote)
		}
		if err := f.Close(); err != nil {
			log.Fatalf("Closing %q failed: %s", fn, err)
		}
	}

	charsMap := make(map[string]string)
	var remoteNames []string
	for _, r := range remotes {
		remoteNames = append(remoteNames, r.Remote)
		for k, v := range *r.ControlCharacters {
			v.Text = k
			quoted := strconv.Quote(k)
			charsMap[k] = quoted[1 : len(quoted)-1]
		}
	}
	sort.Strings(remoteNames)

	chars := make([]string, 0, len(charsMap))
	for k := range charsMap {
		chars = append(chars, k)
	}
	sort.Strings(chars)

	// char -> remote -> output
	recordsMap := make(map[string]map[string][]string)
	// remote -> output
	hRemoteMap := make(map[string][]string)
	hOperation := []string{"Write", "Write", "Write", "Get", "Get", "Get", "List", "List", "List"}
	hPosition := []string{"L", "M", "R", "L", "M", "R", "L", "M", "R"}

	// remote
	//   write             get               list
	//   left middle right left middle right left middle right

	for _, r := range remotes {
		hRemoteMap[r.Remote] = []string{r.Remote, "", "", "", "", "", "", "", ""}
		for k, v := range *r.ControlCharacters {
			cMap, ok := recordsMap[k]
			if !ok {
				cMap = make(map[string][]string, 1)
				recordsMap[k] = cMap
			}

			cMap[r.Remote] = []string{
				sok(v.WriteError[internal.PositionLeft]), sok(v.WriteError[internal.PositionMiddle]), sok(v.WriteError[internal.PositionRight]),
				sok(v.GetError[internal.PositionLeft]), sok(v.GetError[internal.PositionMiddle]), sok(v.GetError[internal.PositionRight]),
				pok(v.InList[internal.PositionLeft]), pok(v.InList[internal.PositionMiddle]), pok(v.InList[internal.PositionRight]),
			}
		}
	}

	records := [][]string{
		[]string{"", ""},
		[]string{"", ""},
		[]string{"Bytes", "Char"},
	}
	for _, r := range remoteNames {
		records[0] = append(records[0], hRemoteMap[r]...)
		records[1] = append(records[1], hOperation...)
		records[2] = append(records[2], hPosition...)
	}
	for _, c := range chars {
		k := charsMap[c]
		row := []string{fmt.Sprintf("%X", c), k}
		for _, r := range remoteNames {
			if m, ok := recordsMap[c][r]; ok {
				row = append(row, m...)
			} else {
				row = append(row, "", "", "", "", "", "", "", "", "")
			}
		}
		records = append(records, row)
	}

	var writer io.Writer
	if *fOut == "-" {
		writer = os.Stdout
	} else {
		f, err := os.Create(*fOut)
		if err != nil {
			log.Fatalf("Unable to create %q: %s", *fOut, err)
		}
		defer func() {
			if err := f.Close(); err != nil {
				log.Fatalln("Error writing csv:", err)
			}
		}()
		writer = f
	}

	w := csv.NewWriter(writer)
	err := w.WriteAll(records)
	if err != nil {
		log.Fatalln("Error writing csv:", err)
	} else if err := w.Error(); err != nil {
		log.Fatalln("Error writing csv:", err)
	}
}

func sok(s string) string {
	if s != "" {
		return "ERR"
	}
	return "OK"
}

func pok(p internal.Presence) string {
	switch p {
	case internal.Absent:
		return "MIS"
	case internal.Present:
		return "OK"
	case internal.Renamed:
		return "REN"
	case internal.Multiple:
		return "MUL"
	default:
		return "ERR"
	}
}
cmd/info/internal/internal.go (156 lines, new file)
@@ -0,0 +1,156 @@
package internal

import (
	"bytes"
	"encoding/json"
	"fmt"
	"strings"
)

// Presence describes the presence of a filename in file listing
type Presence int

// Possible Presence states
const (
	Absent Presence = iota
	Present
	Renamed
	Multiple
)

// Position is the placement of the test character in the filename
type Position int

// Predefined positions
const (
	PositionMiddle Position = 1 << iota
	PositionLeft
	PositionRight
	PositionNone Position = 0
	PositionAll  Position = PositionRight<<1 - 1
)

// PositionList contains all valid positions
var PositionList = []Position{PositionMiddle, PositionLeft, PositionRight}

// ControlResult contains the result of a single character test
type ControlResult struct {
	Text       string `json:"-"`
	WriteError map[Position]string
	GetError   map[Position]string
	InList     map[Position]Presence
}

// InfoReport is the structure of the JSON output
type InfoReport struct {
	Remote               string
	ControlCharacters    *map[string]ControlResult
	MaxFileLength        *int
	CanStream            *bool
	CanWriteUnnormalized *bool
	CanReadUnnormalized  *bool
	CanReadRenormalized  *bool
}

func (e Position) String() string {
	switch e {
	case PositionNone:
		return "none"
	case PositionAll:
		return "all"
	}
	var buf bytes.Buffer
	if e&PositionMiddle != 0 {
		buf.WriteString("middle")
		e &= ^PositionMiddle
	}
	if e&PositionLeft != 0 {
		if buf.Len() != 0 {
			buf.WriteRune(',')
		}
		buf.WriteString("left")
		e &= ^PositionLeft
	}
	if e&PositionRight != 0 {
		if buf.Len() != 0 {
			buf.WriteRune(',')
		}
		buf.WriteString("right")
		e &= ^PositionRight
	}
	if e != PositionNone {
		panic("invalid position")
	}
	return buf.String()
}

// MarshalText encodes the position when used as a map key
func (e Position) MarshalText() ([]byte, error) {
	return []byte(e.String()), nil
}

// UnmarshalText decodes a position when used as a map key
func (e *Position) UnmarshalText(text []byte) error {
	switch s := strings.ToLower(string(text)); s {
	default:
		*e = PositionNone
		for _, p := range strings.Split(s, ",") {
			switch p {
			case "left":
				*e |= PositionLeft
			case "middle":
				*e |= PositionMiddle
			case "right":
				*e |= PositionRight
			default:
				return fmt.Errorf("unknown position: %s", e)
			}
		}
	case "none":
		*e = PositionNone
	case "all":
		*e = PositionAll
	}
	return nil
}

func (e Presence) String() string {
	switch e {
	case Absent:
		return "absent"
	case Present:
		return "present"
	case Renamed:
		return "renamed"
	case Multiple:
		return "multiple"
	default:
		panic("invalid presence")
	}
}

// MarshalJSON encodes the presence when used as a JSON value
func (e Presence) MarshalJSON() ([]byte, error) {
	return json.Marshal(e.String())
}

// UnmarshalJSON decodes a presence when used as a JSON value
func (e *Presence) UnmarshalJSON(text []byte) error {
	var s string
	if err := json.Unmarshal(text, &s); err != nil {
		return err
	}
	switch s := strings.ToLower(s); s {
	case "absent":
		*e = Absent
	case "present":
		*e = Present
	case "renamed":
		*e = Renamed
	case "multiple":
		*e = Multiple
	default:
		return fmt.Errorf("unknown presence: %s", e)
	}
	return nil
}
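A quick usage sketch for the Position type defined above: values compose as a bitmask, String renders the combination, and MarshalText is what lets a Position act as a JSON map key (which is how ControlResult's maps serialize in the reports that build_csv consumes):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/rclone/rclone/cmd/info/internal"
)

func main() {
	// Positions combine with bitwise OR.
	p := internal.PositionLeft | internal.PositionRight
	fmt.Println(p) // left,right

	// MarshalText makes Position usable as a JSON object key.
	b, _ := json.Marshal(map[internal.Position]string{p: "needs escaping"})
	fmt.Println(string(b)) // {"left,right":"needs escaping"}
}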
@@ -1,40 +0,0 @@
set -euo pipefail

for f in info-*.log; do
	for pos in middle left right; do
		egrep -oe " Writing $pos position file [^ ]* \w+" $f | sort | cut -d' ' -f 7 > $f.write_$pos
		egrep -oe " Getting $pos position file [^ ]* \w+" $f | sort | cut -d' ' -f 7 > $f.get_$pos
	done
	{
		echo "${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}"
		echo "Write\tWrite\tWrite\tGet\tGet\tGet"
		echo "Mid\tLeft\tRight\tMid\tLeft\tRight"
		paste $f.write_{middle,left,right} $f.get_{middle,left,right}
	} > $f.csv
done

for f in info-*.list; do
	for pos in middle left right; do
		cat $f | perl -lne 'print $1 if /^\s+[0-9]+\s+(.*)/' | grep -a "position-$pos-" | sort > $f.$pos
	done
	{
		echo "${${f%.list}#info-}\t${${f%.list}#info-}\t${${f%.list}#info-}"
		echo "List\tList\tList"
		echo "Mid\tLeft\tRight"
		for e in 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F BF EFBCBC FE; do
			echo -n $(perl -lne 'print "'$e'-$1" if /^position-middle-'$e'-(.*)-/' $f.middle | tr -d "\t\r" | grep -a . || echo Miss)
			echo -n "\t"
			echo -n $(perl -lne 'print "'$e'-$1" if /^(.*)-position-left-'$e'/' $f.left | tr -d "\t\r" | grep -a . || echo Miss)
			echo -n "\t"
			echo $(perl -lne 'print "'$e'-$1" if /^position-right-'$e'-(.*)/' $f.right | tr -d "\t\r" | grep -a . || echo Miss)
			# echo -n $(grep -a "position-middle-$e-" $f.middle | tr -d "\t\r" || echo Miss)"\t"
			# echo -n $(grep -a "position-left-$e" $f.left | tr -d "\t\r" || echo Miss)"\t"
			# echo $(grep -a "position-right-$e-" $f.right | tr -d "\t\r" || echo Miss)
		done
	} > $f.csv
done

for f in info-*.list; do
	paste ${f%.list}.log.csv $f.csv > ${f%.list}.full.csv
done
paste *.full.csv > info-complete.csv
@@ -1,3 +1,4 @@
rclone.exe purge info
rclone.exe info -vv info > info-LocalWindows.log 2>&1
rclone.exe ls -vv info > info-LocalWindows.list 2>&1
set RCLONE_CONFIG_LOCALWINDOWS_TYPE=local
rclone.exe purge LocalWindows:info
rclone.exe info -vv LocalWindows:info --write-json=info-LocalWindows.json > info-LocalWindows.log 2>&1
rclone.exe ls -vv LocalWindows:info > info-LocalWindows.list 2>&1
@@ -7,17 +7,19 @@
export PATH=$GOPATH/src/github.com/rclone/rclone:$PATH

typeset -A allRemotes
allRemotes=(
	TestAmazonCloudDrive '--low-level-retries=2 --checkers=5'
allRemotes=(
	TestAmazonCloudDrive '--low-level-retries=2 --checkers=5 --upload-wait=5s'
	TestB2 ''
	TestBox ''
	TestDrive '--tpslimit=5'
	TestCrypt ''
	TestDropbox '--checkers=1'
	TestGCS ''
	TestJottacloud ''
	TestKoofr ''
	TestMega ''
	TestOneDrive ''
	TestOpenDrive '--low-level-retries=2 --checkers=5'
	TestOpenDrive '--low-level-retries=4 --checkers=5'
	TestPcloud '--low-level-retries=2 --timeout=15s'
	TestS3 ''
	Local ''
@@ -26,18 +28,25 @@ typeset -A allRemotes
set -euo pipefail

if [[ $# -eq 0 ]]; then
	set -- ${(k)allRemotes[@]}
	set -- ${(k)allRemotes[@]}
elif [[ $1 = --list ]]; then
	printf '%s\n' ${(k)allRemotes[@]}
	exit 0
fi

for remote; do
	dir=$remote:infotest
	if [[ $remote = Local ]]; then
		dir=infotest
	fi
	case $remote in
		Local)
			l=Local$(uname)
			export RCLONE_CONFIG_${l:u}_TYPE=local
			dir=$l:infotest;;
		TestGCS)
			dir=$remote:$GCS_BUCKET/infotest;;
		*)
			dir=$remote:infotest;;
	esac

	rclone purge $dir || :
	rclone info -vv $dir ${=allRemotes[$remote]} &> info-$remote.log
	rclone info -vv $dir --write-json=info-$remote.json ${=allRemotes[$remote]:-} &> info-$remote.log
	rclone ls -vv $dir &> info-$remote.list
done
@@ -73,6 +73,11 @@ func (f *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenR
		return nil, translateError(err)
	}

	// If size unknown then use direct io to read
	if entry := handle.Node().DirEntry(); entry != nil && entry.Size() < 0 {
		resp.Flags |= fuse.OpenDirectIO
	}

	return &FileHandle{handle}, nil
}

@@ -3,11 +3,15 @@
package mount

import (
	"runtime"
	"testing"

	"github.com/rclone/rclone/cmd/mountlib/mounttest"
)

func TestMount(t *testing.T) {
	if runtime.NumCPU() <= 2 {
		t.Skip("FIXME skipping mount tests as they lock up on <= 2 CPUs - See: https://github.com/rclone/rclone/issues/3154")
	}
	mounttest.RunTests(t, mount)
}

@@ -39,8 +39,8 @@ var (
)

func init() {
	// DaemonTimeout defaults to non zero for macOS and freebsd
	if runtime.GOOS == "darwin" || runtime.GOOS == "freebsd" {
	// DaemonTimeout defaults to non zero for macOS
	if runtime.GOOS == "darwin" {
		DaemonTimeout = 15 * time.Minute
	}
}

@@ -77,6 +77,7 @@ func RunTests(t *testing.T, fn MountFn) {
			t.Run("TestWriteFileOverwrite", TestWriteFileOverwrite)
			t.Run("TestWriteFileDoubleClose", TestWriteFileDoubleClose)
			t.Run("TestWriteFileFsync", TestWriteFileFsync)
			t.Run("TestWriteFileDup", TestWriteFileDup)
		})
		log.Printf("Finished test run with cache mode %v (ok=%v)", cacheMode, ok)
		if !ok {

@@ -1,10 +1,13 @@
package mounttest

import (
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/rclone/rclone/vfs"
)

// TestWriteFileNoWrite tests writing a file with no write()'s to it
@@ -82,3 +85,48 @@ func TestWriteFileFsync(t *testing.T) {
	run.waitForWriters()
	run.rm(t, "to be synced")
}

// TestWriteFileDup tests behavior of mmap() in Python by using dup() on a file handle
func TestWriteFileDup(t *testing.T) {
	run.skipIfNoFUSE(t)

	if run.vfs.Opt.CacheMode < vfs.CacheModeWrites {
		t.Skip("not supported on vfs-cache-mode < writes")
		return
	}

	filepath := run.path("to be synced")
	fh, err := osCreate(filepath)
	require.NoError(t, err)

	testData := []byte("0123456789")

	err = fh.Truncate(int64(len(testData) + 2))
	require.NoError(t, err)

	err = fh.Sync()
	require.NoError(t, err)

	var dupFd uintptr
	dupFd, err = writeTestDup(fh.Fd())
	require.NoError(t, err)

	dupFile := os.NewFile(dupFd, fh.Name())
	_, err = dupFile.Write(testData)
	require.NoError(t, err)

	err = dupFile.Close()
	require.NoError(t, err)

	_, err = fh.Seek(int64(len(testData)), 0)
	require.NoError(t, err)

	_, err = fh.Write([]byte("10"))
	require.NoError(t, err)

	err = fh.Close()
	require.NoError(t, err)

	run.waitForWriters()
	run.rm(t, "to be synced")
}

@@ -5,9 +5,21 @@ package mounttest
import (
	"runtime"
	"testing"

	"golang.org/x/sys/windows"
)

// TestWriteFileDoubleClose tests double close on write
func TestWriteFileDoubleClose(t *testing.T) {
	t.Skip("not supported on " + runtime.GOOS)
}

// writeTestDup performs the platform-specific implementation of the dup() syscall
func writeTestDup(oldfd uintptr) (uintptr, error) {
	p, err := windows.GetCurrentProcess()
	if err != nil {
		return 0, err
	}
	var h windows.Handle
	return uintptr(h), windows.DuplicateHandle(p, windows.Handle(oldfd), p, &h, 0, true, windows.DUPLICATE_SAME_ACCESS)
}

@@ -4,10 +4,12 @@ package mounttest

import (
	"runtime"
	"syscall"
	"testing"

	"github.com/stretchr/testify/assert"
	"golang.org/x/sys/unix"

	"github.com/rclone/rclone/vfs"
)

// TestWriteFileDoubleClose tests double close on write
@@ -21,14 +23,14 @@ func TestWriteFileDoubleClose(t *testing.T) {
	assert.NoError(t, err)
	fd := out.Fd()

	fd1, err := syscall.Dup(int(fd))
	fd1, err := unix.Dup(int(fd))
	assert.NoError(t, err)

	fd2, err := syscall.Dup(int(fd))
	fd2, err := unix.Dup(int(fd))
	assert.NoError(t, err)

	// close one of the dups - should produce no error
	err = syscall.Close(fd1)
	err = unix.Close(fd1)
	assert.NoError(t, err)

	// write to the file
@@ -41,14 +43,26 @@ func TestWriteFileDoubleClose(t *testing.T) {
	err = out.Close()
	assert.NoError(t, err)

	// write to the other dup - should produce an error
	_, err = syscall.Write(fd2, buf)
	assert.Error(t, err, "input/output error")
	// write to the other dup
	_, err = unix.Write(fd2, buf)
	if run.vfs.Opt.CacheMode < vfs.CacheModeWrites {
		// produces an error if cache mode < writes
		assert.Error(t, err, "input/output error")
	} else {
		// otherwise does not produce an error
		assert.NoError(t, err)
	}

	// close the dup - should not produce an error
	err = syscall.Close(fd2)
	err = unix.Close(fd2)
	assert.NoError(t, err)

	run.waitForWriters()
	run.rm(t, "testdoubleclose")
}

// writeTestDup performs the platform-specific implementation of the dup() syscall
func writeTestDup(oldfd uintptr) (uintptr, error) {
	newfd, err := unix.Dup(int(oldfd))
	return uintptr(newfd), err
}
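For context on the relaxed assertion above: on a regular local filesystem a duplicated descriptor stays writable after the original handle is closed, so the old unconditional "input/output error" expectation encoded mount-specific behaviour that only holds below vfs-cache-mode writes. A standalone, non-FUSE illustration of the dup() semantics the test relies on:

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := ioutil.TempFile("", "dup")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	// Duplicate the descriptor, then close the original handle...
	fd2, err := unix.Dup(int(f.Fd()))
	if err != nil {
		panic(err)
	}
	_ = f.Close()

	// ...the dup still writes fine on a local filesystem.
	_, err = unix.Write(fd2, []byte("still writable\n"))
	fmt.Println("write after close:", err) // <nil>
	_ = unix.Close(fd2)
}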
@@ -5,7 +5,6 @@ package cmd
import (
	"bytes"
	"fmt"
	"os"
	"strings"
	"sync"
	"time"
@@ -13,7 +12,7 @@ import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/log"
	"golang.org/x/crypto/ssh/terminal"
	"github.com/rclone/rclone/lib/terminal"
)

const (
@@ -27,11 +26,6 @@ const (
//
// It returns a func which should be called to stop the stats.
func startProgress() func() {
	err := initTerminal()
	if err != nil {
		fs.Errorf(nil, "Failed to start progress: %v", err)
		return func() {}
	}
	stopStats := make(chan struct{})
	oldLogPrint := fs.LogPrint
	if !log.Redirected() {
@@ -69,13 +63,6 @@ func startProgress() func() {
	}
}

// VT100 codes
const (
	eraseLine         = "\x1b[2K"
	moveToStartOfLine = "\x1b[0G"
	moveUp            = "\x1b[A"
)

// state for the progress printing
var (
	nlines = 0 // number of lines in the previous stats block
@@ -88,11 +75,7 @@ func printProgress(logMessage string) {
	defer progressMu.Unlock()

	var buf bytes.Buffer
	w, h, err := terminal.GetSize(int(os.Stdout.Fd()))
	if err != nil {
		w, h = 80, 25
	}
	_ = h
	w, _ := terminal.GetSize()
	stats := strings.TrimSpace(accounting.GlobalStats().String())
	logMessage = strings.TrimSpace(logMessage)

@@ -102,17 +85,17 @@ func printProgress(logMessage string) {

	if logMessage != "" {
		out("\n")
		out(moveUp)
		out(terminal.MoveUp)
	}
	// Move to the start of the block we wrote erasing all the previous lines
	for i := 0; i < nlines-1; i++ {
		out(eraseLine)
		out(moveUp)
		out(terminal.EraseLine)
		out(terminal.MoveUp)
	}
	out(eraseLine)
	out(moveToStartOfLine)
	out(terminal.EraseLine)
	out(terminal.MoveToStartOfLine)
	if logMessage != "" {
		out(eraseLine)
		out(terminal.EraseLine)
		out(logMessage + "\n")
	}
	fixedLines := strings.Split(stats, "\n")
@@ -126,5 +109,5 @@ func printProgress(logMessage string) {
			out("\n")
		}
	}
	writeToTerminal(buf.Bytes())
	terminal.Write(buf.Bytes())
}

@@ -1,13 +0,0 @@
//+build !windows

package cmd

import "os"

func initTerminal() error {
	return nil
}

func writeToTerminal(b []byte) {
	_, _ = os.Stdout.Write(b)
}
@@ -1,46 +0,0 @@
//+build windows

package cmd

import (
	"fmt"
	"os"
	"syscall"

	ansiterm "github.com/Azure/go-ansiterm"
	"github.com/Azure/go-ansiterm/winterm"
	"github.com/pkg/errors"
)

var (
	ansiParser *ansiterm.AnsiParser
)

func initTerminal() error {
	winEventHandler := winterm.CreateWinEventHandler(os.Stdout.Fd(), os.Stdout)
	if winEventHandler == nil {
		err := syscall.GetLastError()
		if err == nil {
			err = errors.New("initialization failed")
		}
		return errors.Wrap(err, "windows terminal")
	}
	ansiParser = ansiterm.CreateParser("Ground", winEventHandler)
	return nil
}

func writeToTerminal(b []byte) {
	// Remove all non-ASCII characters until this is fixed
	// https://github.com/Azure/go-ansiterm/issues/26
	r := []rune(string(b))
	for i := range r {
		if r[i] >= 127 {
			r[i] = '.'
		}
	}
	b = []byte(string(r))
	_, err := ansiParser.Parse(b)
	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "\n*** Error from ANSI parser: %v\n", err)
	}
}
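The progress rewrite above deletes the per-platform initTerminal/writeToTerminal pair in favour of a shared lib/terminal package. A sketch against only the API surface visible in this hunk (GetSize taking no arguments, the exported VT100 constants, and Write); anything beyond that is an assumption:

package main

import "github.com/rclone/rclone/lib/terminal"

func main() {
	// Terminal width; fallback behaviour on a non-tty is assumed, not shown here.
	w, _ := terminal.GetSize()
	_ = w

	// Erase the current line, return to column 0, then print over it.
	out := terminal.EraseLine + terminal.MoveToStartOfLine + "progress goes here\n"
	terminal.Write([]byte(out))
}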
@@ -3,12 +3,14 @@ package rcd
import (
	"archive/zip"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/rclone/rclone/cmd"
@@ -179,6 +181,8 @@ func downloadFile(filepath string, url string) error {

// unzip is a helper function to unzip a file specified in src to path dest
func unzip(src, dest string) (err error) {
	dest = filepath.Clean(dest) + string(os.PathSeparator)

	r, err := zip.OpenReader(src)
	if err != nil {
		return err
@@ -191,14 +195,18 @@ func unzip(src, dest string) (err error) {

	// Closure to address file descriptors issue with all the deferred .Close() methods
	extractAndWriteFile := func(f *zip.File) error {
		path := filepath.Join(dest, f.Name)
		// Check for Zip Slip: https://github.com/rclone/rclone/issues/3529
		if !strings.HasPrefix(path, dest) {
			return fmt.Errorf("%s: illegal file path", path)
		}

		rc, err := f.Open()
		if err != nil {
			return err
		}
		defer fs.CheckClose(rc, &err)

		path := filepath.Join(dest, f.Name)

		if f.FileInfo().IsDir() {
			if err := os.MkdirAll(path, 0755); err != nil {
				return err
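The Zip Slip check added above works because filepath.Join cleans ".." components, so a prefix test against the cleaned destination (with its trailing separator) rejects any archive entry that would escape it. A self-contained sketch of the same pattern:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// safeJoin is a hypothetical helper isolating the check unzip now performs inline.
func safeJoin(dest, name string) (string, error) {
	dest = filepath.Clean(dest) + string(os.PathSeparator)
	path := filepath.Join(dest, name) // Join cleans "..", e.g. /tmp/out/../../x -> /x
	if !strings.HasPrefix(path, dest) {
		return "", fmt.Errorf("%s: illegal file path", path)
	}
	return path, nil
}

func main() {
	fmt.Println(safeJoin("/tmp/out", "ok/file.txt"))        // /tmp/out/ok/file.txt <nil>
	fmt.Println(safeJoin("/tmp/out", "../../etc/passwd"))   // rejected
}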
Some files were not shown because too many files have changed in this diff.