Mirror of https://github.com/rclone/rclone.git, synced 2025-12-31 23:53:18 +00:00

Compare commits: v1.49.5 ... azure-pipe (21 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 5e69e87ea7 |  |
|  | 1259785329 |  |
|  | de4a20894a |  |
|  | af9a972760 |  |
|  | 6dde819d27 |  |
|  | 048ff6070c |  |
|  | f3ae16aa1c |  |
|  | 8ef9846a4c |  |
|  | 7a8cd59ea6 |  |
|  | a75457c738 |  |
|  | bfe0f0ec2c |  |
|  | fd1154828f |  |
|  | 7b687709cf |  |
|  | 812fcbbe2c |  |
|  | 85a15d39d0 |  |
|  | cc39c4e775 |  |
|  | 4e46d26a0b |  |
|  | 0554daf3d8 |  |
|  | f633996da6 |  |
|  | 9b74d1beb1 |  |
|  | 62a4bad5d2 |  |
.circleci/config.yml (new file, 50 lines)
@@ -0,0 +1,50 @@
---
version: 2

jobs:

  build:
    machine: true

    working_directory: ~/.go_workspace/src/github.com/rclone/rclone

    steps:
      - checkout

      - run:
          name: Cross-compile rclone
          command: |
            docker pull rclone/xgo-cgofuse
            go get -v github.com/karalabe/xgo
            xgo \
                --image=rclone/xgo-cgofuse \
                --targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
                -tags cmount \
                .
            xgo \
                --targets=android/*,ios/* \
                .

      - run:
          name: Prepare artifacts
          command: |
            mkdir -p /tmp/rclone.dist
            cp -R rclone-* /tmp/rclone.dist
            mkdir build
            cp -R rclone-* build/

      - run:
          name: Build rclone
          command: |
            go version
            go build

      - run:
          name: Upload artifacts
          command: |
            if [[ $CIRCLE_PULL_REQUEST != "" ]]; then
              make circleci_upload
            fi

      - store_artifacts:
          path: /tmp/rclone.dist
.github/workflows/build.yml (vendored, 243 lines)
@@ -1,243 +0,0 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable build.yml" -*-

name: build

# Trigger the workflow on push or pull request
on:
  push:
    branches:
      - '*'
    tags:
      - '*'
  pull_request:

jobs:
  build:
    timeout-minutes: 60
    strategy:
      fail-fast: false
      matrix:
        job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'modules_race', 'go1.10', 'go1.11']

        include:
          - job_name: linux
            os: ubuntu-latest
            go: '1.12.x'
            modules: 'off'
            gotags: cmount
            build_flags: '-include "^linux/"'
            check: true
            quicktest: true
            deploy: true

          - job_name: mac
            os: macOS-latest
            go: '1.12.x'
            modules: 'off'
            gotags: ''  # cmount doesn't work on osx travis for some reason
            build_flags: '-include "^darwin/amd64" -cgo'
            quicktest: true
            racequicktest: true
            deploy: true

          - job_name: windows_amd64
            os: windows-latest
            go: '1.12.x'
            modules: 'off'
            gotags: cmount
            build_flags: '-include "^windows/amd64" -cgo'
            quicktest: true
            racequicktest: true
            deploy: true

          - job_name: windows_386
            os: windows-latest
            go: '1.12.x'
            modules: 'off'
            gotags: cmount
            goarch: '386'
            cgo: '1'
            build_flags: '-include "^windows/386" -cgo'
            quicktest: true
            deploy: true

          - job_name: other_os
            os: ubuntu-latest
            go: '1.12.x'
            modules: 'off'
            build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
            compile_all: true
            deploy: true

          - job_name: modules_race
            os: ubuntu-latest
            go: '1.12.x'
            modules: 'on'
            quicktest: true
            racequicktest: true

          - job_name: go1.10
            os: ubuntu-latest
            go: '1.10.x'
            modules: 'off'
            quicktest: true

          - job_name: go1.11
            os: ubuntu-latest
            go: '1.11.x'
            modules: 'off'
            quicktest: true

    name: ${{ matrix.job_name }}

    runs-on: ${{ matrix.os }}

    steps:
      - name: Checkout
        uses: actions/checkout@master
        with:
          path: ./src/github.com/${{ github.repository }}

      - name: Install Go
        uses: actions/setup-go@v1
        with:
          go-version: ${{ matrix.go }}

      - name: Set environment variables
        shell: bash
        run: |
          echo '::set-env name=GOPATH::${{ runner.workspace }}'
          echo '::add-path::${{ runner.workspace }}/bin'
          echo '::set-env name=GO111MODULE::${{ matrix.modules }}'
          echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
          echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
          if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
          if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi

      - name: Install Libraries on Linux
        shell: bash
        run: |
          sudo modprobe fuse
          sudo chmod 666 /dev/fuse
          sudo chown root:$USER /etc/fuse.conf
          sudo apt-get install fuse libfuse-dev rpm pkg-config
        if: matrix.os == 'ubuntu-latest'

      - name: Install Libraries on macOS
        shell: bash
        run: |
          brew update
          brew cask install osxfuse
        if: matrix.os == 'macOS-latest'

      - name: Install Libraries on Windows
        shell: powershell
        run: |
          $ProgressPreference = 'SilentlyContinue'
          choco install -y winfsp zip
          Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
          if ($env:GOARCH -eq "386") {
            choco install -y mingw --forcex86 --force
            Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
          }
          # Copy mingw32-make.exe to make.exe so the same command line
          # can be used on Windows as on macOS and Linux
          $path = (get-command mingw32-make.exe).Path
          Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
        if: matrix.os == 'windows-latest'

      - name: Print Go version and environment
        shell: bash
        run: |
          printf "Using go at: $(which go)\n"
          printf "Go version: $(go version)\n"
          printf "\n\nGo environment:\n\n"
          go env
          printf "\n\nRclone environment:\n\n"
          make vars
          printf "\n\nSystem environment:\n\n"
          env

      - name: Run tests
        shell: bash
        run: |
          make
          make quicktest
        if: matrix.quicktest

      - name: Race test
        shell: bash
        run: |
          make racequicktest
        if: matrix.racequicktest

      - name: Code quality test
        shell: bash
        run: |
          make build_dep
          make check
        if: matrix.check

      - name: Compile all architectures test
        shell: bash
        run: |
          make
          make compile_all
        if: matrix.compile_all

      - name: Deploy built binaries
        shell: bash
        run: |
          if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep ; fi
          make travis_beta
        env:
          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
        # working-directory: '$(modulePath)'
        if: matrix.deploy && github.head_ref == ''

  xgo:
    timeout-minutes: 60
    name: "xgo cross compile"
    runs-on: ubuntu-latest

    steps:

      - name: Checkout
        uses: actions/checkout@master
        with:
          path: ./src/github.com/${{ github.repository }}

      - name: Set environment variables
        shell: bash
        run: |
          echo '::set-env name=GOPATH::${{ runner.workspace }}'
          echo '::add-path::${{ runner.workspace }}/bin'

      - name: Cross-compile rclone
        run: |
          docker pull billziss/xgo-cgofuse
          go get -v github.com/karalabe/xgo
          xgo \
              -image=billziss/xgo-cgofuse \
              -targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
              -tags cmount \
              -dest build \
              .
          xgo \
              -image=billziss/xgo-cgofuse \
              -targets=android/*,ios/* \
              -dest build \
              .

      - name: Build rclone
        run: |
          docker pull golang
          docker run --rm -v "$PWD":/usr/src/rclone -w /usr/src/rclone golang go build -mod=vendor -v

      - name: Upload artifacts
        run: |
          make circleci_upload
        env:
          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
CONTRIBUTING.md
@@ -118,7 +118,7 @@ but they can be run against any of the remotes.

    cd fs/sync
    go test -v -remote TestDrive:
    go test -v -remote TestDrive: -fast-list
    go test -v -remote TestDrive: -subdir

    cd fs/operations
    go test -v -remote TestDrive:

@@ -362,7 +362,9 @@ Or if you want to run the integration tests manually:

* `go test -v -remote TestRemote:`
* `cd fs/sync`
* `go test -v -remote TestRemote:`
* If your remote defines `ListR` check with this also
* If you are making a bucket based remote, then check with this also
  * `go test -v -remote TestRemote: -subdir`
* And if your remote defines `ListR` this also
  * `go test -v -remote TestRemote: -fast-list`

See the [testing](#testing) section for more information on integration tests.
DISABLED.appveyor.yml (new file, 49 lines)
@@ -0,0 +1,49 @@
version: "{build}"

os: Windows Server 2012 R2

clone_folder: c:\gopath\src\github.com\rclone\rclone

cache:
  - '%LocalAppData%\go-build'

environment:
  GOPATH: C:\gopath
  CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
  ORIGPATH: '%PATH%'
  NOCCPATH: C:\MinGW\bin;%GOPATH%\bin;%PATH%
  PATHCC64: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%NOCCPATH%
  PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
  PATH: '%PATHCC64%'
  RCLONE_CONFIG_PASS:
    secure: sq9CPBbwaeKJv+yd24U44neORYPQVy6jsjnQptC+5yk=

install:
  - choco install winfsp -y
  - choco install zip -y
  - copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe

build_script:
  - echo %PATH%
  - echo %GOPATH%
  - go version
  - go env
  - go install
  - go build
  - make log_since_last_release > %TEMP%\git-log.txt
  - make version > %TEMP%\version
  - set /p RCLONE_VERSION=<%TEMP%\version
  - set PATH=%PATHCC32%
  - go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/386" -cgo -tags cmount %RCLONE_VERSION%
  - set PATH=%PATHCC64%
  - go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/amd64" -cgo -no-clean -tags cmount %RCLONE_VERSION%

test_script:
  - make GOTAGS=cmount quicktest

artifacts:
  - path: rclone.exe
  - path: build/*-v*.zip

deploy_script:
  - IF "%APPVEYOR_REPO_NAME%" == "rclone/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
DISABLED.travis.yml (new file, 128 lines)
@@ -0,0 +1,128 @@
---
language: go
sudo: required
dist: xenial
os:
  - linux
go_import_path: github.com/rclone/rclone
before_install:
  - git fetch --unshallow --tags
  - |
    if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
      sudo modprobe fuse
      sudo chmod 666 /dev/fuse
      sudo chown root:$USER /etc/fuse.conf
    fi
    if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
      brew update
      brew tap caskroom/cask
      brew cask install osxfuse
    fi
    if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then
      choco install -y winfsp zip make
      cd ../..  # fix crlf in git checkout
      mv $TRAVIS_REPO_SLUG _old
      git config --global core.autocrlf false
      git clone _old $TRAVIS_REPO_SLUG
      cd $TRAVIS_REPO_SLUG
    fi
install:
  - make vars
env:
  global:
    - GOTAGS=cmount
    - GOMAXPROCS=8  # workaround for cmd/mount tests locking up - see #3154
    - GO111MODULE=off
    - GITHUB_USER=ncw
    - secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
    - secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
addons:
  apt:
    packages:
      - fuse
      - libfuse-dev
      - rpm
      - pkg-config
cache:
  directories:
    - $HOME/.cache/go-build
matrix:
  allow_failures:
    - go: tip
  include:
    - go: 1.9.x
      script:
        - make quicktest
    - go: 1.10.x
      script:
        - make quicktest
    - go: 1.11.x
      script:
        - make quicktest
    - go: 1.12.x
      name: Linux
      env:
        - GOTAGS=cmount
        - BUILD_FLAGS='-include "^linux/"'
        - DEPLOY=true
      script:
        - make build_dep
        - make check
        - make quicktest
    - go: 1.12.x
      name: Go Modules / Race
      env:
        - GO111MODULE=on
        - GOPROXY=https://proxy.golang.org
      script:
        - make quicktest
        - make racequicktest
    - go: 1.12.x
      name: Other OS
      env:
        - DEPLOY=true
        - BUILD_FLAGS='-exclude "^(windows|darwin|linux)/"'
      script:
        - make
        - make compile_all
    - go: 1.12.x
      name: macOS
      os: osx
      env:
        - GOTAGS=  # cmount doesn't work on osx travis for some reason
        - BUILD_FLAGS='-include "^darwin/" -cgo'
        - DEPLOY=true
      cache:
        directories:
          - $HOME/Library/Caches/go-build
      script:
        - make
        - make quicktest
        - make racequicktest
    # - os: windows
    #   name: Windows
    #   go: 1.12.x
    #   env:
    #     - GOTAGS=cmount
    #     - CPATH='C:\Program Files (x86)\WinFsp\inc\fuse'
    #     - BUILD_FLAGS='-include "^windows/amd64" -cgo'  # 386 doesn't build yet
    #   #filter_secrets: false  # works around a problem with secrets under windows
    #   cache:
    #     directories:
    #       - ${LocalAppData}/go-build
    #   script:
    #     - make
    #     - make quicktest
    #     - make racequicktest
    - go: tip
      script:
        - make quicktest

deploy:
  provider: script
  script: make travis_beta
  skip_cleanup: true
  on:
    repo: rclone/rclone
    all_branches: true
    condition: $TRAVIS_PULL_REQUEST == false && $DEPLOY == true
Dockerfile (21 lines)
@@ -1,21 +0,0 @@
FROM golang AS builder

COPY . /go/src/github.com/rclone/rclone/
WORKDIR /go/src/github.com/rclone/rclone/

RUN make quicktest
RUN \
    CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
    make
RUN ./rclone version

# Begin final image
FROM alpine:latest

RUN apk --no-cache add ca-certificates

WORKDIR /root/

COPY --from=builder /go/src/github.com/rclone/rclone/rclone .

ENTRYPOINT [ "./rclone" ]
MANUAL.html (generated, 3216 lines): file diff suppressed because it is too large
MANUAL.txt (generated, 3525 lines): file diff suppressed because it is too large
Makefile (73 lines)
@@ -1,37 +1,23 @@
SHELL = bash
# Branch we are working on
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD))
# Tag of the current commit, if any. If this is not "" then we are building a release
RELEASE_TAG := $(shell git tag -l --points-at HEAD)
# Version of last release (may not be on this branch)
VERSION := $(shell cat VERSION)
# Last tag on this branch
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(shell git rev-parse --abbrev-ref HEAD))
LAST_TAG := $(shell git describe --tags --abbrev=0)
# If we are working on a release, override branch to master
ifdef RELEASE_TAG
ifeq ($(BRANCH),$(LAST_TAG))
BRANCH := master
endif
TAG_BRANCH := -$(BRANCH)
BRANCH_PATH := branch/
# If building HEAD or master then unset TAG_BRANCH and BRANCH_PATH
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
TAG_BRANCH :=
BRANCH_PATH :=
endif
# Make version suffix -DDD-gCCCCCCCC (D=commits since last release, C=Commit) or blank
VERSION_SUFFIX := $(shell git describe --abbrev=8 --tags | perl -lpe 's/^v\d+\.\d+\.\d+//; s/^-(\d+)/"-".sprintf("%03d",$$1)/e;')
# TAG is current version + number of commits since last release + branch
TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)
NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
ifndef RELEASE_TAG
TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
ifneq ($(TAG),$(LAST_TAG))
TAG := $(TAG)-beta
endif
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
ifdef BETA_SUBDIR
BETA_SUBDIR := /$(BETA_SUBDIR)
endif
BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
BETA_PATH := $(BRANCH_PATH)$(TAG)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)

@@ -41,22 +27,19 @@ BUILDTAGS=-tags "$(GOTAGS)"
LINTTAGS=--build-tags "$(GOTAGS)"
endif

.PHONY: rclone test_all vars version
.PHONY: rclone vars version

rclone:
    go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
    mkdir -p `go env GOPATH`/bin/
    cp -av rclone`go env GOEXE` `go env GOPATH`/bin/

test_all:
    go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
    touch fs/version.go
    go install -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
    cp -av `go env GOPATH`/bin/rclone .

vars:
    @echo SHELL="'$(SHELL)'"
    @echo BRANCH="'$(BRANCH)'"
    @echo TAG="'$(TAG)'"
    @echo VERSION="'$(VERSION)'"
    @echo NEXT_VERSION="'$(NEXT_VERSION)'"
    @echo LAST_TAG="'$(LAST_TAG)'"
    @echo NEW_TAG="'$(NEW_TAG)'"
    @echo GO_VERSION="'$(GO_VERSION)'"
    @echo BETA_URL="'$(BETA_URL)'"

@@ -64,7 +47,8 @@ version:
    @echo '$(TAG)'

# Full suite of integration tests
test: rclone test_all
test: rclone
    go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
    -test_all 2>&1 | tee test_all.log
    @echo "Written logs in test_all.log"

@@ -87,8 +71,8 @@ build_dep:

# Get the release dependencies
release_dep:
    go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
    go run bin/get-github-release.go -extract github-release aktau/github-release 'linux-amd64-github-release.tar.bz2'
    go get -u github.com/goreleaser/nfpm/...
    go get -u github.com/aktau/github-release

# Update dependencies
update:

@@ -184,14 +168,14 @@ endif
    @echo Beta release ready at $(BETA_URL)/testbuilds

travis_beta:
ifeq (linux,$(filter linux,$(subst Linux,linux,$(TRAVIS_OS_NAME) $(AGENT_OS))))
ifeq ($(TRAVIS_OS_NAME),linux)
    go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
endif
    git log $(LAST_TAG).. > /tmp/git-log.txt
    go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
    rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
    rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
    rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
    @echo Beta release ready at $(BETA_URL)

@@ -203,25 +187,24 @@ serve: website
    cd docs && hugo server -v -w

tag: doc
    @echo "Old tag is $(VERSION)"
    @echo "New tag is $(NEXT_VERSION)"
    echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)\"\n" | gofmt > fs/version.go
    echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
    echo "$(NEXT_VERSION)" > VERSION
    git tag -s -m "Version $(NEXT_VERSION)" $(NEXT_VERSION)
    bin/make_changelog.py $(LAST_TAG) $(NEXT_VERSION) > docs/content/changelog.md.new
    @echo "Old tag is $(LAST_TAG)"
    @echo "New tag is $(NEW_TAG)"
    echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
    echo -n "$(NEW_TAG)" > docs/layouts/partials/version.html
    git tag -s -m "Version $(NEW_TAG)" $(NEW_TAG)
    bin/make_changelog.py $(LAST_TAG) $(NEW_TAG) > docs/content/changelog.md.new
    mv docs/content/changelog.md.new docs/content/changelog.md
    @echo "Edit the new changelog in docs/content/changelog.md"
    @echo "Then commit all the changes"
    @echo git commit -m \"Version $(NEXT_VERSION)\" -a -v
    @echo git commit -m \"Version $(NEW_TAG)\" -a -v
    @echo "And finally run make retag before make cross etc"

retag:
    git tag -f -s -m "Version $(VERSION)" $(VERSION)
    git tag -f -s -m "Version $(LAST_TAG)" $(LAST_TAG)

startdev:
    echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(VERSION)-DEV\"\n" | gofmt > fs/version.go
    git commit -m "Start $(VERSION)-DEV development" fs/version.go
    echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(LAST_TAG)-DEV\"\n" | gofmt > fs/version.go
    git commit -m "Start $(LAST_TAG)-DEV development" fs/version.go

winzip:
    zip -9 rclone-$(TAG).zip rclone.exe
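A note on the TAG computation above: both the perl VERSION_SUFFIX one-liner and the sed fallback pad the commits-since-last-tag count from `git describe --tags` to three digits so that beta version strings sort lexically. A minimal Go sketch of the same transformation, for illustration only (the helper name and sample input are made up):

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// padDescribe pads the "commits since last tag" field of a
// `git describe --tags` string to three digits, mirroring the
// sed/perl one-liners in the Makefile above.
func padDescribe(describe string) string {
	re := regexp.MustCompile(`-(\d+)-g`)
	return re.ReplaceAllStringFunc(describe, func(m string) string {
		sub := re.FindStringSubmatch(m)
		n, _ := strconv.Atoi(sub[1])
		return fmt.Sprintf("-%03d-g", n)
	})
}

func main() {
	fmt.Println(padDescribe("v1.49.5-86-g5e69e87e")) // v1.49.5-086-g5e69e87e
}
```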
README.md
@@ -1,4 +1,4 @@
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
[](https://rclone.org/)

[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |

@@ -6,11 +6,10 @@
[Contributing](CONTRIBUTING.md) |
[Changelog](https://rclone.org/changelog/) |
[Installation](https://rclone.org/install/) |
[Forum](https://forum.rclone.org/)
[Forum](https://forum.rclone.org/) |

[](https://travis-ci.org/rclone/rclone)
[](https://ci.appveyor.com/project/rclone/rclone)
[](https://dev.azure.com/rclone/rclone/_build/latest?definitionId=2&branchName=master)
[](https://circleci.com/gh/rclone/rclone/tree/master)
[](https://goreportcard.com/report/github.com/rclone/rclone)
[](https://godoc.org/github.com/rclone/rclone)

@@ -52,8 +51,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* put.io [:page_facing_up:](https://rclone.org/webdav/#put-io)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
RELEASE.md (64 lines)
@@ -48,56 +48,24 @@ Can be fixed with
* GO111MODULE=on go mod vendor

## Making a point release

If rclone needs a point release due to some horrendous bug:

First make the release branch. If this is a second point release then
this will be done already.

* BASE_TAG=v1.XX # eg v1.49
* NEW_TAG=${BASE_TAG}.Y # eg v1.49.1
* echo $BASE_TAG $NEW_TAG # v1.49 v1.49.1
* git branch ${BASE_TAG} ${BASE_TAG}-fixes

Now

* git co ${BASE_TAG}-fixes
Making a point release. If rclone needs a point release due to some
horrendous bug, then
* git branch v1.XX v1.XX-fixes
* git cherry-pick any fixes
* Test (see above)
* make NEW_TAG=v1.XX.1 tag
* edit docs/content/changelog.md
* make TAG=${NEW_TAG} doc
* git commit -a -v -m "Version ${NEW_TAG}"
* git tag -d ${NEW_TAG}
* git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
* git push --tags -u origin ${BASE_TAG}-fixes
* Wait for builds to complete
* make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
* make TAG=${NEW_TAG} tarball
* make TAG=${NEW_TAG} sign_upload
* make TAG=${NEW_TAG} check_sign
* make TAG=${NEW_TAG} upload
* make TAG=${NEW_TAG} upload_website
* make TAG=${NEW_TAG} upload_github
* NB this overwrites the current beta so we need to do this
* git co master
* make LAST_TAG=${NEW_TAG} startdev
* # cherry pick the changes to the changelog and VERSION
* git checkout ${BASE_TAG}-fixes VERSION docs/content/changelog.md
* git commit --amend
* git push
* make TAG=v1.43.1 doc
* git commit -a -v -m "Version v1.XX.1"
* git tag -d v1.XX.1
* git tag -s -m "Version v1.XX.1" v1.XX.1
* git push --tags -u origin v1.XX-fixes
* make BRANCH_PATH= TAG=v1.43.1 fetch_binaries
* make TAG=v1.43.1 tarball
* make TAG=v1.43.1 sign_upload
* make TAG=v1.43.1 check_sign
* make TAG=v1.43.1 upload
* make TAG=v1.43.1 upload_website
* make TAG=v1.43.1 upload_github
* NB this overwrites the current beta so after the release, rebuild the last travis build
* Announce!

## Making a manual build of docker

The rclone docker image should autobuild on docker hub. If it doesn't
or needs to be updated then rebuild like this.

```
docker build -t rclone/rclone:1.49.1 -t rclone/rclone:1.49 -t rclone/rclone:1 -t rclone/rclone:latest .
docker push rclone/rclone:1.49.1
docker push rclone/rclone:1.49
docker push rclone/rclone:1
docker push rclone/rclone:latest
```
azure-pipelines.yml (new file, 212 lines)
@@ -0,0 +1,212 @@
---
# Azure pipelines build for rclone
# Parts stolen shamelessly from all round the Internet, especially Caddy

trigger:
  tags:
    include:
      - '*'

strategy:
  matrix:
    linux:
      imageName: ubuntu-16.04
      gorootDir: /usr/local
      GO_VERSION: latest
      GOTAGS: cmount
      BUILD_FLAGS: '-include "^linux/"'
      MAKE_CHECK: true
      MAKE_QUICKTEST: true
      DEPLOY: true
    mac:
      imageName: macos-10.13
      gorootDir: /usr/local
      GO_VERSION: latest
      GOTAGS: ""  # cmount doesn't work on osx travis for some reason
      BUILD_FLAGS: '-include "^darwin/" -cgo'
      MAKE_QUICKTEST: true
      RACEMAKE_QUICKTEST: true
      DEPLOY: true
    windows:
      imageName: windows-2019
      gorootDir: C:\
      GO_VERSION: latest
      BUILD_FLAGS: '-include "^windows/amd64" -cgo'  # 386 doesn't build yet
      MAKE_QUICKTEST: true
      DEPLOY: true
    other_os:
      imageName: ubuntu-16.04
      gorootDir: /usr/local
      GO_VERSION: latest
      BUILD_FLAGS: '-exclude "^(windows|darwin|linux)/"'
      MAKE_COMPILE_ALL: true
      DEPLOY: true
    modules_race:
      imageName: ubuntu-16.04
      gorootDir: /usr/local
      GO_VERSION: latest
      GO111MODULE: on
      GOPROXY: https://proxy.golang.org
      MAKE_QUICKTEST: true
      RACEMAKE_QUICKTEST: true
    go1.9:
      imageName: ubuntu-16.04
      gorootDir: /usr/local
      GOCACHE: ''  # build caching only came in go1.10
      GO_VERSION: go1.9.7
      MAKE_QUICKTEST: true
    go1.10:
      imageName: ubuntu-16.04
      gorootDir: /usr/local
      GO_VERSION: go1.10.8
      MAKE_QUICKTEST: true
    go1.11:
      imageName: ubuntu-16.04
      gorootDir: /usr/local
      GO_VERSION: go1.11.8
      MAKE_QUICKTEST: true

pool:
  vmImage: $(imageName)

variables:
  GOROOT: $(gorootDir)/go
  GOPATH: $(system.defaultWorkingDirectory)/gopath
  GOCACHE: $(system.defaultWorkingDirectory)/gocache
  GOBIN: $(GOPATH)/bin
  modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)'
  GO111MODULE: 'off'
  GOTAGS: cmount
  GO_LATEST: false
  CPATH: ''

steps:
  - bash: |
      latestGo=$(curl "https://golang.org/VERSION?m=text")
      echo "##vso[task.setvariable variable=GO_VERSION]$latestGo"
      echo "##vso[task.setvariable variable=GO_LATEST]true"
      echo "Latest Go version: $latestGo"
    condition: eq( variables['GO_VERSION'], 'latest' )
    displayName: "Get latest Go version"

  - bash: |
      sudo rm -f $(which go)
      echo '##vso[task.prependpath]$(GOBIN)'
      echo '##vso[task.prependpath]$(GOROOT)/bin'
      mkdir -p '$(modulePath)'
      shopt -s extglob
      shopt -s dotglob
      mv !(gopath) '$(modulePath)'
    displayName: Remove old Go, set GOBIN/GOROOT, and move project into GOPATH

  - task: CacheBeta@0
    continueOnError: true
    inputs:
      key: go-build-cache | $(Agent.JobName)
      path: $(GOCACHE)
    displayName: Cache go build
    condition: ne( variables['GOCACHE'], '' )

  - bash: |
      mkdir -p $(GOCACHE)
      echo "not empty" > $(GOCACHE)/not_empty.txt
      echo "GOCACHE=" $(GOCACHE)
      ls -R $(GOCACHE)
    continueOnError: true
    displayName: Create cache dir
    condition: ne( variables['GOCACHE'], '' )

  # Install Libraries (varies by platform)

  - bash: |
      sudo modprobe fuse
      sudo chmod 666 /dev/fuse
      sudo chown root:$USER /etc/fuse.conf
      sudo apt-get install fuse libfuse-dev rpm pkg-config
    condition: eq( variables['Agent.OS'], 'Linux' )
    displayName: Install Libraries on Linux

  - bash: |
      brew update
      brew tap caskroom/cask
      brew cask install osxfuse
    condition: eq( variables['Agent.OS'], 'Darwin' )
    displayName: Install Libraries on macOS

  - powershell: |
      choco install -y winfsp zip make
      Write-Host "##vso[task.setvariable variable=CPATH]C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
    condition: eq( variables['Agent.OS'], 'Windows_NT' )
    displayName: Install Libraries on Windows

  # Install Go (this varies by platform)

  - bash: |
      wget "https://dl.google.com/go/$(GO_VERSION).linux-amd64.tar.gz"
      sudo mkdir $(gorootDir)
      sudo chown ${USER}:${USER} $(gorootDir)
      tar -C $(gorootDir) -xzf "$(GO_VERSION).linux-amd64.tar.gz"
    condition: eq( variables['Agent.OS'], 'Linux' )
    displayName: Install Go on Linux

  - bash: |
      wget "https://dl.google.com/go/$(GO_VERSION).darwin-amd64.tar.gz"
      sudo tar -C $(gorootDir) -xzf "$(GO_VERSION).darwin-amd64.tar.gz"
    condition: eq( variables['Agent.OS'], 'Darwin' )
    displayName: Install Go on macOS

  - powershell: |
      Write-Host "Downloading Go... (please be patient, I am very slow)"
      (New-Object System.Net.WebClient).DownloadFile("https://dl.google.com/go/$(GO_VERSION).windows-amd64.zip", "$(GO_VERSION).windows-amd64.zip")
      Write-Host "Extracting Go... (I'm slow too)"
      Expand-Archive "$(GO_VERSION).windows-amd64.zip" -DestinationPath "$(gorootDir)"
    condition: eq( variables['Agent.OS'], 'Windows_NT' )
    displayName: Install Go on Windows

  # Display environment for debugging

  - bash: |
      printf "Using go at: $(which go)\n"
      printf "Go version: $(go version)\n"
      printf "\n\nGo environment:\n\n"
      go env
      printf "\n\nSystem environment:\n\n"
      env
      printf "\n\nRclone environment:\n\n"
      make vars
    workingDirectory: '$(modulePath)'
    displayName: Print Go version and environment

  # Run Tests

  - bash: |
      make
      make quicktest
    workingDirectory: '$(modulePath)'
    displayName: Run tests
    condition: eq( variables['MAKE_QUICKTEST'], 'true' )

  - bash: |
      make racequicktest
    workingDirectory: '$(modulePath)'
    displayName: Race test
    condition: eq( variables['RACEMAKE_QUICKTEST'], 'true' )

  - bash: |
      make build_dep
      make check
    workingDirectory: '$(modulePath)'
    displayName: Code quality test
    condition: eq( variables['MAKE_CHECK'], 'true' )

  - bash: |
      make compile_all
    workingDirectory: '$(modulePath)'
    displayName: Compile all architectures test
    condition: eq( variables['MAKE_COMPILE_ALL'], 'true' )

  - bash: |
      make vars  # FIXME travis_beta
    workingDirectory: '$(modulePath)'
    displayName: Deploy built binaries
    condition: and( eq( variables['DEPLOY'], 'true' ), ne( variables['Build.Reason'], 'PullRequest' ) )
@@ -24,8 +24,6 @@ import (
    _ "github.com/rclone/rclone/backend/onedrive"
    _ "github.com/rclone/rclone/backend/opendrive"
    _ "github.com/rclone/rclone/backend/pcloud"
    _ "github.com/rclone/rclone/backend/premiumizeme"
    _ "github.com/rclone/rclone/backend/putio"
    _ "github.com/rclone/rclone/backend/qingstor"
    _ "github.com/rclone/rclone/backend/s3"
    _ "github.com/rclone/rclone/backend/sftp"
backend/azureblob/azureblob.go
@@ -16,6 +16,7 @@ import (
    "net/http"
    "net/url"
    "path"
    "regexp"
    "strconv"
    "strings"
    "sync"

@@ -32,7 +33,6 @@ import (
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/walk"
    "github.com/rclone/rclone/lib/bucket"
    "github.com/rclone/rclone/lib/pacer"
)

@@ -143,20 +143,19 @@ type Options struct {

// Fs represents a remote azure server
type Fs struct {
    name          string                          // name of this remote
    root          string                          // the path we are working on if any
    opt           Options                         // parsed config options
    features      *fs.Features                    // optional features
    client        *http.Client                    // http client we are using
    svcURL        *azblob.ServiceURL              // reference to serviceURL
    cntURLcacheMu sync.Mutex                      // mutex to protect cntURLcache
    cntURLcache   map[string]*azblob.ContainerURL // reference to containerURL per container
    rootContainer string                          // container part of root (if any)
    rootDirectory string                          // directory part of root (if any)
    isLimited     bool                            // if limited to one container
    cache         *bucket.Cache                   // cache for container creation status
    pacer         *fs.Pacer                       // To pace and retry the API calls
    uploadToken   *pacer.TokenDispenser           // control concurrency
    name             string                // name of this remote
    root             string                // the path we are working on if any
    opt              Options               // parsed config options
    features         *fs.Features          // optional features
    client           *http.Client          // http client we are using
    svcURL           *azblob.ServiceURL    // reference to serviceURL
    cntURL           *azblob.ContainerURL  // reference to containerURL
    container        string                // the container we are working on
    containerOKMu    sync.Mutex            // mutex to protect container OK
    containerOK      bool                  // true if we have created the container
    containerDeleted bool                  // true if we have deleted the container
    pacer            *fs.Pacer             // To pace and retry the API calls
    uploadToken      *pacer.TokenDispenser // control concurrency
}

// Object describes a azure object
@@ -180,18 +179,18 @@ func (f *Fs) Name() string {

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
    return f.root
    if f.root == "" {
        return f.container
    }
    return f.container + "/" + f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
    if f.rootContainer == "" {
        return fmt.Sprintf("Azure root")
    if f.root == "" {
        return fmt.Sprintf("Azure container %s", f.container)
    }
    if f.rootDirectory == "" {
        return fmt.Sprintf("Azure container %s", f.rootContainer)
    }
    return fmt.Sprintf("Azure container %s path %s", f.rootContainer, f.rootDirectory)
    return fmt.Sprintf("Azure container %s path %s", f.container, f.root)
}

// Features returns the optional features of this Fs
@@ -199,23 +198,21 @@ func (f *Fs) Features() *fs.Features {
    return f.features
}

// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
    root = strings.Trim(path, "/")
// Pattern to match a azure path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)

// parsePath parses a azure 'url'
func parsePath(path string) (container, directory string, err error) {
    parts := matcher.FindStringSubmatch(path)
    if parts == nil {
        err = errors.Errorf("couldn't find container in azure path %q", path)
    } else {
        container, directory = parts[1], parts[2]
        directory = strings.Trim(directory, "/")
    }
    return
}

// split returns container and containerPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) {
    return bucket.Split(path.Join(f.root, rootRelativePath))
}

// split returns container and containerPath from the object
func (o *Object) split() (container, containerPath string) {
    return o.fs.split(o.remote)
}

// validateAccessTier checks if azureblob supports user supplied tier
func validateAccessTier(tier string) bool {
    switch tier {
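For orientation, the split helpers in the hunk above reduce to one rule: everything before the first slash of the root-relative path is the container, and the rest is the path inside it. A minimal sketch of that behaviour, assuming bucket.Split works this way (the helper below is illustrative, not rclone's API):

```go
package main

import (
	"fmt"
	"strings"
)

// split mimics the container/path split used above: everything up to
// the first "/" is the container name, the rest is the path within it.
func split(absPath string) (container, containerPath string) {
	slash := strings.IndexRune(absPath, '/')
	if slash < 0 {
		return absPath, ""
	}
	return absPath[:slash], absPath[slash+1:]
}

func main() {
	fmt.Println(split("mycontainer/dir/file.txt")) // mycontainer dir/file.txt
	fmt.Println(split("mycontainer"))              // mycontainer, empty path
}
```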
@@ -320,12 +317,6 @@ func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline
    return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log})
}

// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
    f.root = parsePath(root)
    f.rootContainer, f.rootDirectory = bucket.Split(f.root)
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    ctx := context.Background()

@@ -347,6 +338,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    if opt.ListChunkSize > maxListChunkSize {
        return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize)
    }
    container, directory, err := parsePath(root)
    if err != nil {
        return nil, err
    }
    if opt.Endpoint == "" {
        opt.Endpoint = storageDefaultBaseURL
    }
@@ -361,25 +356,24 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    f := &Fs{
        name:        name,
        opt:         *opt,
        container:   container,
        root:        directory,
        pacer:       fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
        uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
        client:      fshttp.NewClient(fs.Config),
        cache:       bucket.NewCache(),
        cntURLcache: make(map[string]*azblob.ContainerURL, 1),
    }
    f.setRoot(root)
    f.features = (&fs.Features{
        ReadMimeType:      true,
        WriteMimeType:     true,
        BucketBased:       true,
        BucketBasedRootOK: true,
        SetTier:           true,
        GetTier:           true,
        ReadMimeType:  true,
        WriteMimeType: true,
        BucketBased:   true,
        SetTier:       true,
        GetTier:       true,
    }).Fill(f)

    var (
        u          *url.URL
        serviceURL azblob.ServiceURL
        u            *url.URL
        serviceURL   azblob.ServiceURL
        containerURL azblob.ContainerURL
    )
    switch {
    case opt.UseEmulator:

@@ -393,6 +387,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        }
        pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
        serviceURL = azblob.NewServiceURL(*u, pipeline)
        containerURL = serviceURL.NewContainerURL(container)
    case opt.Account != "" && opt.Key != "":
        credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
        if err != nil {

@@ -405,6 +400,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        }
        pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}})
        serviceURL = azblob.NewServiceURL(*u, pipeline)
        containerURL = serviceURL.NewContainerURL(container)
    case opt.SASURL != "":
        u, err = url.Parse(opt.SASURL)
        if err != nil {

@@ -415,30 +411,38 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        // Check if we have container level SAS or account level sas
        parts := azblob.NewBlobURLParts(*u)
        if parts.ContainerName != "" {
            if f.rootContainer != "" && parts.ContainerName != f.rootContainer {
            if container != "" && parts.ContainerName != container {
                return nil, errors.New("Container name in SAS URL and container provided in command do not match")
            }
            containerURL := azblob.NewContainerURL(*u, pipeline)
            f.cntURLcache[parts.ContainerName] = &containerURL
            f.isLimited = true

            f.container = parts.ContainerName
            containerURL = azblob.NewContainerURL(*u, pipeline)
        } else {
            serviceURL = azblob.NewServiceURL(*u, pipeline)
            containerURL = serviceURL.NewContainerURL(container)
        }
    default:
        return nil, errors.New("Need account+key or connectionString or sasURL")
    }
    f.svcURL = &serviceURL
    f.cntURL = &containerURL

    if f.rootContainer != "" && f.rootDirectory != "" {
    if f.root != "" {
        f.root += "/"
        // Check to see if the (container,directory) is actually an existing file
        oldRoot := f.root
        newRoot, leaf := path.Split(oldRoot)
        f.setRoot(newRoot)
        _, err := f.NewObject(ctx, leaf)
        remote := path.Base(directory)
        f.root = path.Dir(directory)
        if f.root == "." {
            f.root = ""
        } else {
            f.root += "/"
        }
        _, err := f.NewObject(ctx, remote)
        if err != nil {
            if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
                // File doesn't exist or is a directory so return old f
                f.setRoot(oldRoot)
                f.root = oldRoot
                return f, nil
            }
            return nil, err

@@ -449,20 +453,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        return f, nil
}

// return the container URL for the container passed in
func (f *Fs) cntURL(container string) (containerURL *azblob.ContainerURL) {
    f.cntURLcacheMu.Lock()
    defer f.cntURLcacheMu.Unlock()
    var ok bool
    if containerURL, ok = f.cntURLcache[container]; !ok {
        cntURL := f.svcURL.NewContainerURL(container)
        containerURL = &cntURL
        f.cntURLcache[container] = containerURL
    }
    return containerURL
}

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
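The cntURL helper in the hunk above is a standard lazy, mutex-guarded cache: look the container up in a map, and construct and store its URL on first use. A self-contained sketch of the same pattern, with illustrative names rather than rclone's types:

```go
package main

import (
	"fmt"
	"sync"
)

// containerURLs caches one URL per container, constructing each entry
// lazily on first lookup under a mutex, like cntURL above.
type containerURLs struct {
	mu    sync.Mutex
	cache map[string]string
	mk    func(name string) string // constructs the URL on first use
}

// get returns the cached URL for a container, creating it if needed.
func (c *containerURLs) get(name string) string {
	c.mu.Lock()
	defer c.mu.Unlock()
	u, ok := c.cache[name]
	if !ok {
		u = c.mk(name)
		c.cache[name] = u
	}
	return u
}

func main() {
	c := &containerURLs{
		cache: map[string]string{},
		mk: func(name string) string {
			return "https://account.blob.core.windows.net/" + name
		},
	}
	fmt.Println(c.get("mycontainer"))
	fmt.Println(c.get("mycontainer")) // second call hits the cache
}
```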
@@ -492,8 +482,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
}

// getBlobReference creates an empty blob reference with no metadata
func (f *Fs) getBlobReference(container, containerPath string) azblob.BlobURL {
    return f.cntURL(container).NewBlobURL(containerPath)
func (f *Fs) getBlobReference(remote string) azblob.BlobURL {
    return f.cntURL.NewBlobURL(f.root + remote)
}

// updateMetadataWithModTime adds the modTime passed in to o.meta.
@@ -529,18 +519,16 @@ type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
// the container and root supplied
//
// dir is the starting directory, "" for root
//
// The remote has prefix removed from it and if addContainer is set then
// it adds the container to the start.
func (f *Fs) list(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, maxResults uint, fn listFn) error {
    if f.cache.IsDeleted(container) {
func (f *Fs) list(ctx context.Context, dir string, recurse bool, maxResults uint, fn listFn) error {
    f.containerOKMu.Lock()
    deleted := f.containerDeleted
    f.containerOKMu.Unlock()
    if deleted {
        return fs.ErrorDirNotFound
    }
    if prefix != "" {
        prefix += "/"
    }
    if directory != "" {
        directory += "/"
    root := f.root
    if dir != "" {
        root += dir + "/"
    }
    delimiter := ""
    if !recurse {

@@ -555,14 +543,15 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
            UncommittedBlobs: false,
            Deleted:          false,
        },
        Prefix:     directory,
        Prefix:     root,
        MaxResults: int32(maxResults),
    }
    directoryMarkers := map[string]struct{}{}
    for marker := (azblob.Marker{}); marker.NotDone(); {
        var response *azblob.ListBlobsHierarchySegmentResponse
        err := f.pacer.Call(func() (bool, error) {
            var err error
            response, err = f.cntURL(container).ListBlobsHierarchySegment(ctx, marker, delimiter, options)
            response, err = f.cntURL.ListBlobsHierarchySegment(ctx, marker, delimiter, options)
            return f.shouldRetry(err)
        })

@@ -582,17 +571,26 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
            // if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
            //     return nil
            // }
            if !strings.HasPrefix(file.Name, prefix) {
            if !strings.HasPrefix(file.Name, f.root) {
                fs.Debugf(f, "Odd name received %q", file.Name)
                continue
            }
            remote := file.Name[len(prefix):]
            remote := file.Name[len(f.root):]
            if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) {
                if strings.HasSuffix(remote, "/") {
                    remote = remote[:len(remote)-1]
                }
                err = fn(remote, file, true)
                if err != nil {
                    return err
                }
                // Keep track of directory markers. If recursing then
                // there will be no Prefixes so no need to keep track
                if !recurse {
                    directoryMarkers[remote] = struct{}{}
                }
                continue // skip directory marker
            }
            if addContainer {
                remote = path.Join(container, remote)
            }
            // Send object
            err = fn(remote, file, false)
            if err != nil {

@@ -602,13 +600,14 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
        // Send the subdirectories
        for _, remote := range response.Segment.BlobPrefixes {
            remote := strings.TrimRight(remote.Name, "/")
            if !strings.HasPrefix(remote, prefix) {
            if !strings.HasPrefix(remote, f.root) {
                fs.Debugf(f, "Odd directory name received %q", remote)
                continue
            }
            remote = remote[len(prefix):]
            if addContainer {
                remote = path.Join(container, remote)
            remote = remote[len(f.root):]
            // Don't send if already sent as a directory marker
            if _, found := directoryMarkers[remote]; found {
                continue
            }
            // Send object
            err = fn(remote, nil, true)
@@ -633,9 +632,19 @@ func (f *Fs) itemToDirEntry(remote string, object *azblob.BlobItem, isDirectory
    return o, nil
}

// mark the container as being OK
func (f *Fs) markContainerOK() {
    if f.container != "" {
        f.containerOKMu.Lock()
        f.containerOK = true
        f.containerDeleted = false
        f.containerOKMu.Unlock()
    }
}

// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
    err = f.list(ctx, container, directory, prefix, addContainer, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    err = f.list(ctx, dir, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
        entry, err := f.itemToDirEntry(remote, object, isDirectory)
        if err != nil {
            return err

@@ -649,24 +658,17 @@ func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, a
        return nil, err
    }
    // container must be present if listing succeeded
    f.cache.MarkOK(container)
    f.markContainerOK()
    return entries, nil
}

// listContainers returns all the containers to out
func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) {
    if f.isLimited {
        f.cntURLcacheMu.Lock()
        for container := range f.cntURLcache {
            d := fs.NewDir(container, time.Time{})
            entries = append(entries, d)
        }
        f.cntURLcacheMu.Unlock()
        return entries, nil
func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
    if dir != "" {
        return nil, fs.ErrorListBucketRequired
    }
    err = f.listContainersToFn(func(container *azblob.ContainerItem) error {
        d := fs.NewDir(container.Name, container.Properties.LastModified)
        f.cache.MarkOK(container.Name)
        entries = append(entries, d)
        return nil
    })

@@ -686,14 +688,10 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    container, directory := f.split(dir)
    if container == "" {
        if directory != "" {
            return nil, fs.ErrorListBucketRequired
        }
        return f.listContainers(ctx)
    if f.container == "" {
        return f.listContainers(dir)
    }
    return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
    return f.listDir(ctx, dir)
}

// ListR lists the objects and directories of the Fs starting
@@ -713,43 +711,22 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
    container, directory := f.split(dir)
    if f.container == "" {
        return fs.ErrorListBucketRequired
    }
    list := walk.NewListRHelper(callback)
    listR := func(container, directory, prefix string, addContainer bool) error {
        return f.list(ctx, container, directory, prefix, addContainer, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
            entry, err := f.itemToDirEntry(remote, object, isDirectory)
            if err != nil {
                return err
            }
            return list.Add(entry)
        })
    }
    if container == "" {
        entries, err := f.listContainers(ctx)
    err = f.list(ctx, dir, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
        entry, err := f.itemToDirEntry(remote, object, isDirectory)
        if err != nil {
            return err
        }
        for _, entry := range entries {
            err = list.Add(entry)
            if err != nil {
                return err
            }
            container := entry.Remote()
            err = listR(container, "", f.rootDirectory, true)
            if err != nil {
                return err
            }
            // container must be present if listing succeeded
            f.cache.MarkOK(container)
        }
    } else {
        err = listR(container, directory, f.rootDirectory, f.rootContainer == "")
        if err != nil {
            return err
        }
        // container must be present if listing succeeded
        f.cache.MarkOK(container)
        return list.Add(entry)
    })
    if err != nil {
        return err
    }
    // container must be present if listing succeeded
    f.markContainerOK()
    return list.Flush()
}
@@ -799,43 +776,86 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
    return fs, fs.Update(ctx, in, src, options...)
}

// Check if the container exists
//
// NB this can return incorrect results if called immediately after container deletion
func (f *Fs) dirExists() (bool, error) {
    options := azblob.ListBlobsSegmentOptions{
        Details: azblob.BlobListingDetails{
            Copy:             false,
            Metadata:         false,
            Snapshots:        false,
            UncommittedBlobs: false,
            Deleted:          false,
        },
        MaxResults: 1,
    }
    err := f.pacer.Call(func() (bool, error) {
        ctx := context.Background()
        _, err := f.cntURL.ListBlobsHierarchySegment(ctx, azblob.Marker{}, "", options)
        return f.shouldRetry(err)
    })
    if err == nil {
        return true, nil
    }
    // Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
    if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
        return false, nil
    }
    return false, err
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    container, _ := f.split(dir)
    return f.makeContainer(ctx, container)
}
    f.containerOKMu.Lock()
    defer f.containerOKMu.Unlock()
    if f.containerOK {
        return nil
    }
    if !f.containerDeleted {
        exists, err := f.dirExists()
        if err == nil {
            f.containerOK = exists
        }
        if err != nil || exists {
            return err
        }
    }

// makeContainer creates the container if it doesn't exist
func (f *Fs) makeContainer(ctx context.Context, container string) error {
    return f.cache.Create(container, func() error {
        // now try to create the container
        return f.pacer.Call(func() (bool, error) {
            _, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
            if err != nil {
                if storageErr, ok := err.(azblob.StorageError); ok {
                    switch storageErr.ServiceCode() {
                    case azblob.ServiceCodeContainerAlreadyExists:
                        return false, nil
                    case azblob.ServiceCodeContainerBeingDeleted:
                        // From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container
                        // When a container is deleted, a container with the same name cannot be created
                        // for at least 30 seconds; the container may not be available for more than 30
                        // seconds if the service is still processing the request.
                        time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds
                        f.cache.MarkDeleted(container)
                        return true, err
                    }
    // now try to create the container
    err := f.pacer.Call(func() (bool, error) {
        ctx := context.Background()
        _, err := f.cntURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
        if err != nil {
            if storageErr, ok := err.(azblob.StorageError); ok {
                switch storageErr.ServiceCode() {
                case azblob.ServiceCodeContainerAlreadyExists:
                    f.containerOK = true
                    return false, nil
                case azblob.ServiceCodeContainerBeingDeleted:
                    // From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container
                    // When a container is deleted, a container with the same name cannot be created
                    // for at least 30 seconds; the container may not be available for more than 30
                    // seconds if the service is still processing the request.
                    time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds
                    f.containerDeleted = true
                    return true, err
                }
            }
            return f.shouldRetry(err)
        })
    }, nil)
}
        return f.shouldRetry(err)
    })
    if err == nil {
        f.containerOK = true
        f.containerDeleted = false
    }
    return errors.Wrap(err, "failed to make container")
}
||||
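The ContainerBeingDeleted branch above leans on the pacer contract: the closure returns (true, err) to request another attempt after the fixed sleep, so ten default retries at six seconds comfortably cover Azure's ~30 second deletion window. A minimal sketch of that retry contract, assuming a simplified stand-in for the pacer (the names below are illustrative, not rclone's actual pacer API):

package main

import (
	"errors"
	"fmt"
	"time"
)

// call retries fn up to maxTries times while it asks for a retry,
// mirroring how the pacer drives the closure in makeContainer above.
func call(maxTries int, fn func() (retry bool, err error)) error {
	var err error
	for i := 0; i < maxTries; i++ {
		var retry bool
		retry, err = fn()
		if !retry {
			return err
		}
	}
	return err
}

func main() {
	deletedUntil := time.Now().Add(12 * time.Second) // container still "being deleted"
	err := call(10, func() (bool, error) {
		if time.Now().Before(deletedUntil) {
			time.Sleep(6 * time.Second) // 10 retries * 6s covers the ~30s window
			return true, errors.New("container being deleted")
		}
		return false, nil // create succeeded
	})
	fmt.Println("create result:", err)
}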
// isEmpty checks to see if a given (container, directory) is empty and returns an error if not
func (f *Fs) isEmpty(ctx context.Context, container, directory string) (err error) {
// isEmpty checks to see if a given directory is empty and returns an error if not
func (f *Fs) isEmpty(ctx context.Context, dir string) (err error) {
empty := true
err = f.list(ctx, container, directory, f.rootDirectory, f.rootContainer == "", true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
err = f.list(ctx, dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
empty = false
return nil
})
@@ -850,42 +870,47 @@ func (f *Fs) isEmpty(ctx context.Context, container, directory string) (err erro

// deleteContainer deletes the container. It can delete a full
// container so use isEmpty if you don't want that.
func (f *Fs) deleteContainer(ctx context.Context, container string) error {
return f.cache.Remove(container, func() error {
options := azblob.ContainerAccessConditions{}
return f.pacer.Call(func() (bool, error) {
_, err := f.cntURL(container).GetProperties(ctx, azblob.LeaseAccessConditions{})
if err == nil {
_, err = f.cntURL(container).Delete(ctx, options)
}
func (f *Fs) deleteContainer() error {
f.containerOKMu.Lock()
defer f.containerOKMu.Unlock()
options := azblob.ContainerAccessConditions{}
ctx := context.Background()
err := f.pacer.Call(func() (bool, error) {
_, err := f.cntURL.GetProperties(ctx, azblob.LeaseAccessConditions{})
if err == nil {
_, err = f.cntURL.Delete(ctx, options)
}

if err != nil {
// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
return false, fs.ErrorDirNotFound
}

return f.shouldRetry(err)
if err != nil {
// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
return false, fs.ErrorDirNotFound
}

return f.shouldRetry(err)
})
}

return f.shouldRetry(err)
})
if err == nil {
f.containerOK = false
f.containerDeleted = true
}
return errors.Wrap(err, "failed to delete container")
}

// Rmdir deletes the container if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
container, directory := f.split(dir)
if container == "" || directory != "" {
return nil
}
err := f.isEmpty(ctx, container, directory)
err := f.isEmpty(ctx, dir)
if err != nil {
return err
}
return f.deleteContainer(ctx, container)
if f.root != "" || dir != "" {
return nil
}
return f.deleteContainer()
}

// Precision of the remote
@@ -901,12 +926,11 @@ func (f *Fs) Hashes() hash.Set {
// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context) error {
dir := "" // forward compat!
container, directory := f.split(dir)
if container == "" || directory != "" {
// Delegate to caller if not root of a container
if f.root != "" || dir != "" {
// Delegate to caller if not root container
return fs.ErrorCantPurge
}
return f.deleteContainer(ctx, container)
return f.deleteContainer()
}

// Copy src to this remote using server side copy operations.
@@ -919,8 +943,7 @@ func (f *Fs) Purge(ctx context.Context) error {
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstContainer, dstPath := f.split(remote)
err := f.makeContainer(ctx, dstContainer)
err := f.Mkdir(ctx, "")
if err != nil {
return nil, err
}
@@ -929,7 +952,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
dstBlobURL := f.getBlobReference(dstContainer, dstPath)
dstBlobURL := f.getBlobReference(remote)
srcBlobURL := srcObj.getBlobReference()

source, err := url.Parse(srcBlobURL.String())
@@ -1062,8 +1085,7 @@ func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {

// getBlobReference creates an empty blob reference with no metadata
func (o *Object) getBlobReference() azblob.BlobURL {
container, directory := o.split()
return o.fs.getBlobReference(container, directory)
return o.fs.getBlobReference(o.remote)
}

// clearMetaData clears enough metadata so readMetaData will re-read it
@@ -1163,7 +1185,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if o.AccessTier() == azblob.AccessTierArchive {
return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first")
}
fs.FixRangeOption(options, o.size)

for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
@@ -1369,8 +1391,7 @@ outer:
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
container, _ := o.split()
err = o.fs.makeContainer(ctx, container)
err = o.fs.Mkdir(ctx, "")
if err != nil {
return err
}

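The container calls above funnel through a per-container creation cache (f.cache.Create / MarkOK / MarkDeleted / Remove) so that existence checks and create calls are not repeated on every operation. A rough sketch of the idea, assuming a simplified cache rather than rclone's actual lib/bucket implementation:

package bucketcache

import "sync"

// Cache remembers which buckets/containers are known to exist.
type Cache struct {
	mu      sync.Mutex
	created map[string]bool
}

func New() *Cache { return &Cache{created: make(map[string]bool)} }

// MarkOK records that the bucket is known to exist.
func (c *Cache) MarkOK(name string) {
	c.mu.Lock()
	c.created[name] = true
	c.mu.Unlock()
}

// Create runs create only if the bucket isn't already known to exist.
func (c *Cache) Create(name string, create func() error) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.created[name] {
		return nil
	}
	if err := create(); err != nil {
		return err
	}
	c.created[name] = true
	return nil
}

// Remove deletes the bucket via remove and forgets its cached status.
func (c *Cache) Remove(name string, remove func() error) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if err := remove(); err != nil {
		return err
	}
	delete(c.created, name)
	return nil
}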
backend/b2/b2.go (572 changes)
@@ -14,6 +14,7 @@ import (
"io"
"net/http"
"path"
"regexp"
"strconv"
"strings"
"sync"
@@ -29,7 +30,6 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
@@ -164,24 +164,24 @@ type Options struct {

// Fs represents a remote b2 server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
features *fs.Features // optional features
srv *rest.Client // the connection to the b2 server
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache for bucket creation status
bucketIDMutex sync.Mutex // mutex to protect _bucketID
_bucketID map[string]string // the ID of the bucket we are working on
bucketTypeMutex sync.Mutex // mutex to protect _bucketType
_bucketType map[string]string // the Type of the bucket we are working on
info api.AuthorizeAccountResponse // result of authorize call
uploadMu sync.Mutex // lock for upload variable
uploads map[string][]*api.GetUploadURLResponse // Upload URLs by bucketID
authMu sync.Mutex // lock for authorizing the account
pacer *fs.Pacer // To pace and retry the API calls
bufferTokens chan []byte // control concurrency of multipart uploads
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
features *fs.Features // optional features
srv *rest.Client // the connection to the b2 server
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
bucketIDMutex sync.Mutex // mutex to protect _bucketID
_bucketID string // the ID of the bucket we are working on
bucketTypeMutex sync.Mutex // mutex to protect _bucketType
_bucketType string // the Type of the bucket we are working on
info api.AuthorizeAccountResponse // result of authorize call
uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadURLResponse // result of get upload URL calls
authMu sync.Mutex // lock for authorizing the account
pacer *fs.Pacer // To pace and retry the API calls
bufferTokens chan []byte // control concurrency of multipart uploads
}

// Object describes a b2 object
@@ -204,18 +204,18 @@ func (f *Fs) Name() string {

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootBucket == "" {
return fmt.Sprintf("B2 root")
if f.root == "" {
return fmt.Sprintf("B2 bucket %s", f.bucket)
}
if f.rootDirectory == "" {
return fmt.Sprintf("B2 bucket %s", f.rootBucket)
}
return fmt.Sprintf("B2 bucket %s path %s", f.rootBucket, f.rootDirectory)
return fmt.Sprintf("B2 bucket %s path %s", f.bucket, f.root)
}

// Features returns the optional features of this Fs
@@ -223,23 +223,21 @@ func (f *Fs) Features() *fs.Features {
return f.features
}

// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
// Pattern to match a b2 path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)

// parsePath parses a b2 'url'
func parsePath(path string) (bucket, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("couldn't find bucket in b2 path %q", path)
} else {
bucket, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
return
}

// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
return bucket.Split(path.Join(f.root, rootRelativePath))
}

// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}

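The matcher regexp `^/*([^/]*)(.*)$` takes everything up to the first slash as the bucket and the rest (trimmed of slashes) as the directory. A quick standalone illustration of the split, using the same pattern as the code above:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)

func main() {
	for _, p := range []string{"bucket", "bucket/dir/file.txt", "/bucket/dir/"} {
		parts := matcher.FindStringSubmatch(p)
		bucket, directory := parts[1], strings.Trim(parts[2], "/")
		fmt.Printf("%q -> bucket=%q directory=%q\n", p, bucket, directory)
	}
}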
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
401, // Unauthorized (eg "Token has expired")
@@ -337,12 +335,6 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
return
}

// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
@@ -360,6 +352,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "b2: chunk size")
}
bucket, directory, err := parsePath(root)
if err != nil {
return nil, err
}
if opt.Account == "" {
return nil, errors.New("account not found")
}
@@ -370,21 +366,17 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
opt.Endpoint = defaultEndpoint
}
f := &Fs{
name: name,
opt: *opt,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
cache: bucket.NewCache(),
_bucketID: make(map[string]string, 1),
_bucketType: make(map[string]string, 1),
uploads: make(map[string][]*api.GetUploadURLResponse),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
name: name,
opt: *opt,
bucket: bucket,
root: directory,
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
}).Fill(f)
// Set the test flag if required
if opt.TestMode != "" {
@@ -398,27 +390,33 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrap(err, "failed to authorize account")
}
// If this is a key limited to a single bucket, it must exist already
if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
if f.bucket != "" && f.info.Allowed.BucketID != "" {
allowedBucket := f.info.Allowed.BucketName
if allowedBucket == "" {
return nil, errors.New("bucket that application key is restricted to no longer exists")
}
if allowedBucket != f.rootBucket {
if allowedBucket != f.bucket {
return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
}
f.cache.MarkOK(f.rootBucket)
f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
f.markBucketOK()
f.setBucketID(f.info.Allowed.BucketID)
}
if f.rootBucket != "" && f.rootDirectory != "" {
if f.root != "" {
f.root += "/"
// Check to see if the (bucket,directory) is actually an existing file
oldRoot := f.root
newRoot, leaf := path.Split(oldRoot)
f.setRoot(newRoot)
_, err := f.NewObject(ctx, leaf)
remote := path.Base(directory)
f.root = path.Dir(directory)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
_, err := f.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
f.setRoot(oldRoot)
f.root = oldRoot
return f, nil
}
return nil, err
@@ -466,34 +464,30 @@ func (f *Fs) hasPermission(permission string) bool {
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
func (f *Fs) getUploadURL(bucket string) (upload *api.GetUploadURLResponse, err error) {
func (f *Fs) getUploadURL() (upload *api.GetUploadURLResponse, err error) {
f.uploadMu.Lock()
defer f.uploadMu.Unlock()
bucketID, err := f.getBucketID(bucket)
bucketID, err := f.getBucketID()
if err != nil {
return nil, err
}
// look for a stored upload URL for the correct bucketID
uploads := f.uploads[bucketID]
if len(uploads) > 0 {
upload, uploads = uploads[0], uploads[1:]
f.uploads[bucketID] = uploads
return upload, nil
}
// get a new upload URL since not found
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_url",
}
var request = api.GetUploadURLRequest{
BucketID: bucketID,
}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &upload)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get upload URL")
if len(f.uploads) == 0 {
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_url",
}
var request = api.GetUploadURLRequest{
BucketID: bucketID,
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &upload)
return f.shouldRetry(resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get upload URL")
}
} else {
upload, f.uploads = f.uploads[0], f.uploads[1:]
}
return upload, nil
}
@@ -504,14 +498,14 @@ func (f *Fs) returnUploadURL(upload *api.GetUploadURLResponse) {
return
}
f.uploadMu.Lock()
f.uploads[upload.BucketID] = append(f.uploads[upload.BucketID], upload)
f.uploads = append(f.uploads, upload)
f.uploadMu.Unlock()
}

// clearUploadURL clears the current UploadURL and the AuthorizationToken
func (f *Fs) clearUploadURL(bucketID string) {
func (f *Fs) clearUploadURL() {
f.uploadMu.Lock()
delete(f.uploads, bucketID)
f.uploads = nil
f.uploadMu.Unlock()
}

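Both variants above implement the same pool discipline: take an upload URL from the pool if one is cached, mint a new one via b2_get_upload_url otherwise, and return it with returnUploadURL after a successful upload so it can be reused. A minimal standalone sketch of that pattern (the names are illustrative, not the b2 API):

package main

import (
	"fmt"
	"sync"
)

type uploadURL struct{ url, token string }

type pool struct {
	mu   sync.Mutex
	free []*uploadURL
	mint func() *uploadURL // stands in for the b2_get_upload_url call
}

// get reuses a pooled URL when possible, otherwise mints a new one.
func (p *pool) get() *uploadURL {
	p.mu.Lock()
	defer p.mu.Unlock()
	if n := len(p.free); n > 0 {
		u := p.free[n-1]
		p.free = p.free[:n-1]
		return u
	}
	return p.mint()
}

// put returns a still-valid URL to the pool for reuse.
func (p *pool) put(u *uploadURL) {
	p.mu.Lock()
	p.free = append(p.free, u)
	p.mu.Unlock()
}

func main() {
	n := 0
	p := &pool{mint: func() *uploadURL {
		n++
		return &uploadURL{url: fmt.Sprintf("https://pod-%d", n)}
	}}
	u := p.get()                      // minted
	p.put(u)                          // returned after a successful upload
	fmt.Println(p.get().url == u.url) // true: reused, not re-minted
}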
@@ -581,35 +575,27 @@ var errEndList = errors.New("end list")
// list lists the objects into the function supplied from
// the bucket and root supplied
//
// (bucket, directory) is the starting directory
// dir is the starting directory, "" for root
//
// If prefix is set then it is removed from all file names
// level is the depth to search to
//
// If addBucket is set then it adds the bucket to the start of the
// remotes generated
//
// If recurse is set the function will recursively list
// If prefix is set then startFileName is used as a prefix which all
// files must have
//
// If limit is > 0 then it limits to that many files (must be less
// than 1000)
//
// If hidden is set then it will list the hidden (deleted) files too.
//
// if findFile is set it will look for files called (bucket, directory)
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, limit int, hidden bool, findFile bool, fn listFn) error {
if !findFile {
if prefix != "" {
prefix += "/"
}
if directory != "" {
directory += "/"
}
func (f *Fs) list(ctx context.Context, dir string, recurse bool, prefix string, limit int, hidden bool, fn listFn) error {
root := f.root
if dir != "" {
root += dir + "/"
}
delimiter := ""
if !recurse {
delimiter = "/"
}
bucketID, err := f.getBucketID(bucket)
bucketID, err := f.getBucketID()
if err != nil {
return err
}
@@ -620,11 +606,12 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
var request = api.ListFileNamesRequest{
BucketID: bucketID,
MaxFileCount: chunkSize,
Prefix: directory,
Prefix: root,
Delimiter: delimiter,
}
if directory != "" {
request.StartFileName = directory
prefix = root + prefix
if prefix != "" {
request.StartFileName = prefix
}
opts := rest.Opts{
Method: "POST",
@@ -648,19 +635,16 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
return nil
}
if !strings.HasPrefix(file.Name, prefix) {
if !strings.HasPrefix(file.Name, f.root) {
fs.Debugf(f, "Odd name received %q", file.Name)
continue
}
remote := file.Name[len(prefix):]
remote := file.Name[len(f.root):]
// Check for directory
isDirectory := strings.HasSuffix(remote, "/")
if isDirectory {
remote = remote[:len(remote)-1]
}
if addBucket {
remote = path.Join(bucket, remote)
}
// Send object
err = fn(remote, file, isDirectory)
if err != nil {
@@ -704,10 +688,19 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File
return o, nil
}

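In both signatures the listing semantics come from the underlying b2_list_file_names call: a prefix narrows the results to one subtree, and a delimiter of "/" rolls anything below the next slash up into a single synthetic directory entry, while an empty delimiter recurses. A toy model of that server-side behaviour, for orientation only:

package main

import (
	"fmt"
	"strings"
)

// listNames mimics prefix/delimiter filtering as done server side:
// with delimiter "/" anything below the first slash after the prefix
// is rolled up into one "dir/" entry.
func listNames(all []string, prefix, delimiter string) []string {
	seen := map[string]bool{}
	var out []string
	for _, name := range all {
		if !strings.HasPrefix(name, prefix) {
			continue
		}
		rest := name[len(prefix):]
		if delimiter != "" {
			if i := strings.Index(rest, delimiter); i >= 0 {
				rest = rest[:i+1] // roll up to directory marker
			}
		}
		if !seen[prefix+rest] {
			seen[prefix+rest] = true
			out = append(out, prefix+rest)
		}
	}
	return out
}

func main() {
	files := []string{"a/b/c.txt", "a/b/d.txt", "a/e.txt", "f.txt"}
	fmt.Println(listNames(files, "a/", "/")) // [a/b/ a/e.txt]
	fmt.Println(listNames(files, "a/", ""))  // [a/b/c.txt a/b/d.txt a/e.txt]
}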
// mark the bucket as being OK
func (f *Fs) markBucketOK() {
if f.bucket != "" {
f.bucketOKMu.Lock()
f.bucketOK = true
f.bucketOKMu.Unlock()
}
}

// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
last := ""
err = f.list(ctx, bucket, directory, prefix, f.rootBucket == "", false, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
err = f.list(ctx, dir, false, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
if err != nil {
return err
@@ -721,12 +714,15 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
return nil, err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
f.markBucketOK()
return entries, nil
}

// listBuckets returns all the buckets to out
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
err = f.listBucketsToFn(func(bucket *api.Bucket) error {
d := fs.NewDir(bucket.Name, time.Time{})
entries = append(entries, d)
@@ -748,14 +744,10 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
if f.bucket == "" {
return f.listBuckets(dir)
}
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
return f.listDir(ctx, dir)
}

// ListR lists the objects and directories of the Fs starting
@@ -775,44 +767,23 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
last := ""
return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
if err != nil {
return err
}
return list.Add(entry)
})
}
if bucket == "" {
entries, err := f.listBuckets(ctx)
last := ""
err = f.list(ctx, dir, true, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
bucket := entry.Remote()
err = listR(bucket, "", f.rootDirectory, true)
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
}
} else {
err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return list.Add(entry)
})
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return list.Flush()
}

@@ -838,21 +809,8 @@ func (f *Fs) listBucketsToFn(fn listBucketFn) error {
if err != nil {
return err
}
f.bucketIDMutex.Lock()
f.bucketTypeMutex.Lock()
f._bucketID = make(map[string]string, 1)
f._bucketType = make(map[string]string, 1)
for i := range response.Buckets {
bucket := &response.Buckets[i]
f.cache.MarkOK(bucket.Name)
f._bucketID[bucket.Name] = bucket.ID
f._bucketType[bucket.Name] = bucket.Type
}
f.bucketTypeMutex.Unlock()
f.bucketIDMutex.Unlock()
for i := range response.Buckets {
bucket := &response.Buckets[i]
err = fn(bucket)
err = fn(&response.Buckets[i])
if err != nil {
return err
}
@@ -862,72 +820,72 @@ func (f *Fs) listBucketsToFn(fn listBucketFn) error {

// getbucketType finds the bucketType for the current bucket name
// can be one of allPublic, allPrivate, or snapshot
func (f *Fs) getbucketType(bucket string) (bucketType string, err error) {
func (f *Fs) getbucketType() (bucketType string, err error) {
f.bucketTypeMutex.Lock()
bucketType = f._bucketType[bucket]
f.bucketTypeMutex.Unlock()
if bucketType != "" {
return bucketType, nil
defer f.bucketTypeMutex.Unlock()
if f._bucketType != "" {
return f._bucketType, nil
}
err = f.listBucketsToFn(func(bucket *api.Bucket) error {
// listBucketsToFn reads bucket Types
if bucket.Name == f.bucket {
bucketType = bucket.Type
}
return nil

})
f.bucketTypeMutex.Lock()
bucketType = f._bucketType[bucket]
f.bucketTypeMutex.Unlock()
if bucketType == "" {
err = fs.ErrorDirNotFound
}
f._bucketType = bucketType
return bucketType, err
}

// setBucketType sets the Type for the current bucket name
func (f *Fs) setBucketType(bucket string, Type string) {
func (f *Fs) setBucketType(Type string) {
f.bucketTypeMutex.Lock()
f._bucketType[bucket] = Type
f._bucketType = Type
f.bucketTypeMutex.Unlock()
}

// clearBucketType clears the Type for the current bucket name
func (f *Fs) clearBucketType(bucket string) {
func (f *Fs) clearBucketType() {
f.bucketTypeMutex.Lock()
delete(f._bucketType, bucket)
f._bucketType = ""
f.bucketTypeMutex.Unlock()
}

// getBucketID finds the ID for the current bucket name
func (f *Fs) getBucketID(bucket string) (bucketID string, err error) {
func (f *Fs) getBucketID() (bucketID string, err error) {
f.bucketIDMutex.Lock()
bucketID = f._bucketID[bucket]
f.bucketIDMutex.Unlock()
if bucketID != "" {
return bucketID, nil
defer f.bucketIDMutex.Unlock()
if f._bucketID != "" {
return f._bucketID, nil
}
err = f.listBucketsToFn(func(bucket *api.Bucket) error {
// listBucketsToFn sets IDs
if bucket.Name == f.bucket {
bucketID = bucket.ID
}
return nil

})
f.bucketIDMutex.Lock()
bucketID = f._bucketID[bucket]
f.bucketIDMutex.Unlock()
if bucketID == "" {
err = fs.ErrorDirNotFound
}
f._bucketID = bucketID
return bucketID, err
}

// setBucketID sets the ID for the current bucket name
func (f *Fs) setBucketID(bucket, ID string) {
func (f *Fs) setBucketID(ID string) {
f.bucketIDMutex.Lock()
f._bucketID[bucket] = ID
f._bucketID = ID
f.bucketIDMutex.Unlock()
}

// clearBucketID clears the ID for the current bucket name
func (f *Fs) clearBucketID(bucket string) {
func (f *Fs) clearBucketID() {
f.bucketIDMutex.Lock()
delete(f._bucketID, bucket)
f._bucketID = ""
f.bucketIDMutex.Unlock()
}

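The new getBucketID above holds bucketIDMutex for the whole lookup (note the defer Unlock) so concurrent callers cannot race to issue duplicate b2_list_buckets calls, caching the result in f._bucketID. The same lazy, lock-guarded memoisation in isolation (a sketch only, with illustrative names):

package main

import (
	"fmt"
	"sync"
)

type resolver struct {
	mu     sync.Mutex
	cached string
	lookup func() (string, error) // stands in for a b2_list_buckets call
}

// id returns the cached value, resolving it at most once even when
// called from many goroutines: the mutex is held across the lookup.
func (r *resolver) id() (string, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.cached != "" {
		return r.cached, nil
	}
	v, err := r.lookup()
	if err != nil {
		return "", err
	}
	r.cached = v
	return v, nil
}

func main() {
	calls := 0
	r := &resolver{lookup: func() (string, error) { calls++; return "bkt_123", nil }}
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); _, _ = r.id() }()
	}
	wg.Wait()
	fmt.Println(calls) // 1 -- the lookup ran exactly once
}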
@@ -952,84 +910,83 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt

// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
bucket, _ := f.split(dir)
return f.makeBucket(ctx, bucket)
}

// makeBucket creates the bucket if it doesn't exist
func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
return f.cache.Create(bucket, func() error {
opts := rest.Opts{
Method: "POST",
Path: "/b2_create_bucket",
}
var request = api.CreateBucketRequest{
AccountID: f.info.AccountID,
Name: bucket,
Type: "allPrivate",
}
var response api.Bucket
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
if apiErr, ok := err.(*api.Error); ok {
if apiErr.Code == "duplicate_bucket_name" {
// Check this is our bucket - buckets are globally unique and this
// might be someone else's.
_, getBucketErr := f.getBucketID(bucket)
if getBucketErr == nil {
// found so it is our bucket
return nil
}
if getBucketErr != fs.ErrorDirNotFound {
fs.Debugf(f, "Error checking bucket exists: %v", getBucketErr)
}
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.bucketOK {
return nil
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_create_bucket",
}
var request = api.CreateBucketRequest{
AccountID: f.info.AccountID,
Name: f.bucket,
Type: "allPrivate",
}
var response api.Bucket
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
if apiErr, ok := err.(*api.Error); ok {
if apiErr.Code == "duplicate_bucket_name" {
// Check this is our bucket - buckets are globally unique and this
// might be someone else's.
_, getBucketErr := f.getBucketID()
if getBucketErr == nil {
// found so it is our bucket
f.bucketOK = true
return nil
}
if getBucketErr != fs.ErrorDirNotFound {
fs.Debugf(f, "Error checking bucket exists: %v", getBucketErr)
}
}
return errors.Wrap(err, "failed to create bucket")
}
f.setBucketID(bucket, response.ID)
f.setBucketType(bucket, response.Type)
return nil
}, nil)
return errors.Wrap(err, "failed to create bucket")
}
f.setBucketID(response.ID)
f.setBucketType(response.Type)
f.bucketOK = true
return nil
}

// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
bucket, directory := f.split(dir)
if bucket == "" || directory != "" {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
return nil
}
return f.cache.Remove(bucket, func() error {
opts := rest.Opts{
Method: "POST",
Path: "/b2_delete_bucket",
}
bucketID, err := f.getBucketID(bucket)
if err != nil {
return err
}
var request = api.DeleteBucketRequest{
ID: bucketID,
AccountID: f.info.AccountID,
}
var response api.Bucket
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to delete bucket")
}
f.clearBucketID(bucket)
f.clearBucketType(bucket)
f.clearUploadURL(bucketID)
return nil
opts := rest.Opts{
Method: "POST",
Path: "/b2_delete_bucket",
}
bucketID, err := f.getBucketID()
if err != nil {
return err
}
var request = api.DeleteBucketRequest{
ID: bucketID,
AccountID: f.info.AccountID,
}
var response api.Bucket
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
})
if err != nil {
return errors.Wrap(err, "failed to delete bucket")
}
f.bucketOK = false
f.clearBucketID()
f.clearBucketType()
f.clearUploadURL()
return nil
}

// Precision of the remote
@@ -1038,8 +995,8 @@ func (f *Fs) Precision() time.Duration {
}

// hide hides a file on the remote
func (f *Fs) hide(bucket, bucketPath string) error {
bucketID, err := f.getBucketID(bucket)
func (f *Fs) hide(Name string) error {
bucketID, err := f.getBucketID()
if err != nil {
return err
}
@@ -1049,7 +1006,7 @@ func (f *Fs) hide(bucket, bucketPath string) error {
}
var request = api.HideFileRequest{
BucketID: bucketID,
Name: bucketPath,
Name: Name,
}
var response api.File
err = f.pacer.Call(func() (bool, error) {
@@ -1064,7 +1021,7 @@ func (f *Fs) hide(bucket, bucketPath string) error {
return nil
}
}
return errors.Wrapf(err, "failed to hide %q", bucketPath)
return errors.Wrapf(err, "failed to hide %q", Name)
}
return nil
}
@@ -1095,10 +1052,7 @@ func (f *Fs) deleteByID(ID, Name string) error {
// if oldOnly is true then it deletes only non current files.
//
// Implemented here so we can make sure we delete old versions.
func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool) error {
if bucket == "" {
return errors.New("can't purge from root")
}
func (f *Fs) purge(ctx context.Context, oldOnly bool) error {
var errReturn error
var checkErrMutex sync.Mutex
var checkErr = func(err error) {
@@ -1139,7 +1093,7 @@ func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool)
}()
}
last := ""
checkErr(f.list(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
checkErr(f.list(ctx, "", true, "", 0, true, func(remote string, object *api.File, isDirectory bool) error {
if !isDirectory {
oi, err := f.newObjectWithInfo(ctx, object.Name, object)
if err != nil {
@@ -1147,7 +1101,6 @@ func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool)
}
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
if oldOnly && last != remote {
// Check current version of the file
if object.Action == "hide" {
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
toBeDeleted <- object
@@ -1177,12 +1130,12 @@ func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool)

// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context) error {
return f.purge(ctx, f.rootBucket, f.rootDirectory, false)
return f.purge(ctx, false)
}

// CleanUp deletes all the hidden files.
func (f *Fs) CleanUp(ctx context.Context) error {
return f.purge(ctx, f.rootBucket, f.rootDirectory, true)
return f.purge(ctx, true)
}

// Copy src to this remote using server side copy operations.
@@ -1195,8 +1148,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
err := f.makeBucket(ctx, dstBucket)
err := f.Mkdir(ctx, "")
if err != nil {
return nil, err
}
@@ -1205,7 +1157,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
destBucketID, err := f.getBucketID(dstBucket)
destBucketID, err := f.getBucketID()
if err != nil {
return nil, err
}
@@ -1215,7 +1167,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
var request = api.CopyFileRequest{
SourceID: srcObj.id,
Name: dstPath,
Name: f.root + remote,
MetadataDirective: "COPY",
DestBucketID: destBucketID,
}
@@ -1244,8 +1196,8 @@ func (f *Fs) Hashes() hash.Set {
}

// getDownloadAuthorization returns authorization token for downloading
// without account.
func (f *Fs) getDownloadAuthorization(bucket, remote string) (authorization string, err error) {
// without account.
func (f *Fs) getDownloadAuthorization(remote string) (authorization string, err error) {
validDurationInSeconds := time.Duration(f.opt.DownloadAuthorizationDuration).Nanoseconds() / 1e9
if validDurationInSeconds <= 0 || validDurationInSeconds > 604800 {
return "", errors.New("--b2-download-auth-duration must be between 1 sec and 1 week")
@@ -1253,7 +1205,7 @@ func (f *Fs) getDownloadAuthorization(bucket, remote string) (authorization stri
if !f.hasPermission("shareFiles") {
return "", errors.New("sharing a file link requires the shareFiles permission")
}
bucketID, err := f.getBucketID(bucket)
bucketID, err := f.getBucketID()
if err != nil {
return "", err
}
@@ -1277,9 +1229,8 @@ func (f *Fs) getDownloadAuthorization(bucket, remote string) (authorization stri
return response.AuthorizationToken, nil
}

// PublicLink returns a link for downloading without account
// PublicLink returns a link for downloading without account.
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
bucket, bucketPath := f.split(remote)
var RootURL string
if f.opt.DownloadURL == "" {
RootURL = f.info.DownloadURL
@@ -1288,7 +1239,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
}
_, err = f.NewObject(ctx, remote)
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
err2 := f.list(ctx, bucket, bucketPath, f.rootDirectory, f.rootBucket == "", false, 1, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
err2 := f.list(ctx, remote, false, "", 1, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
err = nil
return nil
})
@@ -1299,14 +1250,14 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
if err != nil {
return "", err
}
absPath := "/" + bucketPath
link = RootURL + "/file/" + urlEncode(bucket) + absPath
bucketType, err := f.getbucketType(bucket)
absPath := "/" + path.Join(f.root, remote)
link = RootURL + "/file/" + urlEncode(f.bucket) + absPath
bucketType, err := f.getbucketType()
if err != nil {
return "", err
}
if bucketType == "allPrivate" || bucketType == "snapshot" {
AuthorizationToken, err := f.getDownloadAuthorization(bucket, remote)
AuthorizationToken, err := f.getDownloadAuthorization(remote)
if err != nil {
return "", err
}
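The duration check above converts the configured duration to whole seconds and rejects anything outside B2's allowed window of 1 second to 1 week (604800 seconds). The same arithmetic in isolation:

package main

import (
	"errors"
	"fmt"
	"time"
)

// validSeconds converts d to whole seconds and enforces B2's
// 1 second .. 1 week window for download authorization tokens.
func validSeconds(d time.Duration) (int64, error) {
	secs := d.Nanoseconds() / 1e9
	if secs <= 0 || secs > 604800 {
		return 0, errors.New("duration must be between 1 sec and 1 week")
	}
	return secs, nil
}

func main() {
	fmt.Println(validSeconds(4 * time.Hour))      // 14400 <nil>
	fmt.Println(validSeconds(8 * 24 * time.Hour)) // 0 and an out-of-range error
}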
@@ -1400,19 +1351,19 @@ func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {

// getMetaData gets the metadata from the object unconditionally
func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
bucket, bucketPath := o.split()
maxSearched := 1
var timestamp api.Timestamp
baseRemote := o.remote
if o.fs.opt.Versions {
timestamp, bucketPath = api.RemoveVersion(bucketPath)
timestamp, baseRemote = api.RemoveVersion(baseRemote)
maxSearched = maxVersions
}

err = o.fs.list(ctx, bucket, bucketPath, "", false, true, maxSearched, o.fs.opt.Versions, true, func(remote string, object *api.File, isDirectory bool) error {
err = o.fs.list(ctx, "", true, baseRemote, maxSearched, o.fs.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
if isDirectory {
return nil
}
if remote == bucketPath {
if remote == baseRemote {
if !timestamp.IsZero() && !timestamp.Equal(object.UploadTimestamp) {
return nil
}
@@ -1490,7 +1441,6 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if err != nil {
return err
}
_, bucketPath := o.split()
info.Info[timeKey] = timeString(modTime)
opts := rest.Opts{
Method: "POST",
@@ -1498,7 +1448,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
}
var request = api.CopyFileRequest{
SourceID: o.id,
Name: bucketPath, // copy to same name
Name: o.fs.root + o.remote, // copy to same name
MetadataDirective: "REPLACE",
ContentType: info.ContentType,
Info: info.Info,
@@ -1581,7 +1531,6 @@ var _ io.ReadCloser = &openFile{}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.size)
opts := rest.Opts{
Method: "GET",
Options: options,
@@ -1599,8 +1548,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if o.id != "" {
opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
} else {
bucket, bucketPath := o.split()
opts.Path += "/file/" + urlEncode(bucket) + "/" + urlEncode(bucketPath)
opts.Path += "/file/" + urlEncode(o.fs.bucket) + "/" + urlEncode(o.fs.root+o.remote)
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1677,13 +1625,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if o.fs.opt.Versions {
return errNotWithVersions
}
size := src.Size()

bucket, bucketPath := o.split()
err = o.fs.makeBucket(ctx, bucket)
err = o.fs.Mkdir(ctx, "")
if err != nil {
return err
}
size := src.Size()

if size == -1 {
// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
buf := o.fs.getUploadBlock()
@@ -1729,7 +1676,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}

// Get upload URL
upload, err := o.fs.getUploadURL(bucket)
upload, err := o.fs.getUploadURL()
if err != nil {
return err
}
@@ -1797,7 +1744,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Body: in,
ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken,
"X-Bz-File-Name": urlEncode(bucketPath),
"X-Bz-File-Name": urlEncode(o.fs.root + o.remote),
"Content-Type": fs.MimeType(ctx, src),
sha1Header: calculatedSha1,
timeHeader: timeString(modTime),
@@ -1824,14 +1771,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
bucket, bucketPath := o.split()
if o.fs.opt.Versions {
return errNotWithVersions
}
if o.fs.opt.HardDelete {
return o.fs.deleteByID(o.id, bucketPath)
return o.fs.deleteByID(o.id, o.fs.root+o.remote)
}
return o.fs.hide(bucket, bucketPath)
return o.fs.hide(o.fs.root + o.remote)
}

// MimeType of an Object if known, "" otherwise

@@ -104,14 +104,13 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
Method: "POST",
Path: "/b2_start_large_file",
}
bucket, bucketPath := o.split()
bucketID, err := f.getBucketID(bucket)
bucketID, err := f.getBucketID()
if err != nil {
return nil, err
}
var request = api.StartLargeFileRequest{
BucketID: bucketID,
Name: bucketPath,
Name: o.fs.root + remote,
ContentType: fs.MimeType(ctx, src),
Info: map[string]string{
timeKey: timeString(modTime),

backend/cache/cache.go (vendored, 20 changes)
@@ -1864,24 +1864,6 @@ func cleanPath(p string) string {
return p
}

// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
do := f.Fs.Features().UserInfo
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx)
}

// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
do := f.Fs.Features().Disconnect
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx)
}

// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -1897,6 +1879,4 @@ var (
_ fs.ListRer = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
)

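The var block being trimmed here is Go's compile-time interface check: assigning a typed nil pointer to a blank identifier of the interface type fails to compile unless *Fs actually implements the interface, so deleting the UserInfoer and Disconnecter lines has to accompany removing the methods themselves. The pattern in miniature:

package main

type Disconnecter interface{ Disconnect() error }

type Fs struct{}

func (f *Fs) Disconnect() error { return nil } // delete this and the check below breaks the build

// Compile-time assertion: *Fs must satisfy Disconnecter.
var _ Disconnecter = (*Fs)(nil)

func main() {}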
backend/cache/cache_internal_test.go (vendored, 5 changes)
@@ -33,7 +33,6 @@ import (
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/stretchr/testify/assert"
@@ -356,8 +355,8 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
testData2, err = base64.StdEncoding.DecodeString(cryptedText2Base64)
require.NoError(t, err)
} else {
testData1 = []byte(random.String(100))
testData2 = []byte(random.String(200))
testData1 = []byte(fstest.RandomString(100))
testData2 = []byte(fstest.RandomString(200))
}

// write the object

@@ -802,24 +802,6 @@ func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
return newDir
}

// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
do := f.Fs.Features().UserInfo
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx)
}

// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
do := f.Fs.Features().Disconnect
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx)
}

// ObjectInfo describes a wrapped fs.ObjectInfo for being the source
//
// This encrypts the remote name and adjusts the size
@@ -906,8 +888,6 @@ var (
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.ObjectInfo = (*ObjectInfo)(nil)
_ fs.Object = (*Object)(nil)
_ fs.ObjectUnWrapper = (*Object)(nil)

@@ -1941,9 +1941,6 @@ func (f *Fs) Purge(ctx context.Context) error {
if f.root == "" {
return errors.New("can't purge root directory")
}
if f.opt.TrashedOnly {
return errors.New("Can't purge with --drive-trashed-only. Use delete if you want to selectively delete files")
}
err := f.dirCache.FindRoot(ctx, false)
if err != nil {
return err

@@ -975,7 +975,6 @@ func (o *Object) Storable() bool {

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.bytes)
headers := fs.OpenOptionHeaders(options)
arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers}
err = o.fs.pacer.Call(func() (bool, error) {

@@ -166,7 +166,6 @@ func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, er
entries = make([]fs.DirEntry, len(files.Items)+len(folders.SubFolders))

for i, item := range files.Items {
item.Filename = restoreReservedChars(item.Filename)
entries[i] = f.newObjectFromFile(ctx, dir, item)
}

@@ -316,8 +315,6 @@ func (f *Fs) getUploadNode() (response *GetUploadNodeResponse, err error) {
func (f *Fs) uploadFile(in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
// fs.Debugf(f, "Uploading File `%s`", fileName)

fileName = replaceReservedChars(fileName)

if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
return nil, errors.New("Invalid UploadID")
}

@@ -299,14 +299,6 @@ func translateErrorDir(err error) error {
|
||||
func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
|
||||
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
|
||||
fullPath := path.Join(f.root, remote)
|
||||
if fullPath == "" || fullPath == "." || fullPath == "/" {
|
||||
// if root, assume exists and synthesize an entry
|
||||
return &ftp.Entry{
|
||||
Name: "",
|
||||
Type: ftp.EntryTypeFolder,
|
||||
Time: time.Now(),
|
||||
}, nil
|
||||
}
|
||||
dir := path.Dir(fullPath)
|
||||
base := path.Base(fullPath)
|
||||
|
||||
@@ -374,7 +366,7 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) {
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
// defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
|
||||
// defer fs.Trace(dir, "curlevel=%d", curlevel)("")
|
||||
c, err := f.getFtpConnection()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "list")
|
||||
|
||||
@@ -23,7 +23,9 @@ import (
	"net/http"
	"os"
	"path"
	"regexp"
	"strings"
	"sync"
	"time"

	"github.com/pkg/errors"
@@ -36,7 +38,6 @@ import (
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/bucket"
	"github.com/rclone/rclone/lib/oauthutil"
	"github.com/rclone/rclone/lib/pacer"
	"golang.org/x/oauth2"
@@ -263,16 +264,16 @@ type Options struct {

// Fs represents a remote storage server
type Fs struct {
	name          string           // name of this remote
	root          string           // the path we are working on if any
	opt           Options          // parsed options
	features      *fs.Features     // optional features
	svc           *storage.Service // the connection to the storage server
	client        *http.Client     // authorized client
	rootBucket    string           // bucket part of root (if any)
	rootDirectory string           // directory part of root (if any)
	cache         *bucket.Cache    // cache of bucket status
	pacer         *fs.Pacer        // To pace the API calls
	name       string           // name of this remote
	root       string           // the path we are working on if any
	opt        Options          // parsed options
	features   *fs.Features     // optional features
	svc        *storage.Service // the connection to the storage server
	client     *http.Client     // authorized client
	bucket     string           // the bucket we are working on
	bucketOKMu sync.Mutex       // mutex to protect bucket OK
	bucketOK   bool             // true if we have created the bucket
	pacer      *fs.Pacer        // To pace the API calls
}

// Object describes a storage object
@@ -297,18 +298,18 @@ func (f *Fs) Name() string {

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
	if f.root == "" {
		return f.bucket
	}
	return f.bucket + "/" + f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	if f.rootBucket == "" {
		return fmt.Sprintf("GCS root")
	if f.root == "" {
		return fmt.Sprintf("Storage bucket %s", f.bucket)
	}
	if f.rootDirectory == "" {
		return fmt.Sprintf("GCS bucket %s", f.rootBucket)
	}
	return fmt.Sprintf("GCS bucket %s path %s", f.rootBucket, f.rootDirectory)
	return fmt.Sprintf("Storage bucket %s path %s", f.bucket, f.root)
}

// Features returns the optional features of this Fs
@@ -340,23 +341,21 @@ func shouldRetry(err error) (again bool, errOut error) {
	return again, err
}

// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
// Pattern to match a storage path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)

// parsePath parses a storage 'url'
func parsePath(path string) (bucket, directory string, err error) {
	parts := matcher.FindStringSubmatch(path)
	if parts == nil {
		err = errors.Errorf("couldn't find bucket in storage path %q", path)
	} else {
		bucket, directory = parts[1], parts[2]
		directory = strings.Trim(directory, "/")
	}
	return
}

// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
	return bucket.Split(path.Join(f.root, rootRelativePath))
}

// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
	return o.fs.split(o.remote)
}
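The new code routes all path handling through bucket.Split, which is not part of this diff. A minimal, self-contained sketch of the assumed behaviour — everything before the first "/" is the bucket, the remainder is the path inside it:

```go
package main

import (
	"fmt"
	"strings"
)

// split sketches the assumed behaviour of bucket.Split: the part
// before the first "/" is the bucket name, the rest is the object
// path within that bucket.
func split(absPath string) (bucket, bucketPath string) {
	i := strings.IndexRune(absPath, '/')
	if i < 0 {
		return absPath, ""
	}
	return absPath[:i], absPath[i+1:]
}

func main() {
	b, p := split("mybucket/dir/file.txt")
	fmt.Println(b, p) // mybucket dir/file.txt
}
```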

func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
	conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
	if err != nil {
@@ -366,12 +365,6 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}

// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
	f.root = parsePath(root)
	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	var oAuthClient *http.Client
@@ -413,19 +406,22 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	}
	}

	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
		cache: bucket.NewCache(),
	bucket, directory, err := parsePath(root)
	if err != nil {
		return nil, err
	}

	f := &Fs{
		name:   name,
		bucket: bucket,
		root:   directory,
		opt:    *opt,
		pacer:  fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
	}
	f.setRoot(root)
	f.features = (&fs.Features{
		ReadMimeType:      true,
		WriteMimeType:     true,
		BucketBased:       true,
		BucketBasedRootOK: true,
		ReadMimeType:  true,
		WriteMimeType: true,
		BucketBased:   true,
	}).Fill(f)

	// Create a new authorized Drive client.
@@ -435,18 +431,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
	}

	if f.rootBucket != "" && f.rootDirectory != "" {
	if f.root != "" {
		f.root += "/"
		// Check to see if the object exists
		err = f.pacer.Call(func() (bool, error) {
			_, err = f.svc.Objects.Get(f.rootBucket, f.rootDirectory).Do()
			_, err = f.svc.Objects.Get(bucket, directory).Do()
			return shouldRetry(err)
		})
		if err == nil {
			newRoot := path.Dir(f.root)
			if newRoot == "." {
				newRoot = ""
			f.root = path.Dir(directory)
			if f.root == "." {
				f.root = ""
			} else {
				f.root += "/"
			}
			f.setRoot(newRoot)
			// return an error with an fs which points to the parent
			return f, fs.ErrorIsFile
		}
@@ -487,17 +485,13 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
//
// The remote has prefix removed from it and if addBucket is set
// then it adds the bucket to the start.
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) (err error) {
	if prefix != "" {
		prefix += "/"
func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) (err error) {
	root := f.root
	rootLength := len(root)
	if dir != "" {
		root += dir + "/"
	}
	if directory != "" {
		directory += "/"
	}
	list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
	list := f.svc.Objects.List(f.bucket).Prefix(root).MaxResults(listChunks)
	if !recurse {
		list = list.Delimiter("/")
	}
@@ -517,36 +511,31 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
	}
	if !recurse {
		var object storage.Object
		for _, remote := range objects.Prefixes {
			if !strings.HasSuffix(remote, "/") {
		for _, prefix := range objects.Prefixes {
			if !strings.HasSuffix(prefix, "/") {
				continue
			}
			if !strings.HasPrefix(remote, prefix) {
				fs.Logf(f, "Odd name received %q", remote)
				continue
			}
			remote = remote[len(prefix) : len(remote)-1]
			if addBucket {
				remote = path.Join(bucket, remote)
			}
			err = fn(remote, &object, true)
			err = fn(prefix[rootLength:len(prefix)-1], &object, true)
			if err != nil {
				return err
			}
		}
	}
	for _, object := range objects.Items {
		if !strings.HasPrefix(object.Name, prefix) {
		if !strings.HasPrefix(object.Name, root) {
			fs.Logf(f, "Odd name received %q", object.Name)
			continue
		}
		remote := object.Name[len(prefix):]
		isDirectory := strings.HasSuffix(remote, "/")
		if addBucket {
			remote = path.Join(bucket, remote)
		}
		remote := object.Name[rootLength:]
		// is this a directory marker?
		if isDirectory && object.Size == 0 {
		if (strings.HasSuffix(remote, "/") || remote == "") && object.Size == 0 {
			if recurse && remote != "" {
				// add a directory in if --fast-list since will have no prefixes
				err = fn(remote[:len(remote)-1], object, true)
				if err != nil {
					return err
				}
			}
			continue // skip directory marker
		}
		err = fn(remote, object, false)
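Both versions of list lean on the same GCS listing convention: a non-recursive listing passes Delimiter("/"), so the server collapses deeper objects into Prefixes while direct children arrive as Items. A self-contained sketch of that collapsing, run over plain strings rather than the API:

```go
package main

import (
	"fmt"
	"strings"
)

// listWithDelimiter simulates what a bucket listing with
// Prefix(prefix) and Delimiter("/") returns: object keys directly
// under the prefix, plus de-duplicated "sub-directory" prefixes.
func listWithDelimiter(keys []string, prefix string) (objects, prefixes []string) {
	seen := map[string]bool{}
	for _, k := range keys {
		if !strings.HasPrefix(k, prefix) {
			continue
		}
		rest := k[len(prefix):]
		if i := strings.Index(rest, "/"); i >= 0 {
			p := prefix + rest[:i+1]
			if !seen[p] {
				seen[p] = true
				prefixes = append(prefixes, p)
			}
		} else {
			objects = append(objects, k)
		}
	}
	return objects, prefixes
}

func main() {
	keys := []string{"a/one.txt", "a/b/two.txt", "a/b/three.txt", "c.txt"}
	objs, prefs := listWithDelimiter(keys, "a/")
	fmt.Println(objs)  // [a/one.txt]
	fmt.Println(prefs) // [a/b/]
}
```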
@@ -575,10 +564,19 @@ func (f *Fs) itemToDirEntry(remote string, object *storage.Object, isDirectory b
	return o, nil
}

// mark the bucket as being OK
func (f *Fs) markBucketOK() {
	if f.bucket != "" {
		f.bucketOKMu.Lock()
		f.bucketOK = true
		f.bucketOKMu.Unlock()
	}
}

// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	// List the objects
	err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
	err = f.list(ctx, dir, false, func(remote string, object *storage.Object, isDirectory bool) error {
		entry, err := f.itemToDirEntry(remote, object, isDirectory)
		if err != nil {
			return err
@@ -592,12 +590,15 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
		return nil, err
	}
	// bucket must be present if listing succeeded
	f.cache.MarkOK(bucket)
	f.markBucketOK()
	return entries, err
}

// listBuckets lists the buckets
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
	if dir != "" {
		return nil, fs.ErrorListBucketRequired
	}
	if f.opt.ProjectNumber == "" {
		return nil, errors.New("can't list buckets without project number")
	}
@@ -633,14 +634,10 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	bucket, directory := f.split(dir)
	if bucket == "" {
		if directory != "" {
			return nil, fs.ErrorListBucketRequired
		}
		return f.listBuckets(ctx)
	if f.bucket == "" {
		return f.listBuckets(dir)
	}
	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
	return f.listDir(ctx, dir)
}

// ListR lists the objects and directories of the Fs starting
@@ -660,43 +657,22 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	bucket, directory := f.split(dir)
	if f.bucket == "" {
		return fs.ErrorListBucketRequired
	}
	list := walk.NewListRHelper(callback)
	listR := func(bucket, directory, prefix string, addBucket bool) error {
		return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
			entry, err := f.itemToDirEntry(remote, object, isDirectory)
			if err != nil {
				return err
			}
			return list.Add(entry)
		})
	}
	if bucket == "" {
		entries, err := f.listBuckets(ctx)
	err = f.list(ctx, dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
		entry, err := f.itemToDirEntry(remote, object, isDirectory)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
			bucket := entry.Remote()
			err = listR(bucket, "", f.rootDirectory, true)
			if err != nil {
				return err
			}
			// bucket must be present if listing succeeded
			f.cache.MarkOK(bucket)
		}
	} else {
		err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
		if err != nil {
			return err
		}
		// bucket must be present if listing succeeded
		f.cache.MarkOK(bucket)
		return list.Add(entry)
	})
	if err != nil {
		return err
	}
	// bucket must be present if listing succeeded
	f.markBucketOK()
	return list.Flush()
}

@@ -721,55 +697,58 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt

// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
	bucket, _ := f.split(dir)
	return f.makeBucket(ctx, bucket)
}
	f.bucketOKMu.Lock()
	defer f.bucketOKMu.Unlock()
	if f.bucketOK {
		return nil
	}
	// List something from the bucket to see if it exists. Doing it like this enables the use of a
	// service account that only has the "Storage Object Admin" role. See #2193 for details.

// makeBucket creates the bucket if it doesn't exist
func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
	return f.cache.Create(bucket, func() error {
		// List something from the bucket to see if it exists. Doing it like this enables the use of a
		// service account that only has the "Storage Object Admin" role. See #2193 for details.
		err = f.pacer.Call(func() (bool, error) {
			_, err = f.svc.Objects.List(bucket).MaxResults(1).Do()
			return shouldRetry(err)
		})
		if err == nil {
			// Bucket already exists
			return nil
		} else if gErr, ok := err.(*googleapi.Error); ok {
			if gErr.Code != http.StatusNotFound {
				return errors.Wrap(err, "failed to get bucket")
			}
		} else {
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.svc.Objects.List(f.bucket).MaxResults(1).Do()
		return shouldRetry(err)
	})
	if err == nil {
		// Bucket already exists
		f.bucketOK = true
		return nil
	} else if gErr, ok := err.(*googleapi.Error); ok {
		if gErr.Code != http.StatusNotFound {
			return errors.Wrap(err, "failed to get bucket")
		}
	} else {
		return errors.Wrap(err, "failed to get bucket")
	}

		if f.opt.ProjectNumber == "" {
			return errors.New("can't make bucket without project number")
		}
	if f.opt.ProjectNumber == "" {
		return errors.New("can't make bucket without project number")
	}

		bucket := storage.Bucket{
			Name:         bucket,
			Location:     f.opt.Location,
			StorageClass: f.opt.StorageClass,
	bucket := storage.Bucket{
		Name:         f.bucket,
		Location:     f.opt.Location,
		StorageClass: f.opt.StorageClass,
	}
		if f.opt.BucketPolicyOnly {
			bucket.IamConfiguration = &storage.BucketIamConfiguration{
				BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
					Enabled: true,
				},
			}
	if f.opt.BucketPolicyOnly {
		bucket.IamConfiguration = &storage.BucketIamConfiguration{
			BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
				Enabled: true,
			},
		}
	}
		err = f.pacer.Call(func() (bool, error) {
			insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
			if !f.opt.BucketPolicyOnly {
				insertBucket.PredefinedAcl(f.opt.BucketACL)
			}
	return f.pacer.Call(func() (bool, error) {
		insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
		if !f.opt.BucketPolicyOnly {
			insertBucket.PredefinedAcl(f.opt.BucketACL)
		}
			_, err = insertBucket.Do()
			return shouldRetry(err)
		})
	}, nil)
		_, err = insertBucket.Do()
		return shouldRetry(err)
	})
	if err == nil {
		f.bucketOK = true
	}
	return err
}
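The removed code funnels bucket creation through f.cache.Create, so each bucket is only probed or created once per Fs even with concurrent callers. The real lib/bucket API differs slightly (the diff passes a second `nil` callback); a simplified sketch of the underlying create-once pattern:

```go
package main

import "sync"

// Cache is a simplified sketch of the create-once pattern behind
// lib/bucket.Cache: a mutex-guarded map of known-good buckets,
// generalising the single bucketOK flag of the old code.
type Cache struct {
	mu sync.Mutex
	ok map[string]bool
}

// Create runs create() unless the bucket is already known to exist.
func (c *Cache) Create(bucket string, create func() error) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.ok == nil {
		c.ok = map[string]bool{}
	}
	if c.ok[bucket] {
		return nil // already created or verified
	}
	if err := create(); err != nil {
		return err
	}
	c.ok[bucket] = true
	return nil
}

func main() {
	var c Cache
	_ = c.Create("mybucket", func() error { return nil })
}
```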

// Rmdir deletes the bucket if the fs is at the root
@@ -777,16 +756,19 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
	bucket, directory := f.split(dir)
	if bucket == "" || directory != "" {
	f.bucketOKMu.Lock()
	defer f.bucketOKMu.Unlock()
	if f.root != "" || dir != "" {
		return nil
	}
	return f.cache.Remove(bucket, func() error {
		return f.pacer.Call(func() (bool, error) {
			err = f.svc.Buckets.Delete(bucket).Do()
			return shouldRetry(err)
		})
	err = f.pacer.Call(func() (bool, error) {
		err = f.svc.Buckets.Delete(f.bucket).Do()
		return shouldRetry(err)
	})
	if err == nil {
		f.bucketOK = false
	}
	return err
}

// Precision returns the precision
@@ -804,8 +786,7 @@ func (f *Fs) Precision() time.Duration {
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	dstBucket, dstPath := f.split(remote)
	err := f.makeBucket(ctx, dstBucket)
	err := f.Mkdir(ctx, "")
	if err != nil {
		return nil, err
	}
@@ -814,7 +795,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	srcBucket, srcPath := srcObj.split()

	// Temporary Object under construction
	dstObj := &Object{
@@ -822,13 +802,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
		remote: remote,
	}

	srcBucket := srcObj.fs.bucket
	srcObject := srcObj.fs.root + srcObj.remote
	dstBucket := f.bucket
	dstObject := f.root + remote
	var newObject *storage.Object
	err = f.pacer.Call(func() (bool, error) {
		copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil)
		if !f.opt.BucketPolicyOnly {
			copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
		}
		newObject, err = copyObject.Do()
		newObject, err = f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
		return shouldRetry(err)
	})
	if err != nil {
@@ -911,24 +891,6 @@ func (o *Object) setMetaData(info *storage.Object) {
	}
}

// readObjectInfo reads the definition for an object
func (o *Object) readObjectInfo() (object *storage.Object, err error) {
	bucket, bucketPath := o.split()
	err = o.fs.pacer.Call(func() (bool, error) {
		object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Do()
		return shouldRetry(err)
	})
	if err != nil {
		if gErr, ok := err.(*googleapi.Error); ok {
			if gErr.Code == http.StatusNotFound {
				return nil, fs.ErrorObjectNotFound
			}
		}
		return nil, err
	}
	return object, nil
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
@@ -936,8 +898,17 @@ func (o *Object) readMetaData() (err error) {
	if !o.modTime.IsZero() {
		return nil
	}
	object, err := o.readObjectInfo()
	var object *storage.Object
	err = o.fs.pacer.Call(func() (bool, error) {
		object, err = o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
		return shouldRetry(err)
	})
	if err != nil {
		if gErr, ok := err.(*googleapi.Error); ok {
			if gErr.Code == http.StatusNotFound {
				return fs.ErrorObjectNotFound
			}
		}
		return err
	}
	o.setMetaData(object)
@@ -966,27 +937,15 @@ func metadataFromModTime(modTime time.Time) map[string]string {

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
	// read the complete existing object first
	object, err := o.readObjectInfo()
	if err != nil {
		return err
	// This only adds metadata so will preserve other metadata
	object := storage.Object{
		Bucket:   o.fs.bucket,
		Name:     o.fs.root + o.remote,
		Metadata: metadataFromModTime(modTime),
	}
	// Add the mtime to the existing metadata
	mtime := modTime.Format(timeFormatOut)
	if object.Metadata == nil {
		object.Metadata = make(map[string]string, 1)
	}
	object.Metadata[metaMtime] = mtime
	// Copy the object to itself to update the metadata
	// Using PATCH requires too many permissions
	bucket, bucketPath := o.split()
	var newObject *storage.Object
	err = o.fs.pacer.Call(func() (bool, error) {
		copyObject := o.fs.svc.Objects.Copy(bucket, bucketPath, bucket, bucketPath, object)
		if !o.fs.opt.BucketPolicyOnly {
			copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
		}
		newObject, err = copyObject.Do()
		newObject, err = o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
		return shouldRetry(err)
	})
	if err != nil {
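The comment above explains the trick in the removed code: rather than Objects.Patch, which needs extra IAM permissions, the object is copied onto itself with a destination object carrying the new metadata. A sketch of that call shape, assuming the google.golang.org/api/storage/v1 client used throughout this file:

```go
package gcsmeta

import (
	"time"

	storage "google.golang.org/api/storage/v1"
)

// touchModTime sketches the copy-onto-itself metadata update: the
// destination object supplies replacement metadata, and the
// server-side copy rewrites it without using Objects.Patch.
// The "mtime" key and RFC3339 format are assumptions here, not
// taken from this diff.
func touchModTime(svc *storage.Service, bucket, name string, modTime time.Time) error {
	dst := &storage.Object{
		Bucket: bucket,
		Name:   name,
		Metadata: map[string]string{
			"mtime": modTime.Format(time.RFC3339Nano),
		},
	}
	_, err := svc.Objects.Copy(bucket, name, bucket, name, dst).Do()
	return err
}
```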
@@ -1007,7 +966,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	if err != nil {
		return nil, err
	}
	fs.FixRangeOption(options, o.bytes)
	fs.OpenOptionAddHTTPHeaders(req.Header, options)
	var res *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
@@ -1035,22 +993,21 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	bucket, bucketPath := o.split()
	err := o.fs.makeBucket(ctx, bucket)
	err := o.fs.Mkdir(ctx, "")
	if err != nil {
		return err
	}
	modTime := src.ModTime(ctx)

	object := storage.Object{
		Bucket:      bucket,
		Name:        bucketPath,
		Bucket:      o.fs.bucket,
		Name:        o.fs.root + o.remote,
		ContentType: fs.MimeType(ctx, src),
		Metadata:    metadataFromModTime(modTime),
	}
	var newObject *storage.Object
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
		insertObject := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
		if !o.fs.opt.BucketPolicyOnly {
			insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
		}
@@ -1067,9 +1024,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
	bucket, bucketPath := o.split()
	err = o.fs.pacer.Call(func() (bool, error) {
		err = o.fs.svc.Objects.Delete(bucket, bucketPath).Do()
		err = o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
		return shouldRetry(err)
	})
	return err

@@ -27,7 +27,6 @@ import (
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/dirtree"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/log"
	"github.com/rclone/rclone/lib/oauthutil"
@@ -61,8 +60,6 @@ var (
	// Description of how to auth for this app
	oauthConfig = &oauth2.Config{
		Scopes: []string{
			"openid",
			"profile",
			scopeReadWrite,
		},
		Endpoint: google.Endpoint,
@@ -146,20 +143,18 @@ type Options struct {

// Fs represents a remote storage server
type Fs struct {
	name       string                 // name of this remote
	root       string                 // the path we are working on if any
	opt        Options                // parsed options
	features   *fs.Features           // optional features
	unAuth     *rest.Client           // unauthenticated http client
	srv        *rest.Client           // the connection to the one drive server
	ts         *oauthutil.TokenSource // token source for oauth2
	pacer      *fs.Pacer              // To pace the API calls
	startTime  time.Time              // time Fs was started - used for datestamps
	albumsMu   sync.Mutex             // protect albums (but not contents)
	albums     map[bool]*albums       // albums, shared or not
	uploadedMu sync.Mutex             // to protect the below
	uploaded   dirtree.DirTree        // record of uploaded items
	createMu   sync.Mutex             // held when creating albums to prevent dupes
	name       string           // name of this remote
	root       string           // the path we are working on if any
	opt        Options          // parsed options
	features   *fs.Features     // optional features
	srv        *rest.Client     // the connection to the one drive server
	pacer      *fs.Pacer        // To pace the API calls
	startTime  time.Time        // time Fs was started - used for datestamps
	albumsMu   sync.Mutex       // protect albums (but not contents)
	albums     map[bool]*albums // albums, shared or not
	uploadedMu sync.Mutex       // to protect the below
	uploaded   dirtree.DirTree  // record of uploaded items
	createMu   sync.Mutex       // held when creating albums to prevent dupes
}

// Object describes a storage object
@@ -246,8 +241,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		return nil, err
	}

	baseClient := fshttp.NewClient(fs.Config)
	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
	oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
	if err != nil {
		return nil, errors.Wrap(err, "failed to configure Box")
	}
@@ -256,14 +250,11 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	if root == "." || root == "/" {
		root = ""
	}

	f := &Fs{
		name:      name,
		root:      root,
		opt:       *opt,
		unAuth:    rest.NewClient(baseClient),
		srv:       rest.NewClient(oAuthClient).SetRoot(rootURL),
		ts:        ts,
		pacer:     fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
		startTime: time.Now(),
		albums:    map[bool]*albums{},
@@ -289,85 +280,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	return f, nil
}

// fetchEndpoint gets the openid endpoint named from the Google config
func (f *Fs) fetchEndpoint(name string) (endpoint string, err error) {
	// Get openID config without auth
	opts := rest.Opts{
		Method:  "GET",
		RootURL: "https://accounts.google.com/.well-known/openid-configuration",
	}
	var openIDconfig map[string]interface{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.unAuth.CallJSON(&opts, nil, &openIDconfig)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return "", errors.Wrap(err, "couldn't read openID config")
	}

	// Find userinfo endpoint
	endpoint, ok := openIDconfig[name].(string)
	if !ok {
		return "", errors.Errorf("couldn't find %q from openID config", name)
	}

	return endpoint, nil
}

// UserInfo fetches info about the current user with oauth2
func (f *Fs) UserInfo(ctx context.Context) (userInfo map[string]string, err error) {
	endpoint, err := f.fetchEndpoint("userinfo_endpoint")
	if err != nil {
		return nil, err
	}

	// Fetch the user info with auth
	opts := rest.Opts{
		Method:  "GET",
		RootURL: endpoint,
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(&opts, nil, &userInfo)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't read user info")
	}
	return userInfo, nil
}

// Disconnect kills the token and refresh token
func (f *Fs) Disconnect(ctx context.Context) (err error) {
	endpoint, err := f.fetchEndpoint("revocation_endpoint")
	if err != nil {
		return err
	}
	token, err := f.ts.Token()
	if err != nil {
		return err
	}

	// Revoke the token and the refresh token
	opts := rest.Opts{
		Method:  "POST",
		RootURL: endpoint,
		MultipartParams: url.Values{
			"token":           []string{token.AccessToken},
			"token_type_hint": []string{"access_token"},
		},
	}
	var res interface{}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(&opts, nil, &res)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "couldn't revoke token")
	}
	fs.Infof(f, "res = %+v", res)
	return nil
}

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
@@ -956,6 +868,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err = o.fs.srv.Call(&opts)
		if err != nil {
			_ = resp.Body.Close()
			return shouldRetry(resp, err)
		}
		token, err = rest.ReadBody(resp)
@@ -1050,10 +963,8 @@ func (o *Object) ID() string {

// Check the interfaces are satisfied
var (
	_ fs.Fs           = &Fs{}
	_ fs.UserInfoer   = &Fs{}
	_ fs.Disconnecter = &Fs{}
	_ fs.Object       = &Object{}
	_ fs.MimeTyper    = &Object{}
	_ fs.IDer         = &Object{}
	_ fs.Fs        = &Fs{}
	_ fs.Object    = &Object{}
	_ fs.MimeTyper = &Object{}
	_ fs.IDer      = &Object{}
)

@@ -13,7 +13,6 @@ import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/lib/random"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
@@ -56,7 +55,7 @@ func TestIntegration(t *testing.T) {
	require.NoError(t, err)

	t.Run("CreateAlbum", func(t *testing.T) {
		albumName := "album/rclone-test-" + random.String(24)
		albumName := "album/rclone-test-" + fstest.RandomString(24)
		err = f.Mkdir(ctx, albumName)
		require.NoError(t, err)
		remote := albumName + "/" + fileNameAlbum

@@ -46,21 +46,6 @@ func init() {
			Value: "https://user:pass@example.com",
			Help:  "Connect to example.com using a username and password",
		}},
	}, {
		Name: "headers",
		Help: `Set HTTP headers for all transactions

Use this to set additional HTTP headers for all transactions

The input format is comma separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used.

For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.

You can set multiple headers, eg '"Cookie","name=value","Authorization","xxx"'.
`,
		Default:  fs.CommaSepList{},
		Advanced: true,
	}, {
		Name: "no_slash",
		Help: `Set this if the site doesn't end directories with /
@@ -84,9 +69,8 @@ directories.`,

// Options defines the configuration for this backend
type Options struct {
	Endpoint string          `config:"url"`
	NoSlash  bool            `config:"no_slash"`
	Headers  fs.CommaSepList `config:"headers"`
	Endpoint string `config:"url"`
	NoSlash  bool   `config:"no_slash"`
}

// Fs stores the interface to the remote HTTP files
@@ -131,10 +115,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		return nil, err
	}

	if len(opt.Headers)%2 != 0 {
		return nil, errors.New("odd number of headers supplied")
	}

	if !strings.HasSuffix(opt.Endpoint, "/") {
		opt.Endpoint += "/"
	}
@@ -160,14 +140,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		return http.ErrUseLastResponse
	}
	// check to see if points to a file
	req, err := http.NewRequest("HEAD", u.String(), nil)
	res, err := noRedir.Head(u.String())
	err = statusError(res, err)
	if err == nil {
		addHeaders(req, opt)
		res, err := noRedir.Do(req)
		err = statusError(res, err)
		if err == nil {
			isFile = true
		}
		isFile = true
	}
}

@@ -340,20 +316,6 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
	return names, nil
}

// Adds the configured headers to the request if any
func addHeaders(req *http.Request, opt *Options) {
	for i := 0; i < len(opt.Headers); i += 2 {
		key := opt.Headers[i]
		value := opt.Headers[i+1]
		req.Header.Add(key, value)
	}
}

// Adds the configured headers to the request if any
func (f *Fs) addHeaders(req *http.Request) {
	addHeaders(req, &f.opt)
}
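The removed option's help text documents the wire format: a comma-separated, optionally CSV-quoted list of key,value pairs. A standalone sketch of decoding such a value into headers, using the same encoding/csv package the help text links to:

```go
package main

import (
	"encoding/csv"
	"fmt"
	"net/http"
	"strings"
)

// parseHeaders decodes a CSV-encoded "key,value,key,value" option
// string (as the help text above describes) into an http.Header.
func parseHeaders(s string) (http.Header, error) {
	fields, err := csv.NewReader(strings.NewReader(s)).Read()
	if err != nil {
		return nil, err
	}
	if len(fields)%2 != 0 {
		return nil, fmt.Errorf("odd number of headers supplied")
	}
	h := http.Header{}
	for i := 0; i < len(fields); i += 2 {
		h.Add(fields[i], fields[i+1])
	}
	return h, nil
}

func main() {
	h, err := parseHeaders(`"Cookie","name=value","Authorization","xxx"`)
	fmt.Println(h, err)
}
```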

// Read the directory passed in
func (f *Fs) readDir(dir string) (names []string, err error) {
	URL := f.url(dir)
@@ -364,13 +326,7 @@ func (f *Fs) readDir(dir string) (names []string, err error) {
	if !strings.HasSuffix(URL, "/") {
		return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
	}
	// Do the request
	req, err := http.NewRequest("GET", URL, nil)
	if err != nil {
		return nil, errors.Wrap(err, "readDir failed")
	}
	f.addHeaders(req)
	res, err := f.httpClient.Do(req)
	res, err := f.httpClient.Get(URL)
	if err == nil {
		defer fs.CheckClose(res.Body, &err)
		if res.StatusCode == http.StatusNotFound {
@@ -494,12 +450,7 @@ func (o *Object) url() string {
// stat updates the info field in the Object
func (o *Object) stat() error {
	url := o.url()
	req, err := http.NewRequest("HEAD", url, nil)
	if err != nil {
		return errors.Wrap(err, "stat failed")
	}
	o.fs.addHeaders(req)
	res, err := o.fs.httpClient.Do(req)
	res, err := o.fs.httpClient.Head(url)
	if err == nil && res.StatusCode == http.StatusNotFound {
		return fs.ErrorObjectNotFound
	}
@@ -551,7 +502,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	for k, v := range fs.OpenOptionHeaders(options) {
		req.Header.Add(k, v)
	}
	o.fs.addHeaders(req)

	// Do the request
	res, err := o.fs.httpClient.Do(req)

@@ -10,7 +10,6 @@ import (
	"os"
	"path/filepath"
	"sort"
	"strings"
	"testing"
	"time"

@@ -27,7 +26,6 @@ var (
	remoteName = "TestHTTP"
	testPath   = "test"
	filesPath  = filepath.Join(testPath, "files")
	headers    = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
)

// prepareServer prepares the test server and returns a function to tidy it up afterwards
@@ -35,16 +33,8 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
	// file server for test/files
	fileServer := http.FileServer(http.Dir(filesPath))

	// test the headers are there then pass on to fileServer
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
		assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
		assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
		fileServer.ServeHTTP(w, r)
	})

	// Make the test server
	ts := httptest.NewServer(handler)
	ts := httptest.NewServer(fileServer)

	// Configure the remote
	config.LoadConfig()
@@ -55,9 +45,8 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
	// config.FileSet(remoteName, "url", ts.URL)

	m := configmap.Simple{
		"type":    "http",
		"url":     ts.URL,
		"headers": strings.Join(headers, ","),
		"type": "http",
		"url":  ts.URL,
	}

	// return a function to tidy up

@@ -46,82 +46,6 @@ func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// APIString returns Time string in Jottacloud API format
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }

// TokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type TokenJSON struct {
	AccessToken  string `json:"access_token"`
	TokenType    string `json:"token_type"`
	RefreshToken string `json:"refresh_token"`
	ExpiresIn    int32  `json:"expires_in"` // at least PayPal returns string, while most return number
}

// JSON structures returned by new API

// AllocateFileRequest to prepare an upload to Jottacloud
type AllocateFileRequest struct {
	Bytes    int64  `json:"bytes"`
	Created  string `json:"created"`
	Md5      string `json:"md5"`
	Modified string `json:"modified"`
	Path     string `json:"path"`
}

// AllocateFileResponse for upload requests
type AllocateFileResponse struct {
	Name      string `json:"name"`
	Path      string `json:"path"`
	State     string `json:"state"`
	UploadID  string `json:"upload_id"`
	UploadURL string `json:"upload_url"`
	Bytes     int64  `json:"bytes"`
	ResumePos int64  `json:"resume_pos"`
}

// UploadResponse after an upload
type UploadResponse struct {
	Name      string      `json:"name"`
	Path      string      `json:"path"`
	Kind      string      `json:"kind"`
	ContentID string      `json:"content_id"`
	Bytes     int64       `json:"bytes"`
	Md5       string      `json:"md5"`
	Created   int64       `json:"created"`
	Modified  int64       `json:"modified"`
	Deleted   interface{} `json:"deleted"`
	Mime      string      `json:"mime"`
}

// DeviceRegistrationResponse is the response to registering a device
type DeviceRegistrationResponse struct {
	ClientID     string `json:"client_id"`
	ClientSecret string `json:"client_secret"`
}

// CustomerInfo provides general information about the account. Required for finding the correct internal username.
type CustomerInfo struct {
	Username          string      `json:"username"`
	Email             string      `json:"email"`
	Name              string      `json:"name"`
	CountryCode       string      `json:"country_code"`
	LanguageCode      string      `json:"language_code"`
	CustomerGroupCode string      `json:"customer_group_code"`
	BrandCode         string      `json:"brand_code"`
	AccountType       string      `json:"account_type"`
	SubscriptionType  string      `json:"subscription_type"`
	Usage             int64       `json:"usage"`
	Qouta             int64       `json:"quota"`
	BusinessUsage     int64       `json:"business_usage"`
	BusinessQouta     int64       `json:"business_quota"`
	WriteLocked       bool        `json:"write_locked"`
	ReadLocked        bool        `json:"read_locked"`
	LockedCause       interface{} `json:"locked_cause"`
	WebHash           string      `json:"web_hash"`
	AndroidHash       string      `json:"android_hash"`
	IOSHash           string      `json:"ios_hash"`
}

// XML structures returned by the old API

// Flag is a hacky type for checking if an attribute is present
type Flag bool

@@ -140,6 +64,15 @@ func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
	return attr, errors.New("unimplemented")
}

// TokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type TokenJSON struct {
	AccessToken  string `json:"access_token"`
	TokenType    string `json:"token_type"`
	RefreshToken string `json:"refresh_token"`
	ExpiresIn    int32  `json:"expires_in"` // at least PayPal returns string, while most return number
}
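The config flow later in jottacloud.go turns this raw response into an oauth2.Token, converting the relative expires_in seconds into an absolute expiry. That conversion, extracted into a self-contained sketch:

```go
package jottaauth

import (
	"time"

	"golang.org/x/oauth2"
)

// TokenJSON mirrors the struct above.
type TokenJSON struct {
	AccessToken  string `json:"access_token"`
	TokenType    string `json:"token_type"`
	RefreshToken string `json:"refresh_token"`
	ExpiresIn    int32  `json:"expires_in"`
}

// toOAuth2 converts the raw JSON token into an oauth2.Token, turning
// the relative expires_in into an absolute expiry time, just as the
// config flow below does.
func toOAuth2(j TokenJSON) oauth2.Token {
	return oauth2.Token{
		AccessToken:  j.AccessToken,
		TokenType:    j.TokenType,
		RefreshToken: j.RefreshToken,
		Expiry:       time.Now().Add(time.Duration(j.ExpiresIn) * time.Second),
	}
}
```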

/*
GET http://www.jottacloud.com/JFS/<account>

@@ -169,8 +102,8 @@ GET http://www.jottacloud.com/JFS/<account>
</user>
*/

// DriveInfo represents a Jottacloud account
type DriveInfo struct {
// AccountInfo represents a Jottacloud account
type AccountInfo struct {
	Username    string `xml:"username"`
	AccountType string `xml:"account-type"`
	Locked      bool   `xml:"locked"`
@@ -347,3 +280,43 @@ func (e *Error) Error() string {
	}
	return out
}

// AllocateFileRequest to prepare an upload to Jottacloud
type AllocateFileRequest struct {
	Bytes    int64  `json:"bytes"`
	Created  string `json:"created"`
	Md5      string `json:"md5"`
	Modified string `json:"modified"`
	Path     string `json:"path"`
}

// AllocateFileResponse for upload requests
type AllocateFileResponse struct {
	Name      string `json:"name"`
	Path      string `json:"path"`
	State     string `json:"state"`
	UploadID  string `json:"upload_id"`
	UploadURL string `json:"upload_url"`
	Bytes     int64  `json:"bytes"`
	ResumePos int64  `json:"resume_pos"`
}

// UploadResponse after an upload
type UploadResponse struct {
	Name      string      `json:"name"`
	Path      string      `json:"path"`
	Kind      string      `json:"kind"`
	ContentID string      `json:"content_id"`
	Bytes     int64       `json:"bytes"`
	Md5       string      `json:"md5"`
	Created   int64       `json:"created"`
	Modified  int64       `json:"modified"`
	Deleted   interface{} `json:"deleted"`
	Mime      string      `json:"mime"`
}

// DeviceRegistrationResponse is the response to registering a device
type DeviceRegistrationResponse struct {
	ClientID     string `json:"client_id"`
	ClientSecret string `json:"client_secret"`
}

@@ -44,13 +44,14 @@ const (
	defaultDevice     = "Jotta"
	defaultMountpoint = "Archive"
	rootURL           = "https://www.jottacloud.com/jfs/"
	apiURL            = "https://api.jottacloud.com/"
	apiURL            = "https://api.jottacloud.com/files/v1/"
	baseURL           = "https://www.jottacloud.com/"
	tokenURL          = "https://api.jottacloud.com/auth/v1/token"
	registerURL       = "https://api.jottacloud.com/auth/v1/register"
	cachePrefix       = "rclone-jcmd5-"
	rcloneClientID              = "nibfk8biu12ju7hpqomr8b1e40"
	rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
	configUsername     = "user"
	configClientID     = "client_id"
	configClientSecret = "client_secret"
	configDevice       = "device"
@@ -86,9 +87,34 @@ func init() {
	}

	srv := rest.NewClient(fshttp.NewClient(fs.Config))

	// ask if we should create a device specific token: https://github.com/rclone/rclone/issues/2995
	fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has its own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
	if config.Confirm() {
		deviceRegistration, err := registerDevice(srv)
		// random generator to generate random device names
		seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
		randonDeviceNamePartLength := 21
		randomDeviceNamePart := make([]byte, randonDeviceNamePartLength)
		for i := range randomDeviceNamePart {
			randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
		}
		randomDeviceName := "rclone-" + string(randomDeviceNamePart)
		fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)

		values := url.Values{}
		values.Set("device_id", randomDeviceName)

		// all information comes from https://github.com/ttyridal/aiojotta/wiki/Jotta-protocol-3.-Authentication#token-authentication
		opts := rest.Opts{
			Method:       "POST",
			RootURL:      registerURL,
			ContentType:  "application/x-www-form-urlencoded",
			ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
			Parameters:   values,
		}

		var deviceRegistration api.DeviceRegistrationResponse
		_, err := srv.CallJSON(&opts, nil, &deviceRegistration)
		if err != nil {
			log.Fatalf("Failed to register device: %v", err)
		}
@@ -109,14 +135,53 @@ func init() {
	oauthConfig.ClientID = clientID
	oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)

	fmt.Printf("Username> ")
	username := config.ReadLine()
	username, ok := m.Get(configUsername)
	if !ok {
		log.Fatalf("No username defined")
	}
	password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")

	token, err := doAuth(srv, username, password)
	if err != nil {
		log.Fatalf("Failed to get oauth token: %s", err)
	// prepare our token request with username and password
	values := url.Values{}
	values.Set("grant_type", "PASSWORD")
	values.Set("password", password)
	values.Set("username", username)
	values.Set("client_id", oauthConfig.ClientID)
	values.Set("client_secret", oauthConfig.ClientSecret)
	opts := rest.Opts{
		Method:      "POST",
		RootURL:     oauthConfig.Endpoint.AuthURL,
		ContentType: "application/x-www-form-urlencoded",
		Parameters:  values,
	}

	var jsonToken api.TokenJSON
	resp, err := srv.CallJSON(&opts, nil, &jsonToken)
	if err != nil {
		// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
		if resp != nil {
			if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
				fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
				fmt.Printf("Enter verification code> ")
				authCode := config.ReadLine()
				authCode = strings.Replace(authCode, "-", "", -1) // the sms received contains a pair of 3 digit numbers separated by '-' but wants a single 6 digit number
				opts.ExtraHeaders = make(map[string]string)
				opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
				resp, err = srv.CallJSON(&opts, nil, &jsonToken)
			}
		}
		if err != nil {
			log.Fatalf("Failed to get resource token: %v", err)
		}
	}

	var token oauth2.Token
	token.AccessToken = jsonToken.AccessToken
	token.RefreshToken = jsonToken.RefreshToken
	token.TokenType = jsonToken.TokenType
	token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)

	// finally save them in the config
	err = oauthutil.PutToken(name, m, &token, true)
	if err != nil {
		log.Fatalf("Error while saving token: %s", err)
@@ -130,17 +195,39 @@ func init() {
	}

	srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
	apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)

	device, mountpoint, err := setupMountpoint(srv, apiSrv)
	acc, err := getAccountInfo(srv, username)
	if err != nil {
		log.Fatalf("Failed to setup mountpoint: %s", err)
		log.Fatalf("Error getting devices: %s", err)
	}
	m.Set(configDevice, device)
	m.Set(configMountpoint, mountpoint)
	fmt.Printf("Please select the device to use. Normally this will be Jotta\n")
	var deviceNames []string
	for i := range acc.Devices {
		deviceNames = append(deviceNames, acc.Devices[i].Name)
	}
	result := config.Choose("Devices", deviceNames, nil, false)
	m.Set(configDevice, result)

	dev, err := getDeviceInfo(srv, path.Join(username, result))
	if err != nil {
		log.Fatalf("Error getting Mountpoint: %s", err)
	}
	if len(dev.MountPoints) == 0 {
		log.Fatalf("No Mountpoints found for this device.")
	}
	fmt.Printf("Please select the mountpoint to use. Normally this will be Archive\n")
	var mountpointNames []string
	for i := range dev.MountPoints {
		mountpointNames = append(mountpointNames, dev.MountPoints[i].Name)
	}
	result = config.Choose("Mountpoints", mountpointNames, nil, false)
	m.Set(configMountpoint, result)
	}
},
Options: []fs.Option{{
	Name: configUsername,
	Help: "User Name:",
}, {
	Name:    "md5_memory_limit",
	Help:    "Files bigger than this will be cached on disk to calculate the MD5 if required.",
	Default: fs.SizeSuffix(10 * 1024 * 1024),
@@ -166,6 +253,7 @@ func init() {

// Options defines the configuration for this backend
type Options struct {
	User               string        `config:"user"`
	Device             string        `config:"device"`
	Mountpoint         string        `config:"mountpoint"`
	MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
@@ -245,167 +333,6 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// registerDevice registers a new device for use with the jottacloud API
func registerDevice(srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
	// random generator to generate random device names
	seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
	randonDeviceNamePartLength := 21
	randomDeviceNamePart := make([]byte, randonDeviceNamePartLength)
	for i := range randomDeviceNamePart {
		randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
	}
	randomDeviceName := "rclone-" + string(randomDeviceNamePart)
	fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)

	values := url.Values{}
	values.Set("device_id", randomDeviceName)

	opts := rest.Opts{
		Method:       "POST",
		RootURL:      registerURL,
		ContentType:  "application/x-www-form-urlencoded",
		ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
		Parameters:   values,
	}

	var deviceRegistration *api.DeviceRegistrationResponse
	_, err = srv.CallJSON(&opts, nil, &deviceRegistration)
	return deviceRegistration, err
}

// doAuth runs the actual token request
func doAuth(srv *rest.Client, username, password string) (token oauth2.Token, err error) {
	// prepare our token request with username and password
	values := url.Values{}
	values.Set("grant_type", "PASSWORD")
	values.Set("password", password)
	values.Set("username", username)
	values.Set("client_id", oauthConfig.ClientID)
	values.Set("client_secret", oauthConfig.ClientSecret)
	opts := rest.Opts{
		Method:      "POST",
		RootURL:     oauthConfig.Endpoint.AuthURL,
		ContentType: "application/x-www-form-urlencoded",
		Parameters:  values,
	}

	// do the first request
	var jsonToken api.TokenJSON
	resp, err := srv.CallJSON(&opts, nil, &jsonToken)
	if err != nil {
		// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
		if resp != nil {
			if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
				fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
				fmt.Printf("Enter verification code> ")
				authCode := config.ReadLine()

				authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
				opts.ExtraHeaders = make(map[string]string)
				opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
				resp, err = srv.CallJSON(&opts, nil, &jsonToken)
			}
		}
	}

	token.AccessToken = jsonToken.AccessToken
	token.RefreshToken = jsonToken.RefreshToken
	token.TokenType = jsonToken.TokenType
	token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
	return token, err
}

// setupMountpoint sets up a custom device and mountpoint if desired by the user
func setupMountpoint(srv *rest.Client, apiSrv *rest.Client) (device, mountpoint string, err error) {
	cust, err := getCustomerInfo(apiSrv)
	if err != nil {
		return "", "", err
	}

	acc, err := getDriveInfo(srv, cust.Username)
	if err != nil {
		return "", "", err
	}
	var deviceNames []string
	for i := range acc.Devices {
		deviceNames = append(deviceNames, acc.Devices[i].Name)
	}
	fmt.Printf("Please select the device to use. Normally this will be Jotta\n")
	device = config.Choose("Devices", deviceNames, nil, false)

	dev, err := getDeviceInfo(srv, path.Join(cust.Username, device))
	if err != nil {
		return "", "", err
	}
	if len(dev.MountPoints) == 0 {
		return "", "", errors.New("no mountpoints for selected device")
	}
	var mountpointNames []string
	for i := range dev.MountPoints {
		mountpointNames = append(mountpointNames, dev.MountPoints[i].Name)
	}
	fmt.Printf("Please select the mountpoint to use. Normally this will be Archive\n")
	mountpoint = config.Choose("Mountpoints", mountpointNames, nil, false)

	return device, mountpoint, err
}

// getCustomerInfo queries general information about the account
func getCustomerInfo(srv *rest.Client) (info *api.CustomerInfo, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   "account/v1/customer",
	}

	_, err = srv.CallJSON(&opts, nil, &info)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't get customer info")
	}

	return info, nil
}

// getDriveInfo queries general information about the account and the available devices and mountpoints.
func getDriveInfo(srv *rest.Client, username string) (info *api.DriveInfo, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   username,
	}

	_, err = srv.CallXML(&opts, nil, &info)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't get drive info")
	}

	return info, nil
}

// getDeviceInfo queries information about a jottacloud device
func getDeviceInfo(srv *rest.Client, path string) (info *api.JottaDevice, err error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   urlPathEscape(path),
	}

	_, err = srv.CallXML(&opts, nil, &info)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't get device info")
	}

	return info, nil
}

// setEndpointURL generates the API endpoint URL
func (f *Fs) setEndpointURL() {
	if f.opt.Device == "" {
		f.opt.Device = defaultDevice
	}
	if f.opt.Mountpoint == "" {
		f.opt.Mountpoint = defaultMountpoint
	}
	f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
}
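Both versions of setEndpointURL build the endpoint the same way: join user, device and mountpoint, then escape the result. urlPathEscape itself is not shown in this diff, so the per-segment escaping below is an assumption about its behaviour:

```go
package main

import (
	"fmt"
	"net/url"
	"path"
	"strings"
)

// endpointURL sketches the assumed behaviour of urlPathEscape applied
// to the joined user/device/mountpoint path: each segment is escaped
// individually so the separating slashes survive.
func endpointURL(user, device, mountpoint string) string {
	joined := path.Join(user, device, mountpoint)
	parts := strings.Split(joined, "/")
	for i, p := range parts {
		parts[i] = url.PathEscape(p)
	}
	return strings.Join(parts, "/")
}

func main() {
	fmt.Println(endpointURL("alice", "Jotta", "Archive")) // alice/Jotta/Archive
}
```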

// readMetaDataForPath reads the metadata from the path
func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
    opts := rest.Opts{
@@ -435,6 +362,54 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
    return &result, nil
}

// getAccountInfo queries general information about the account.
// Takes rest.Client and username as parameter to be easily usable
// during config
func getAccountInfo(srv *rest.Client, username string) (info *api.AccountInfo, err error) {
    opts := rest.Opts{
        Method: "GET",
        Path:   urlPathEscape(username),
    }

    _, err = srv.CallXML(&opts, nil, &info)
    if err != nil {
        return nil, err
    }

    return info, nil
}

// getDeviceInfo queries information about a jottacloud device
func getDeviceInfo(srv *rest.Client, path string) (info *api.JottaDevice, err error) {
    opts := rest.Opts{
        Method: "GET",
        Path:   urlPathEscape(path),
    }

    _, err = srv.CallXML(&opts, nil, &info)
    if err != nil {
        return nil, err
    }

    return info, nil
}

// setEndpointURL reads the account id and generates the API endpoint URL
func (f *Fs) setEndpointURL() (err error) {
    info, err := getAccountInfo(f.srv, f.user)
    if err != nil {
        return errors.Wrap(err, "failed to get endpoint url")
    }
    if f.opt.Device == "" {
        f.opt.Device = defaultDevice
    }
    if f.opt.Mountpoint == "" {
        f.opt.Mountpoint = defaultMountpoint
    }
    f.endpointURL = urlPathEscape(path.Join(info.Username, f.opt.Device, f.opt.Mountpoint))
    return nil
}

// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
    // Decode error response
@@ -467,6 +442,11 @@ func (f *Fs) filePath(file string) string {
    return urlPathEscape(f.filePathRaw(file))
}

// filePath returns an escaped file path (f.root, remote)
func (o *Object) filePath() string {
    return o.fs.filePath(o.remote)
}

// Jottacloud requires the grant_type 'refresh_token' string
// to be uppercase and throws a 400 Bad Request if we use the
// lower case used by the oauth2 module
@@ -531,6 +511,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    f := &Fs{
        name:   name,
        root:   root,
        user:   opt.User,
        opt:    *opt,
        srv:    rest.NewClient(oAuthClient).SetRoot(rootURL),
        apiSrv: rest.NewClient(oAuthClient).SetRoot(apiURL),
@@ -550,12 +531,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
        return err
    })

    cust, err := getCustomerInfo(f.apiSrv)
    err = f.setEndpointURL()
    if err != nil {
        return nil, err
        return nil, errors.Wrap(err, "couldn't get account info")
    }
    f.user = cust.Username
    f.setEndpointURL()

    if root != "" && !rootIsDir {
        // Check to see if the root is actually an existing file
@@ -640,6 +619,7 @@ func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    //fmt.Printf("List: %s\n", f.filePath(dir))
    opts := rest.Opts{
        Method: "GET",
        Path:   f.filePath(dir),
@@ -688,6 +668,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
        }
        entries = append(entries, o)
    }
    //fmt.Printf("Entries: %+v\n", entries)
    return entries, nil
}

@@ -743,6 +724,17 @@ func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, f
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
    opts := rest.Opts{
        Method: "GET",
@@ -867,6 +859,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
        return errors.Wrap(err, "couldn't purge directory")
    }

    // TODO: Parse response?
    return nil
}

@@ -883,6 +876,10 @@ func (f *Fs) Precision() time.Duration {
}

// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
    return f.purgeCheck(ctx, "", false)
}
@@ -1058,7 +1055,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er

// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
    info, err := getDriveInfo(f.srv, f.user)
    info, err := getAccountInfo(f.srv, f.user)
    if err != nil {
        return nil, err
    }
@@ -1098,11 +1095,6 @@ func (o *Object) Remote() string {
    return o.remote
}

// filePath returns an escaped file path (f.root, remote)
func (o *Object) filePath() string {
    return o.fs.filePath(o.remote)
}

// Hash returns the MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
    if t != hash.MD5 {
@@ -1136,7 +1128,6 @@ func (o *Object) setMetaData(info *api.JottaFile) (err error) {
    return nil
}

// readMetaData reads and updates the metadata for an object
func (o *Object) readMetaData(force bool) (err error) {
    if o.hasMetaData && !force {
        return nil
@@ -1281,7 +1272,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    var resp *http.Response
    opts := rest.Opts{
        Method:       "POST",
        Path:         "files/v1/allocate",
        Path:         "allocate",
        ExtraHeaders: make(map[string]string),
    }
    fileDate := api.Time(src.ModTime(ctx)).APIString()
@@ -1340,7 +1331,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
        o.md5 = result.Md5
        o.modTime = time.Unix(result.Modified/1000, 0)
    } else {
        // If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata
        // If the file state is COMPLETE we don't need to upload it because the file was allready found but we still need to update our metadata
        return o.readMetaData(true)
    }

@@ -154,7 +154,6 @@ func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
    var sOff, eOff int64 = 0, -1

    fs.FixRangeOption(options, o.Size())
    for _, option := range options {
        switch x := option.(type) {
        case *fs.SeekOption:
@@ -171,6 +170,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
    if sOff == 0 && eOff < 0 {
        return o.fs.client.FilesGet(o.fs.mountID, o.fullPath())
    }
    if sOff < 0 {
        sOff = o.Size() - eOff
        eOff = o.Size()
    }
    if eOff > o.Size() {
        eOff = o.Size()
    }
    span := &koofrclient.FileSpan{
        Start: sOff,
        End:   eOff,

@@ -1,12 +0,0 @@
//+build !linux

package local

import (
    "io"
    "os"
)

func newFadviseReadCloser(o *Object, f *os.File, offset, limit int64) io.ReadCloser {
    return f
}
@@ -1,165 +0,0 @@
//+build linux

package local

import (
    "io"
    "os"

    "github.com/rclone/rclone/fs"
    "golang.org/x/sys/unix"
)

// fadvise provides means to automate freeing pages in kernel page cache for
// a given file descriptor as the file is sequentially processed (read or
// written).
//
// When copying a file to a remote backend all the file content is read by
// kernel and put to page cache to make future reads faster.
// This causes memory pressure visible in both memory usage and CPU consumption
// and can even cause OOM errors in applications consuming large amounts of memory.
//
// In case of an upload to a remote backend, there are no benefits from caching.
//
// fadvise would orchestrate calling POSIX_FADV_DONTNEED
//
// POSIX_FADV_DONTNEED attempts to free cached pages associated
// with the specified region. This is useful, for example, while
// streaming large files. A program may periodically request the
// kernel to free cached data that has already been used, so that
// more useful cached pages are not discarded instead.
//
// Requests to discard partial pages are ignored. It is
// preferable to preserve needed data than discard unneeded data.
// If the application requires that data be considered for
// discarding, then offset and len must be page-aligned.
//
// The implementation may attempt to write back dirty pages in
// the specified region, but this is not guaranteed. Any
// unwritten dirty pages will not be freed. If the application
// wishes to ensure that dirty pages will be released, it should
// call fsync(2) or fdatasync(2) first.
type fadvise struct {
    o          *Object
    fd         int
    lastPos    int64
    curPos     int64
    windowSize int64

    freePagesCh chan offsetLength
    doneCh      chan struct{}
}

type offsetLength struct {
    offset int64
    length int64
}

const (
    defaultAllowPages      = 32
    defaultWorkerQueueSize = 64
)

func newFadvise(o *Object, fd int, offset int64) *fadvise {
    f := &fadvise{
        o:          o,
        fd:         fd,
        lastPos:    offset,
        curPos:     offset,
        windowSize: int64(os.Getpagesize()) * defaultAllowPages,

        freePagesCh: make(chan offsetLength, defaultWorkerQueueSize),
        doneCh:      make(chan struct{}),
    }
    go f.worker()

    return f
}

// sequential configures readahead strategy in Linux kernel.
//
// Under Linux, POSIX_FADV_NORMAL sets the readahead window to the
// default size for the backing device; POSIX_FADV_SEQUENTIAL doubles
// this size, and POSIX_FADV_RANDOM disables file readahead entirely.
func (f *fadvise) sequential(limit int64) bool {
    l := int64(0)
    if limit > 0 {
        l = limit
    }
    if err := unix.Fadvise(f.fd, f.curPos, l, unix.FADV_SEQUENTIAL); err != nil {
        fs.Debugf(f.o, "fadvise sequential failed on file descriptor %d: %s", f.fd, err)
        return false
    }

    return true
}

func (f *fadvise) next(n int) {
    f.curPos += int64(n)
    f.freePagesIfNeeded()
}

func (f *fadvise) freePagesIfNeeded() {
    if f.curPos >= f.lastPos+f.windowSize {
        f.freePages()
    }
}

func (f *fadvise) freePages() {
    f.freePagesCh <- offsetLength{f.lastPos, f.curPos - f.lastPos}
    f.lastPos = f.curPos
}

func (f *fadvise) worker() {
    for p := range f.freePagesCh {
        if err := unix.Fadvise(f.fd, p.offset, p.length, unix.FADV_DONTNEED); err != nil {
            fs.Debugf(f.o, "fadvise dontneed failed on file descriptor %d: %s", f.fd, err)
        }
    }

    close(f.doneCh)
}

func (f *fadvise) wait() {
    close(f.freePagesCh)
    <-f.doneCh
}

type fadviseReadCloser struct {
    *fadvise
    inner io.ReadCloser
}

// newFadviseReadCloser wraps os.File so that reading from that file would
// remove already consumed pages from kernel page cache.
// In addition to that it instructs kernel to double the readahead window to
// make sequential reads faster.
// See also fadvise.
func newFadviseReadCloser(o *Object, f *os.File, offset, limit int64) io.ReadCloser {
    r := fadviseReadCloser{
        fadvise: newFadvise(o, int(f.Fd()), offset),
        inner:   f,
    }

    // If syscall failed it's likely that the subsequent syscalls to that
    // file descriptor would also fail. In that case return the provided os.File
    // pointer.
    if !r.sequential(limit) {
        r.wait()
        return f
    }

    return r
}

func (f fadviseReadCloser) Read(p []byte) (n int, err error) {
    n, err = f.inner.Read(p)
    f.next(n)
    return
}

func (f fadviseReadCloser) Close() error {
    f.freePages()
    f.wait()
    return f.inner.Close()
}
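A minimal standalone sketch of the same fadvise pattern, shown here for illustration only: read a file sequentially, hint the kernel with FADV_SEQUENTIAL, and drop pages that have already been consumed with FADV_DONTNEED. Linux-only, and the file path is hypothetical.

//go:build linux

package main

import (
    "log"
    "os"

    "golang.org/x/sys/unix"
)

func main() {
    f, err := os.Open("/var/log/syslog") // hypothetical large file
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    fd := int(f.Fd())
    // Ask the kernel to widen the readahead window for a sequential scan.
    if err := unix.Fadvise(fd, 0, 0, unix.FADV_SEQUENTIAL); err != nil {
        log.Printf("fadvise sequential: %v", err)
    }

    buf := make([]byte, 1<<20)
    var off int64
    for {
        n, err := f.Read(buf)
        if n > 0 {
            // Drop the pages we just consumed from the page cache.
            if ferr := unix.Fadvise(fd, off, int64(n), unix.FADV_DONTNEED); ferr != nil {
                log.Printf("fadvise dontneed: %v", ferr)
            }
            off += int64(n)
        }
        if err != nil {
            break
        }
    }
}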

@@ -194,7 +194,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    f.features = (&fs.Features{
        CaseInsensitive:         f.caseInsensitive(),
        CanHaveEmptyDirectories: true,
        IsLocal:                 true,
    }).Fill(f)
    if opt.FollowSymlinks {
        f.lstat = os.Stat
@@ -778,11 +777,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
    var in io.ReadCloser

    if !o.translatedLink {
        var fd *os.File
        fd, err = file.Open(o.path)
        if fd != nil {
            in = newFadviseReadCloser(o, fd, 0, 0)
        }
        in, err = file.Open(o.path)
    } else {
        in, err = o.openTranslatedLink(0, -1)
    }
@@ -918,7 +913,7 @@ func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
    var offset, limit int64 = 0, -1
    var hasher *hash.MultiHasher
    hashes := hash.Supported
    for _, option := range options {
        switch x := option.(type) {
        case *fs.SeekOption:
@@ -926,12 +921,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
        case *fs.RangeOption:
            offset, limit = x.Decode(o.size)
        case *fs.HashesOption:
            if x.Hashes.Count() > 0 {
                hasher, err = hash.NewMultiHasherTypes(x.Hashes)
                if err != nil {
                    return nil, err
                }
            }
            hashes = x.Hashes
        default:
            if option.Mandatory() {
                fs.Logf(o, "Unsupported mandatory option: %v", option)
@@ -948,22 +938,22 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
    if err != nil {
        return
    }
    wrappedFd := readers.NewLimitedReadCloser(newFadviseReadCloser(o, fd, offset, limit), limit)
    wrappedFd := readers.NewLimitedReadCloser(fd, limit)
    if offset != 0 {
        // seek the object
        _, err = fd.Seek(offset, io.SeekStart)
        // don't attempt to make checksums
        return wrappedFd, err
    }
    if hasher == nil {
        // no need to wrap since we don't need checksums
        return wrappedFd, nil
    hash, err := hash.NewMultiHasherTypes(hashes)
    if err != nil {
        return nil, err
    }
    // Update the hashes as we go along
    // Update the md5sum as we go along
    in = &localOpenFile{
        o:    o,
        in:   wrappedFd,
        hash: hasher,
        hash: hash,
        fd:   fd,
    }
    return in, nil
@@ -985,23 +975,18 @@ func (nwc nopWriterCloser) Close() error {
}

// Update the object from in with modTime and size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
    var out io.WriteCloser
    var hasher *hash.MultiHasher

    hashes := hash.Supported
    for _, option := range options {
        switch x := option.(type) {
        case *fs.HashesOption:
            if x.Hashes.Count() > 0 {
                hasher, err = hash.NewMultiHasherTypes(x.Hashes)
                if err != nil {
                    return err
                }
            }
            hashes = x.Hashes
        }
    }

    err = o.mkdirAll()
    err := o.mkdirAll()
    if err != nil {
        return err
    }
@@ -1026,9 +1011,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    }

    // Calculate the hash of the object we are reading as we go along
    if hasher != nil {
        in = io.TeeReader(in, hasher)
    hash, err := hash.NewMultiHasherTypes(hashes)
    if err != nil {
        return err
    }
    in = io.TeeReader(in, hash)

    _, err = io.Copy(out, in)
    closeErr := out.Close()
@@ -1064,11 +1051,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    }

    // All successful so update the hashes
    if hasher != nil {
        o.fs.objectHashesMu.Lock()
        o.hashes = hasher.Sums()
        o.fs.objectHashesMu.Unlock()
    }
    o.fs.objectHashesMu.Lock()
    o.hashes = hash.Sums()
    o.fs.objectHashesMu.Unlock()

    // Set the mtime
    err = o.SetModTime(ctx, src.ModTime(ctx))

@@ -1464,24 +1464,22 @@ func (o *Object) uploadFragment(url string, start int64, totalSize int64, chunk
    }
    // var response api.UploadFragmentResponse
    var resp *http.Response
    var body []byte
    err = o.fs.pacer.Call(func() (bool, error) {
        _, _ = chunk.Seek(0, io.SeekStart)
        resp, err = o.fs.srv.Call(&opts)
        if err != nil {
            return shouldRetry(resp, err)
        if resp != nil {
            defer fs.CheckClose(resp.Body, &err)
        }
        body, err = rest.ReadBody(resp)
        if err != nil {
            return shouldRetry(resp, err)
        retry, err := shouldRetry(resp, err)
        if !retry && resp != nil {
            if resp.StatusCode == 200 || resp.StatusCode == 201 {
                // we are done :)
                // read the item
                info = &api.Item{}
                return false, json.NewDecoder(resp.Body).Decode(info)
            }
        }
        if resp.StatusCode == 200 || resp.StatusCode == 201 {
            // we are done :)
            // read the item
            info = &api.Item{}
            return false, json.Unmarshal(body, info)
        }
        return false, nil
        return retry, err
    })
    return info, err
}

@@ -1,83 +0,0 @@
// Package api contains definitions for using the premiumize.me API
package api

import "fmt"

// Response is returned by all messages and embedded in the
// structures below
type Response struct {
    Message string `json:"message,omitempty"`
    Status  string `json:"status"`
}

// Error satisfies the error interface
func (e *Response) Error() string {
    return fmt.Sprintf("%s: %s", e.Status, e.Message)
}

// AsErr checks the status and returns an err if bad or nil if good
func (e *Response) AsErr() error {
    if e.Status != "success" {
        return e
    }
    return nil
}

// Item Types
const (
    ItemTypeFolder = "folder"
    ItemTypeFile   = "file"
)

// Item refers to a file or folder
type Item struct {
    Breadcrumbs     []Breadcrumb `json:"breadcrumbs"`
    CreatedAt       int64        `json:"created_at,omitempty"`
    ID              string       `json:"id"`
    Link            string       `json:"link,omitempty"`
    Name            string       `json:"name"`
    Size            int64        `json:"size,omitempty"`
    StreamLink      string       `json:"stream_link,omitempty"`
    Type            string       `json:"type"`
    TranscodeStatus string       `json:"transcode_status"`
    IP              string       `json:"ip"`
    MimeType        string       `json:"mime_type"`
}

// Breadcrumb is part of the breadcrumb trail for a file or folder. It
// is returned as part of folder/list if required
type Breadcrumb struct {
    ID       string `json:"id,omitempty"`
    Name     string `json:"name,omitempty"`
    ParentID string `json:"parent_id,omitempty"`
}

// FolderListResponse is the response to folder/list
type FolderListResponse struct {
    Response
    Content  []Item `json:"content"`
    Name     string `json:"name,omitempty"`
    ParentID string `json:"parent_id,omitempty"`
}

// FolderCreateResponse is the response to folder/create
type FolderCreateResponse struct {
    Response
    ID string `json:"id,omitempty"`
}

// FolderUploadinfoResponse is the response to folder/uploadinfo
type FolderUploadinfoResponse struct {
    Response
    Token string `json:"token,omitempty"`
    URL   string `json:"url,omitempty"`
}

// AccountInfoResponse is the response to account/info
type AccountInfoResponse struct {
    Response
    CustomerID   string  `json:"customer_id,omitempty"`
    LimitUsed    float64 `json:"limit_used,omitempty"` // fraction 0..1 of download traffic limit
    PremiumUntil int64   `json:"premium_until,omitempty"`
    SpaceUsed    float64 `json:"space_used,omitempty"`
}
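A minimal sketch (hypothetical JSON body, trimmed copies of the types above) of how a folder/list reply decodes into these structures and how a non-"success" status surfaces as an error:

package main

import (
    "encoding/json"
    "fmt"
    "log"
)

// Trimmed copies of the api types above, just enough for the example.
type Response struct {
    Message string `json:"message,omitempty"`
    Status  string `json:"status"`
}

type Item struct {
    ID   string `json:"id"`
    Name string `json:"name"`
    Type string `json:"type"`
}

type FolderListResponse struct {
    Response
    Content []Item `json:"content"`
}

func main() {
    body := []byte(`{"status":"success","content":[{"id":"1","name":"movies","type":"folder"}]}`)
    var r FolderListResponse
    if err := json.Unmarshal(body, &r); err != nil {
        log.Fatal(err)
    }
    if r.Status != "success" { // the same check AsErr performs
        log.Fatalf("%s: %s", r.Status, r.Message)
    }
    fmt.Println(r.Content[0].Name) // prints "movies"
}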

File diff suppressed because it is too large
@@ -1,17 +0,0 @@
// Test filesystem interface
package premiumizeme_test

import (
    "testing"

    "github.com/rclone/rclone/backend/premiumizeme"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestPremiumizeMe:",
        NilObject:  (*premiumizeme.Object)(nil),
    })
}
@@ -1,691 +0,0 @@
package putio

import (
    "bytes"
    "context"
    "encoding/base64"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "path"
    "strconv"
    "strings"
    "time"

    "github.com/pkg/errors"
    "github.com/putdotio/go-putio/putio"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/lib/dircache"
    "github.com/rclone/rclone/lib/oauthutil"
    "github.com/rclone/rclone/lib/pacer"
    "github.com/rclone/rclone/lib/readers"
)

// Fs represents a remote Putio server
type Fs struct {
    name        string             // name of this remote
    root        string             // the path we are working on
    features    *fs.Features       // optional features
    client      *putio.Client      // client for making API calls to Put.io
    pacer       *fs.Pacer          // To pace the API calls
    dirCache    *dircache.DirCache // Map of directory path to directory id
    oAuthClient *http.Client
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
    return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
    return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
    return fmt.Sprintf("Putio root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
    return f.features
}

// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(err error) (bool, error) {
    if err == nil {
        return false, nil
    }
    if fserrors.ShouldRetry(err) {
        return true, err
    }
    if perr, ok := err.(*putio.ErrorResponse); ok {
        if perr.Response.StatusCode == 429 || perr.Response.StatusCode >= 500 {
            return true, err
        }
    }
    return false, err
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
    // defer log.Trace(name, "root=%v", root)("f=%+v, err=%v", &f, &err)
    oAuthClient, _, err := oauthutil.NewClient(name, m, putioConfig)
    if err != nil {
        return nil, errors.Wrap(err, "failed to configure putio")
    }
    p := &Fs{
        name:        name,
        root:        root,
        pacer:       fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
        client:      putio.NewClient(oAuthClient),
        oAuthClient: oAuthClient,
    }
    p.features = (&fs.Features{
        DuplicateFiles:          true,
        ReadMimeType:            true,
        CanHaveEmptyDirectories: true,
    }).Fill(p)
    p.dirCache = dircache.New(root, "0", p)
    ctx := context.Background()
    // Find the current root
    err = p.dirCache.FindRoot(ctx, false)
    if err != nil {
        // Assume it is a file
        newRoot, remote := dircache.SplitPath(root)
        tempF := *p
        tempF.dirCache = dircache.New(newRoot, "0", &tempF)
        tempF.root = newRoot
        // Make new Fs which is the parent
        err = tempF.dirCache.FindRoot(ctx, false)
        if err != nil {
            // No root so return old f
            return p, nil
        }
        _, err := tempF.NewObject(ctx, remote)
        if err != nil {
            // unable to list folder so return old f
            return p, nil
        }
        // XXX: update the old f here instead of returning tempF, since
        // `features` were already filled with functions having *f as a receiver.
        // See https://github.com/rclone/rclone/issues/2182
        p.dirCache = tempF.dirCache
        p.root = tempF.root
        return p, fs.ErrorIsFile
    }
    // fs.Debugf(p, "Root id: %s", p.dirCache.RootID())
    return p, nil
}

func itoa(i int64) string {
    return strconv.FormatInt(i, 10)
}

func atoi(a string) int64 {
    i, err := strconv.ParseInt(a, 10, 64)
    if err != nil {
        panic(err)
    }
    return i
}

// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
    // defer log.Trace(f, "pathID=%v, leaf=%v", pathID, leaf)("newID=%v, err=%v", newID, &err)
    parentID := atoi(pathID)
    var entry putio.File
    err = f.pacer.Call(func() (bool, error) {
        // fs.Debugf(f, "creating folder. part: %s, parentID: %d", leaf, parentID)
        entry, err = f.client.Files.CreateFolder(ctx, leaf, parentID)
        return shouldRetry(err)
    })
    return itoa(entry.ID), err
}

// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
    // defer log.Trace(f, "pathID=%v, leaf=%v", pathID, leaf)("pathIDOut=%v, found=%v, err=%v", pathIDOut, found, &err)
    if pathID == "0" && leaf == "" {
        // that's the root directory
        return pathID, true, nil
    }
    fileID := atoi(pathID)
    var children []putio.File
    err = f.pacer.Call(func() (bool, error) {
        // fs.Debugf(f, "listing file: %d", fileID)
        children, _, err = f.client.Files.List(ctx, fileID)
        return shouldRetry(err)
    })
    if err != nil {
        if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode == 404 {
            err = nil
        }
        return
    }
    for _, child := range children {
        if child.Name == leaf {
            found = true
            pathIDOut = itoa(child.ID)
            if !child.IsDir() {
                err = fs.ErrorNotAFile
            }
            return
        }
    }
    return
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    // defer log.Trace(f, "dir=%v", dir)("err=%v", &err)
    err = f.dirCache.FindRoot(ctx, false)
    if err != nil {
        return nil, err
    }
    directoryID, err := f.dirCache.FindDir(ctx, dir, false)
    if err != nil {
        return nil, err
    }
    parentID := atoi(directoryID)
    var children []putio.File
    err = f.pacer.Call(func() (bool, error) {
        // fs.Debugf(f, "listing files inside List: %d", parentID)
        children, _, err = f.client.Files.List(ctx, parentID)
        return shouldRetry(err)
    })
    if err != nil {
        return
    }
    for _, child := range children {
        remote := path.Join(dir, child.Name)
        // fs.Debugf(f, "child: %s", remote)
        if child.IsDir() {
            f.dirCache.Put(remote, itoa(child.ID))
            d := fs.NewDir(remote, child.UpdatedAt.Time)
            entries = append(entries, d)
        } else {
            o, err := f.newObjectWithInfo(ctx, remote, child)
            if err != nil {
                return nil, err
            }
            entries = append(entries, o)
        }
    }
    return
}

// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
    // defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err)
    existingObj, err := f.NewObject(ctx, src.Remote())
    switch err {
    case nil:
        return existingObj, existingObj.Update(ctx, in, src, options...)
    case fs.ErrorObjectNotFound:
        // Not found so create it
        return f.PutUnchecked(ctx, in, src, options...)
    default:
        return nil, err
    }
}

// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
    // defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err)
    size := src.Size()
    remote := src.Remote()
    leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
    if err != nil {
        return nil, err
    }
    loc, err := f.createUpload(ctx, leaf, size, directoryID, src.ModTime(ctx))
    if err != nil {
        return nil, err
    }
    fileID, err := f.sendUpload(ctx, loc, size, in)
    if err != nil {
        return nil, err
    }
    var entry putio.File
    err = f.pacer.Call(func() (bool, error) {
        // fs.Debugf(f, "getting file: %d", fileID)
        entry, err = f.client.Files.Get(ctx, fileID)
        return shouldRetry(err)
    })
    if err != nil {
        return nil, err
    }
    return f.newObjectWithInfo(ctx, remote, entry)
}

func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID string, modTime time.Time) (location string, err error) {
    // defer log.Trace(f, "name=%v, size=%v, parentID=%v, modTime=%v", name, size, parentID, modTime.String())("location=%v, err=%v", location, &err)
    err = f.pacer.Call(func() (bool, error) {
        req, err := http.NewRequest("POST", "https://upload.put.io/files/", nil)
        if err != nil {
            return false, err
        }
        req.Header.Set("tus-resumable", "1.0.0")
        req.Header.Set("upload-length", strconv.FormatInt(size, 10))
        b64name := base64.StdEncoding.EncodeToString([]byte(name))
        b64true := base64.StdEncoding.EncodeToString([]byte("true"))
        b64parentID := base64.StdEncoding.EncodeToString([]byte(parentID))
        b64modifiedAt := base64.StdEncoding.EncodeToString([]byte(modTime.Format(time.RFC3339)))
        req.Header.Set("upload-metadata", fmt.Sprintf("name %s,no-torrent %s,parent_id %s,updated-at %s", b64name, b64true, b64parentID, b64modifiedAt))
        resp, err := f.oAuthClient.Do(req)
        retry, err := shouldRetry(err)
        if retry {
            return true, err
        }
        if err != nil {
            return false, err
        }
        if resp.StatusCode != 201 {
            return false, fmt.Errorf("unexpected status code from upload create: %d", resp.StatusCode)
        }
        location = resp.Header.Get("location")
        if location == "" {
            return false, errors.New("empty location header from upload create")
        }
        return false, nil
    })
    return
}
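A minimal sketch of the tus "upload-metadata" header assembled in createUpload above: each value is base64 encoded and the pairs are comma separated. The file name, parent id and timestamp here are hypothetical.

package main

import (
    "encoding/base64"
    "fmt"
    "time"
)

func main() {
    b64 := func(s string) string { return base64.StdEncoding.EncodeToString([]byte(s)) }
    meta := fmt.Sprintf("name %s,no-torrent %s,parent_id %s,updated-at %s",
        b64("report.pdf"), b64("true"), b64("0"),
        b64(time.Date(2019, 9, 1, 12, 0, 0, 0, time.UTC).Format(time.RFC3339)))
    fmt.Println(meta)
    // name cmVwb3J0LnBkZg==,no-torrent dHJ1ZQ==,parent_id MA==,updated-at MjAxOS0wOS0wMVQxMjowMDowMFo=
}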

func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.Reader) (fileID int64, err error) {
    // defer log.Trace(f, "location=%v, size=%v", location, size)("fileID=%v, err=%v", fileID, &err)
    if size == 0 {
        err = f.pacer.Call(func() (bool, error) {
            fs.Debugf(f, "Sending zero length chunk")
            fileID, err = f.transferChunk(ctx, location, 0, bytes.NewReader([]byte{}), 0)
            return shouldRetry(err)
        })
        return
    }
    var start int64
    buf := make([]byte, defaultChunkSize)
    for start < size {
        reqSize := size - start
        if reqSize >= int64(defaultChunkSize) {
            reqSize = int64(defaultChunkSize)
        }
        chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, reqSize)

        // Transfer the chunk
        err = f.pacer.Call(func() (bool, error) {
            fs.Debugf(f, "Sending chunk. start: %d length: %d", start, reqSize)
            // TODO get file offset and seek to the position
            fileID, err = f.transferChunk(ctx, location, start, chunk, reqSize)
            return shouldRetry(err)
        })
        if err != nil {
            return
        }

        start += reqSize
    }
    return
}
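A small illustrative sketch of the chunking arithmetic in sendUpload above: the body is sent in fixed-size pieces (48 MiB by default) and the final piece carries the remainder. The 100 MiB size is hypothetical.

package main

import "fmt"

func main() {
    const chunkSize = int64(48 << 20) // mirrors defaultChunkSize
    size := int64(100 << 20)          // hypothetical 100 MiB upload
    for start := int64(0); start < size; {
        reqSize := size - start
        if reqSize > chunkSize {
            reqSize = chunkSize
        }
        fmt.Printf("PATCH upload-offset=%d content-length=%d\n", start, reqSize)
        start += reqSize
    }
    // PATCH upload-offset=0 content-length=50331648
    // PATCH upload-offset=50331648 content-length=50331648
    // PATCH upload-offset=100663296 content-length=4194304
}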

func (f *Fs) transferChunk(ctx context.Context, location string, start int64, chunk io.ReadSeeker, chunkSize int64) (fileID int64, err error) {
    // defer log.Trace(f, "location=%v, start=%v, chunkSize=%v", location, start, chunkSize)("fileID=%v, err=%v", fileID, &err)
    _, _ = chunk.Seek(0, io.SeekStart)
    req, err := f.makeUploadPatchRequest(location, chunk, start, chunkSize)
    if err != nil {
        return 0, err
    }
    req = req.WithContext(ctx)
    res, err := f.oAuthClient.Do(req)
    if err != nil {
        return 0, err
    }
    defer func() {
        _ = res.Body.Close()
    }()
    if res.StatusCode != 204 {
        return 0, fmt.Errorf("unexpected status code while transferring chunk: %d", res.StatusCode)
    }
    sfid := res.Header.Get("putio-file-id")
    if sfid != "" {
        fileID, err = strconv.ParseInt(sfid, 10, 64)
        if err != nil {
            return 0, err
        }
    }
    return fileID, nil
}

func (f *Fs) makeUploadPatchRequest(location string, in io.Reader, offset, length int64) (*http.Request, error) {
    req, err := http.NewRequest("PATCH", location, in)
    if err != nil {
        return nil, err
    }
    req.Header.Set("tus-resumable", "1.0.0")
    req.Header.Set("upload-offset", strconv.FormatInt(offset, 10))
    req.Header.Set("content-length", strconv.FormatInt(length, 10))
    req.Header.Set("content-type", "application/offset+octet-stream")
    return req, nil
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
    // defer log.Trace(f, "dir=%v", dir)("err=%v", &err)
    err = f.dirCache.FindRoot(ctx, true)
    if err != nil {
        return err
    }
    if dir != "" {
        _, err = f.dirCache.FindDir(ctx, dir, true)
    }
    return err
}

// Rmdir deletes the container
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
    // defer log.Trace(f, "dir=%v", dir)("err=%v", &err)

    root := strings.Trim(path.Join(f.root, dir), "/")

    // can't remove root
    if root == "" {
        return errors.New("can't remove root directory")
    }

    // check directory exists
    directoryID, err := f.dirCache.FindDir(ctx, dir, false)
    if err != nil {
        return errors.Wrap(err, "Rmdir")
    }
    dirID := atoi(directoryID)

    // check directory empty
    var children []putio.File
    err = f.pacer.Call(func() (bool, error) {
        // fs.Debugf(f, "listing files: %d", dirID)
        children, _, err = f.client.Files.List(ctx, dirID)
        return shouldRetry(err)
    })
    if err != nil {
        return errors.Wrap(err, "Rmdir")
    }
    if len(children) != 0 {
        return errors.New("directory not empty")
    }

    // remove it
    err = f.pacer.Call(func() (bool, error) {
        // fs.Debugf(f, "deleting file: %d", dirID)
        err = f.client.Files.Delete(ctx, dirID)
        return shouldRetry(err)
    })
    f.dirCache.FlushDir(dir)
    return err
}

// Precision returns the precision
func (f *Fs) Precision() time.Duration {
    return time.Second
}

// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) (err error) {
    // defer log.Trace(f, "")("err=%v", &err)

    if f.root == "" {
        return errors.New("can't purge root directory")
    }
    err = f.dirCache.FindRoot(ctx, false)
    if err != nil {
        return err
    }

    rootID := atoi(f.dirCache.RootID())
    // Let putio delete the filesystem tree
    err = f.pacer.Call(func() (bool, error) {
        // fs.Debugf(f, "deleting file: %d", rootID)
        err = f.client.Files.Delete(ctx, rootID)
        return shouldRetry(err)
    })
    f.dirCache.ResetRoot()
    return err
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Object, err error) {
    // defer log.Trace(f, "src=%+v, remote=%v", src, remote)("o=%+v, err=%v", &o, &err)
    srcObj, ok := src.(*Object)
    if !ok {
        return nil, fs.ErrorCantCopy
    }
    leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
    if err != nil {
        return nil, err
    }
    err = f.pacer.Call(func() (bool, error) {
        params := url.Values{}
        params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
        params.Set("parent_id", directoryID)
        params.Set("name", leaf)
        req, err := f.client.NewRequest(ctx, "POST", "/v2/files/copy", strings.NewReader(params.Encode()))
        if err != nil {
            return false, err
        }
        req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
        // fs.Debugf(f, "copying file (%d) to parent_id: %s", srcObj.file.ID, directoryID)
        _, err = f.client.Do(req, nil)
        return shouldRetry(err)
    })
    if err != nil {
        return nil, err
    }
    return f.NewObject(ctx, remote)
}

// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Object, err error) {
    // defer log.Trace(f, "src=%+v, remote=%v", src, remote)("o=%+v, err=%v", &o, &err)
    srcObj, ok := src.(*Object)
    if !ok {
        return nil, fs.ErrorCantMove
    }
    leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
    if err != nil {
        return nil, err
    }
    err = f.pacer.Call(func() (bool, error) {
        params := url.Values{}
        params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10))
        params.Set("parent_id", directoryID)
        params.Set("name", leaf)
        req, err := f.client.NewRequest(ctx, "POST", "/v2/files/move", strings.NewReader(params.Encode()))
        if err != nil {
            return false, err
        }
        req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
        // fs.Debugf(f, "moving file (%d) to parent_id: %s", srcObj.file.ID, directoryID)
        _, err = f.client.Do(req, nil)
        return shouldRetry(err)
    })
    if err != nil {
        return nil, err
    }
    return f.NewObject(ctx, remote)
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
    // defer log.Trace(f, "src=%+v, srcRemote=%v, dstRemote", src, srcRemote, dstRemote)("err=%v", &err)
    srcFs, ok := src.(*Fs)
    if !ok {
        return fs.ErrorCantDirMove
    }
    srcPath := path.Join(srcFs.root, srcRemote)
    dstPath := path.Join(f.root, dstRemote)

    // Refuse to move to or from the root
    if srcPath == "" || dstPath == "" {
        return errors.New("can't move root directory")
    }

    // find the root src directory
    err = srcFs.dirCache.FindRoot(ctx, false)
    if err != nil {
        return err
    }

    // find the root dst directory
    if dstRemote != "" {
        err = f.dirCache.FindRoot(ctx, true)
        if err != nil {
            return err
        }
    } else {
        if f.dirCache.FoundRoot() {
            return fs.ErrorDirExists
        }
    }

    // Find ID of dst parent, creating subdirs if necessary
    var leaf, dstDirectoryID string
    findPath := dstRemote
    if dstRemote == "" {
        findPath = f.root
    }
    leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true)
    if err != nil {
        return err
    }

    // Check destination does not exist
    if dstRemote != "" {
        _, err = f.dirCache.FindDir(ctx, dstRemote, false)
        if err == fs.ErrorDirNotFound {
            // OK
        } else if err != nil {
            return err
        } else {
            return fs.ErrorDirExists
        }
    }

    // Find ID of src
    srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
    if err != nil {
        return err
    }

    err = f.pacer.Call(func() (bool, error) {
        params := url.Values{}
        params.Set("file_id", srcID)
        params.Set("parent_id", dstDirectoryID)
        params.Set("name", leaf)
        req, err := f.client.NewRequest(ctx, "POST", "/v2/files/move", strings.NewReader(params.Encode()))
        if err != nil {
            return false, err
        }
        req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
        // fs.Debugf(f, "moving file (%s) to parent_id: %s", srcID, dstDirectoryID)
        _, err = f.client.Do(req, nil)
        return shouldRetry(err)
    })
    srcFs.dirCache.FlushDir(srcRemote)
    return err
}

// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
    // defer log.Trace(f, "")("usage=%+v, err=%v", usage, &err)
    var ai putio.AccountInfo
    err = f.pacer.Call(func() (bool, error) {
        // fs.Debugf(f, "getting account info")
        ai, err = f.client.Account.Info(ctx)
        return shouldRetry(err)
    })
    if err != nil {
        return nil, errors.Wrap(err, "about failed")
    }
    return &fs.Usage{
        Total: fs.NewUsageValue(ai.Disk.Size),  // quota of bytes that can be used
        Used:  fs.NewUsageValue(ai.Disk.Used),  // bytes in use
        Free:  fs.NewUsageValue(ai.Disk.Avail), // bytes which can be uploaded before reaching the quota
    }, nil
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
    return hash.Set(hash.CRC32)
}

// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
    // defer log.Trace(f, "")("")
    f.dirCache.ResetRoot()
}

// CleanUp the trash in the Fs
func (f *Fs) CleanUp(ctx context.Context) (err error) {
    // defer log.Trace(f, "")("err=%v", &err)
    return f.pacer.Call(func() (bool, error) {
        req, err := f.client.NewRequest(ctx, "POST", "/v2/trash/empty", nil)
        if err != nil {
            return false, err
        }
        // fs.Debugf(f, "emptying trash")
        _, err = f.client.Do(req, nil)
        return shouldRetry(err)
    })
}
@@ -1,276 +0,0 @@
package putio

import (
    "context"
    "io"
    "net/http"
    "net/url"
    "path"
    "strconv"
    "time"

    "github.com/pkg/errors"
    "github.com/putdotio/go-putio/putio"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/hash"
)

// Object describes a Putio object
//
// Putio Objects always have full metadata
type Object struct {
    fs      *Fs // what this object is part of
    file    *putio.File
    remote  string // The remote path
    modtime time.Time
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
    // defer log.Trace(f, "remote=%v", remote)("o=%+v, err=%v", &o, &err)
    obj := &Object{
        fs:     f,
        remote: remote,
    }
    err = obj.readEntryAndSetMetadata(ctx)
    if err != nil {
        return nil, err
    }
    return obj, err
}

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info putio.File) (o fs.Object, err error) {
    // defer log.Trace(f, "remote=%v, info=+v", remote, &info)("o=%+v, err=%v", &o, &err)
    obj := &Object{
        fs:     f,
        remote: remote,
    }
    err = obj.setMetadataFromEntry(info)
    if err != nil {
        return nil, err
    }
    return obj, err
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
    return o.fs
}

// Return a string version
func (o *Object) String() string {
    if o == nil {
        return "<nil>"
    }
    return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
    return o.remote
}

// Hash returns the CRC32 checksum of an object
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
    if t != hash.CRC32 {
        return "", hash.ErrUnsupported
    }
    err := o.readEntryAndSetMetadata(ctx)
    if err != nil {
        return "", errors.Wrap(err, "failed to read hash from metadata")
    }
    return o.file.CRC32, nil
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
    if o.file == nil {
        return 0
    }
    return o.file.Size
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
    if o.file == nil {
        return ""
    }
    return itoa(o.file.ID)
}

// MimeType returns the content type of the Object if
// known, or "" if not
func (o *Object) MimeType(ctx context.Context) string {
    err := o.readEntryAndSetMetadata(ctx)
    if err != nil {
        return ""
    }
    return o.file.ContentType
}

// setMetadataFromEntry sets the fs data from a putio.File
//
// This isn't a complete set of metadata and has an inaccurate date
func (o *Object) setMetadataFromEntry(info putio.File) error {
    o.file = &info
    o.modtime = info.UpdatedAt.Time
    return nil
}

// Reads the entry for a file from putio
func (o *Object) readEntry(ctx context.Context) (f *putio.File, err error) {
    // defer log.Trace(o, "")("f=%+v, err=%v", f, &err)
    leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
    if err != nil {
        if err == fs.ErrorDirNotFound {
            return nil, fs.ErrorObjectNotFound
        }
        return nil, err
    }
    var resp struct {
        File putio.File `json:"file"`
    }
    err = o.fs.pacer.Call(func() (bool, error) {
        // fs.Debugf(o, "requesting child. directoryID: %s, name: %s", directoryID, leaf)
        req, err := o.fs.client.NewRequest(ctx, "GET", "/v2/files/"+directoryID+"/child?name="+url.PathEscape(leaf), nil)
        if err != nil {
            return false, err
        }
        _, err = o.fs.client.Do(req, &resp)
        if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode == 404 {
            return false, fs.ErrorObjectNotFound
        }
        return shouldRetry(err)
    })
    return &resp.File, err
}

// Read entry if not set and set metadata from it
func (o *Object) readEntryAndSetMetadata(ctx context.Context) error {
    if o.file != nil {
        return nil
    }
    entry, err := o.readEntry(ctx)
    if err != nil {
        return err
    }
    return o.setMetadataFromEntry(*entry)
}

// Returns the remote path for the object
func (o *Object) remotePath() string {
    return path.Join(o.fs.root, o.remote)
}

// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
    if o.modtime.IsZero() {
        err := o.readEntryAndSetMetadata(ctx)
        if err != nil {
            fs.Debugf(o, "Failed to read metadata: %v", err)
            return time.Now()
        }
    }
    return o.modtime
}

// SetModTime sets the modification time of the local fs object
//
// Commits the datastore
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
    // defer log.Trace(o, "modTime=%v", modTime.String())("err=%v", &err)
    req, err := o.fs.client.NewRequest(ctx, "POST", "/v2/files/touch?file_id="+strconv.FormatInt(o.file.ID, 10)+"&updated_at="+url.QueryEscape(modTime.Format(time.RFC3339)), nil)
    if err != nil {
        return err
    }
    // fs.Debugf(o, "setting modtime: %s", modTime.String())
    _, err = o.fs.client.Do(req, nil)
    if err != nil {
        return err
    }
    o.modtime = modTime
    if o.file != nil {
        o.file.UpdatedAt.Time = modTime
    }
    return nil
}

// Storable returns whether this object is storable
func (o *Object) Storable() bool {
    return true
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
    // defer log.Trace(o, "")("err=%v", &err)
    var storageURL string
    err = o.fs.pacer.Call(func() (bool, error) {
        storageURL, err = o.fs.client.Files.URL(ctx, o.file.ID, true)
        return shouldRetry(err)
    })
    if err != nil {
        return
    }

    var resp *http.Response
    headers := fs.OpenOptionHeaders(options)
    err = o.fs.pacer.Call(func() (bool, error) {
        req, _ := http.NewRequest(http.MethodGet, storageURL, nil)
        req.Header.Set("User-Agent", o.fs.client.UserAgent)

        // merge headers with extra headers
        for header, value := range headers {
            req.Header.Set(header, value)
        }
        // fs.Debugf(o, "opening file: id=%d", o.file.ID)
        resp, err = http.DefaultClient.Do(req)
        return shouldRetry(err)
    })
    if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode >= 400 && perr.Response.StatusCode <= 499 {
        _ = resp.Body.Close()
        return nil, fserrors.NoRetryError(err)
    }
    return resp.Body, err
}

// Update the already existing object
//
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
    // defer log.Trace(o, "src=%+v", src)("err=%v", &err)
    remote := o.remotePath()
    if ignoredFiles.MatchString(remote) {
        fs.Logf(o, "File name disallowed - not uploading")
        return nil
    }
    err = o.Remove(ctx)
    if err != nil {
        return err
    }
    newObj, err := o.fs.PutUnchecked(ctx, in, src, options...)
    if err != nil {
        return err
    }
    *o = *(newObj.(*Object))
    return err
}

// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
    // defer log.Trace(o, "")("err=%v", &err)
    return o.fs.pacer.Call(func() (bool, error) {
        // fs.Debugf(o, "removing file: id=%d", o.file.ID)
        err = o.fs.client.Files.Delete(ctx, o.file.ID)
        return shouldRetry(err)
    })
}
@@ -1,72 +0,0 @@
package putio

import (
    "log"
    "regexp"
    "time"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/obscure"
    "github.com/rclone/rclone/lib/dircache"
    "github.com/rclone/rclone/lib/oauthutil"
    "golang.org/x/oauth2"
)

// Constants
const (
    rcloneClientID             = "4131"
    rcloneObscuredClientSecret = "cMwrjWVmrHZp3gf1ZpCrlyGAmPpB-YY5BbVnO1fj-G9evcd8"
    minSleep                   = 10 * time.Millisecond
    maxSleep                   = 2 * time.Second
    decayConstant              = 2 // bigger for slower decay, exponential
    defaultChunkSize           = 48 * fs.MebiByte
)

var (
    // Description of how to auth for this app
    putioConfig = &oauth2.Config{
        Scopes: []string{},
        Endpoint: oauth2.Endpoint{
            AuthURL:  "https://api.put.io/v2/oauth2/authenticate",
            TokenURL: "https://api.put.io/v2/oauth2/access_token",
        },
        ClientID:     rcloneClientID,
        ClientSecret: obscure.MustReveal(rcloneObscuredClientSecret),
        RedirectURL:  oauthutil.RedirectLocalhostURL,
    }
    // A regexp matching path names for ignoring unnecessary files
    ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r)$`)
)

// Register with Fs
func init() {
    fs.Register(&fs.RegInfo{
        Name:        "putio",
        Description: "Put.io",
        NewFs:       NewFs,
        Config: func(name string, m configmap.Mapper) {
            err := oauthutil.ConfigNoOffline("putio", name, m, putioConfig)
            if err != nil {
                log.Fatalf("Failed to configure token: %v", err)
            }
        },
    })
}

// Check the interfaces are satisfied
var (
    _ fs.Fs              = (*Fs)(nil)
    _ fs.Purger          = (*Fs)(nil)
    _ fs.PutUncheckeder  = (*Fs)(nil)
    _ fs.Abouter         = (*Fs)(nil)
    _ fs.Mover           = (*Fs)(nil)
    _ fs.DirMover        = (*Fs)(nil)
    _ dircache.DirCacher = (*Fs)(nil)
    _ fs.DirCacheFlusher = (*Fs)(nil)
    _ fs.CleanUpper      = (*Fs)(nil)
    _ fs.Copier          = (*Fs)(nil)
    _ fs.Object          = (*Object)(nil)
    _ fs.MimeTyper       = (*Object)(nil)
    _ fs.IDer            = (*Object)(nil)
)
@@ -1,16 +0,0 @@
// Test Put.io filesystem interface
package putio

import (
    "testing"

    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestPutio:",
        NilObject:  (*Object)(nil),
    })
}
@@ -14,6 +14,7 @@ import (
 	"regexp"
 	"strconv"
 	"strings"
+	"sync"
 	"time"

 	"github.com/pkg/errors"
@@ -23,10 +24,9 @@ import (
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/walk"
-	"github.com/rclone/rclone/lib/bucket"
-	qsConfig "github.com/yunify/qingstor-sdk-go/v3/config"
-	qsErr "github.com/yunify/qingstor-sdk-go/v3/request/errors"
-	qs "github.com/yunify/qingstor-sdk-go/v3/service"
+	qsConfig "github.com/yunify/qingstor-sdk-go/config"
+	qsErr "github.com/yunify/qingstor-sdk-go/request/errors"
+	qs "github.com/yunify/qingstor-sdk-go/service"
 )

 // Register with Fs
@@ -146,15 +146,16 @@ type Options struct {

 // Fs represents a remote qingstor server
 type Fs struct {
-	name          string        // The name of the remote
-	root          string        // The root is a subdir, is a special object
-	opt           Options       // parsed options
-	features      *fs.Features  // optional features
-	svc           *qs.Service   // The connection to the qingstor server
-	zone          string        // The zone we are working on
-	rootBucket    string        // bucket part of root (if any)
-	rootDirectory string        // directory part of root (if any)
-	cache         *bucket.Cache // cache for bucket creation status
+	name          string       // The name of the remote
+	root          string       // The root is a subdir, is a special object
+	opt           Options      // parsed options
+	features      *fs.Features // optional features
+	svc           *qs.Service  // The connection to the qingstor server
+	zone          string       // The zone we are working on
+	bucket        string       // The bucket we are working on
+	bucketOKMu    sync.Mutex   // mutex to protect bucketOK and bucketDeleted
+	bucketOK      bool         // true if we have created the bucket
+	bucketDeleted bool         // true if we have deleted the bucket
 }

 // Object describes a qingstor object
@@ -175,23 +176,22 @@ type Object struct {

 // ------------------------------------------------------------

-// parsePath parses a remote 'url'
-func parsePath(path string) (root string) {
-	root = strings.Trim(path, "/")
+// Pattern to match a qingstor path
+var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
+
+// parseParse parses a qingstor 'url'
+func qsParsePath(path string) (bucket, key string, err error) {
+	// Pattern to match a qingstor path
+	parts := matcher.FindStringSubmatch(path)
+	if parts == nil {
+		err = errors.Errorf("Couldn't parse bucket out of qingstor path %q", path)
+	} else {
+		bucket, key = parts[1], parts[2]
+		key = strings.Trim(key, "/")
+	}
 	return
 }

-// split returns bucket and bucketPath from the rootRelativePath
-// relative to f.root
-func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
-	return bucket.Split(path.Join(f.root, rootRelativePath))
-}
-
-// split returns bucket and bucketPath from the object
-func (o *Object) split() (bucket, bucketPath string) {
-	return o.fs.split(o.remote)
-}
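Both sides of this hunk parse a bucket:path style remote: v1.49.5 delegates to lib/bucket's Split helper, while the older code keeps a local regexp. A self-contained sketch of the regexp approach (parseBucketPath is a hypothetical name for illustration, not from either side of the diff):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same pattern as the diff: group 1 is the bucket, group 2 the rest.
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)

func parseBucketPath(p string) (bucket, key string) {
	// This pattern matches any input (both groups may be empty),
	// so FindStringSubmatch never returns nil here.
	parts := matcher.FindStringSubmatch(p)
	return parts[1], strings.Trim(parts[2], "/")
}

func main() {
	b, k := parseBucketPath("/mybucket/dir/file.txt")
	fmt.Printf("bucket=%q key=%q\n", b, k) // bucket="mybucket" key="dir/file.txt"
}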
 // Split an URL into three parts: protocol host and port
 func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
 	/*
@@ -301,12 +301,6 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
 	return
 }

-// setRoot changes the root of the Fs
-func (f *Fs) setRoot(root string) {
-	f.root = parsePath(root)
-	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
-}
-
 // NewFs constructs an Fs from the path, bucket:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
@@ -323,6 +317,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "qingstor: upload cutoff")
 	}
+	bucket, key, err := qsParsePath(root)
+	if err != nil {
+		return nil, err
+	}
 	svc, err := qsServiceConnection(opt)
 	if err != nil {
 		return nil, err
@@ -333,33 +331,36 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	}

 	f := &Fs{
-		name:  name,
-		opt:   *opt,
-		svc:   svc,
-		zone:  opt.Zone,
-		cache: bucket.NewCache(),
+		name:   name,
+		root:   key,
+		opt:    *opt,
+		svc:    svc,
+		zone:   opt.Zone,
+		bucket: bucket,
 	}
-	f.setRoot(root)
 	f.features = (&fs.Features{
-		ReadMimeType:      true,
-		WriteMimeType:     true,
-		BucketBased:       true,
-		BucketBasedRootOK: true,
+		ReadMimeType:  true,
+		WriteMimeType: true,
+		BucketBased:   true,
 	}).Fill(f)
-
-	if f.rootBucket != "" && f.rootDirectory != "" {
-		// Check to see if the object exists
-		bucketInit, err := svc.Bucket(f.rootBucket, opt.Zone)
+	if f.root != "" {
+		if !strings.HasSuffix(f.root, "/") {
+			f.root += "/"
+		}
+		//Check to see if the object exists
+		bucketInit, err := svc.Bucket(bucket, opt.Zone)
 		if err != nil {
 			return nil, err
 		}
-		_, err = bucketInit.HeadObject(f.rootDirectory, &qs.HeadObjectInput{})
+		_, err = bucketInit.HeadObject(key, &qs.HeadObjectInput{})
 		if err == nil {
-			newRoot := path.Dir(f.root)
-			if newRoot == "." {
-				newRoot = ""
+			f.root = path.Dir(key)
+			if f.root == "." {
+				f.root = ""
 			} else {
 				f.root += "/"
 			}
-			f.setRoot(newRoot)
 			// return an error with an fs which points to the parent
 			return f, fs.ErrorIsFile
 		}
 	}
@@ -374,18 +375,18 @@ func (f *Fs) Name() string {

 // Root of the remote (as passed into NewFs)
 func (f *Fs) Root() string {
-	return f.root
+	if f.root == "" {
+		return f.bucket
+	}
+	return f.bucket + "/" + f.root
 }

 // String converts this Fs to a string
 func (f *Fs) String() string {
-	if f.rootBucket == "" {
-		return fmt.Sprintf("QingStor root")
-	}
-	if f.rootDirectory == "" {
-		return fmt.Sprintf("QingStor bucket %s", f.rootBucket)
-	}
-	return fmt.Sprintf("QingStor bucket %s path %s", f.rootBucket, f.rootDirectory)
+	if f.root == "" {
+		return fmt.Sprintf("QingStor bucket %s", f.bucket)
+	}
+	return fmt.Sprintf("QingStor bucket %s root %s", f.bucket, f.root)
 }

 // Precision of the remote
@@ -425,8 +426,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 //
 // If it isn't possible then return fs.ErrorCantCopy
 func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
-	dstBucket, dstPath := f.split(remote)
-	err := f.makeBucket(ctx, dstBucket)
+	err := f.Mkdir(ctx, "")
 	if err != nil {
 		return nil, err
 	}
@@ -435,21 +435,22 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
-	srcBucket, srcPath := srcObj.split()
-	source := path.Join("/", srcBucket, srcPath)
+	srcFs := srcObj.fs
+	key := f.root + remote
+	source := path.Join("/"+srcFs.bucket, srcFs.root+srcObj.remote)

-	// fs.Debugf(f, "Copied, source key is: %s, and dst key is: %s", source, key)
+	fs.Debugf(f, "Copied, source key is: %s, and dst key is: %s", source, key)
 	req := qs.PutObjectInput{
 		XQSCopySource: &source,
 	}
-	bucketInit, err := f.svc.Bucket(dstBucket, f.zone)
+	bucketInit, err := f.svc.Bucket(f.bucket, f.zone)

 	if err != nil {
 		return nil, err
 	}
-	_, err = bucketInit.PutObject(dstPath, &req)
+	_, err = bucketInit.PutObject(key, &req)
 	if err != nil {
-		// fs.Debugf(f, "Copy Failed, API Error: %v", err)
+		fs.Debugf(f, "Copy Failed, API Error: %v", err)
 		return nil, err
 	}
 	return f.NewObject(ctx, remote)
@@ -510,27 +511,29 @@ type listFn func(remote string, object *qs.KeyType, isDirectory bool) error
 // dir is the starting directory, "" for root
 //
 // Set recurse to read sub directories
-func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) error {
-	if prefix != "" {
-		prefix += "/"
-	}
-	if directory != "" {
-		directory += "/"
+func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) error {
+	prefix := f.root
+	if dir != "" {
+		prefix += dir + "/"
 	}

 	delimiter := ""
 	if !recurse {
 		delimiter = "/"
 	}

 	maxLimit := int(listLimitSize)
 	var marker *string

 	for {
-		bucketInit, err := f.svc.Bucket(bucket, f.zone)
+		bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
 		if err != nil {
 			return err
 		}
 		// FIXME need to implement ALL loop
 		req := qs.ListObjectsInput{
 			Delimiter: &delimiter,
-			Prefix:    &directory,
+			Prefix:    &prefix,
 			Limit:     &maxLimit,
 			Marker:    marker,
 		}
@@ -543,6 +546,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 			}
 			return err
 		}
+		rootLength := len(f.root)
 		if !recurse {
 			for _, commonPrefix := range resp.CommonPrefixes {
 				if commonPrefix == nil {
@@ -550,17 +554,15 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 					continue
 				}
 				remote := *commonPrefix
-				if !strings.HasPrefix(remote, prefix) {
+				if !strings.HasPrefix(remote, f.root) {
 					fs.Logf(f, "Odd name received %q", remote)
 					continue
 				}
-				remote = remote[len(prefix):]
-				if addBucket {
-					remote = path.Join(bucket, remote)
-				}
+				remote = remote[rootLength:]
 				if strings.HasSuffix(remote, "/") {
 					remote = remote[:len(remote)-1]
 				}

 				err = fn(remote, &qs.KeyType{Key: &remote}, true)
 				if err != nil {
 					return err
@@ -570,25 +572,19 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck

 		for _, object := range resp.Keys {
 			key := qs.StringValue(object.Key)
-			if !strings.HasPrefix(key, prefix) {
+			if !strings.HasPrefix(key, f.root) {
 				fs.Logf(f, "Odd name received %q", key)
 				continue
 			}
-			remote := key[len(prefix):]
-			if addBucket {
-				remote = path.Join(bucket, remote)
-			}
+			remote := key[rootLength:]
 			err = fn(remote, object, false)
 			if err != nil {
 				return err
 			}
 		}
 		if resp.HasMore != nil && !*resp.HasMore {
 			break
 		}
-		// Use NextMarker if set, otherwise use last Key
 		if resp.NextMarker == nil || *resp.NextMarker == "" {
 			fs.Errorf(f, "Expecting NextMarker but didn't find one")
+			//marker = resp.Keys[len(resp.Keys)-1].Key
 			break
 		} else {
 			marker = resp.NextMarker
@@ -614,10 +610,20 @@ func (f *Fs) itemToDirEntry(remote string, object *qs.KeyType, isDirectory bool)
 	return o, nil
 }

+// mark the bucket as being OK
+func (f *Fs) markBucketOK() {
+	if f.bucket != "" {
+		f.bucketOKMu.Lock()
+		f.bucketOK = true
+		f.bucketDeleted = false
+		f.bucketOKMu.Unlock()
+	}
+}
+
 // listDir lists files and directories to out
-func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	// List the objects and directories
-	err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *qs.KeyType, isDirectory bool) error {
+	err = f.list(ctx, dir, false, func(remote string, object *qs.KeyType, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(remote, object, isDirectory)
 		if err != nil {
 			return err
@@ -631,12 +637,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
 		return nil, err
 	}
 	// bucket must be present if listing succeeded
-	f.cache.MarkOK(bucket)
+	f.markBucketOK()
 	return entries, nil
 }

 // listBuckets lists the buckets to out
-func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
+func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
+	if dir != "" {
+		return nil, fs.ErrorListBucketRequired
+	}
+
 	req := qs.ListBucketsInput{
 		Location: &f.zone,
 	}
@@ -662,14 +672,10 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	bucket, directory := f.split(dir)
-	if bucket == "" {
-		if directory != "" {
-			return nil, fs.ErrorListBucketRequired
-		}
-		return f.listBuckets(ctx)
+	if f.bucket == "" {
+		return f.listBuckets(dir)
 	}
-	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
+	return f.listDir(ctx, dir)
 }

 // ListR lists the objects and directories of the Fs starting
@@ -689,105 +695,106 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // Don't implement this unless you have a more efficient way
 // of listing recursively that doing a directory traversal.
 func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
-	bucket, directory := f.split(dir)
+	if f.bucket == "" {
+		return fs.ErrorListBucketRequired
+	}
 	list := walk.NewListRHelper(callback)
-	listR := func(bucket, directory, prefix string, addBucket bool) error {
-		return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
-			entry, err := f.itemToDirEntry(remote, object, isDirectory)
-			if err != nil {
-				return err
-			}
-			return list.Add(entry)
-		})
-	}
-	if bucket == "" {
-		entries, err := f.listBuckets(ctx)
-		if err != nil {
-			return err
-		}
-		for _, entry := range entries {
-			err = list.Add(entry)
-			if err != nil {
-				return err
-			}
-			bucket := entry.Remote()
-			err = listR(bucket, "", f.rootDirectory, true)
-			if err != nil {
-				return err
-			}
-			// bucket must be present if listing succeeded
-			f.cache.MarkOK(bucket)
-		}
-	} else {
-		err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
-		if err != nil {
-			return err
-		}
-		// bucket must be present if listing succeeded
-		f.cache.MarkOK(bucket)
+	err = f.list(ctx, dir, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
+		entry, err := f.itemToDirEntry(remote, object, isDirectory)
+		if err != nil {
+			return err
+		}
+		return list.Add(entry)
+	})
+	if err != nil {
+		return err
 	}
+	// bucket must be present if listing succeeded
+	f.markBucketOK()
 	return list.Flush()
 }

+// Check if the bucket exists
+func (f *Fs) dirExists() (bool, error) {
+	bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
+	if err != nil {
+		return false, err
+	}
+
+	_, err = bucketInit.Head()
+	if err == nil {
+		return true, nil
+	}
+
+	if e, ok := err.(*qsErr.QingStorError); ok {
+		if e.StatusCode == http.StatusNotFound {
+			err = nil
+		}
+	}
+	return false, err
+}
+
 // Mkdir creates the bucket if it doesn't exist
 func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-	bucket, _ := f.split(dir)
-	return f.makeBucket(ctx, bucket)
-}
-
-// makeBucket creates the bucket if it doesn't exist
-func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
-	return f.cache.Create(bucket, func() error {
-		bucketInit, err := f.svc.Bucket(bucket, f.zone)
-		if err != nil {
-			return err
-		}
-		/* When delete a bucket, qingstor need about 60 second to sync status;
-		So, need wait for it sync end if we try to operation a just deleted bucket
-		*/
-		retries := 0
-		for retries <= 120 {
-			statistics, err := bucketInit.GetStatistics()
-			if statistics == nil || err != nil {
-				break
-			}
-			switch *statistics.Status {
-			case "deleted":
-				fs.Debugf(f, "Wait for qingstor sync bucket status, retries: %d", retries)
-				time.Sleep(time.Second * 1)
-				retries++
-				continue
-			default:
-				break
-			}
-			break
-		}
-
-		_, err = bucketInit.Put()
-		if e, ok := err.(*qsErr.QingStorError); ok {
-			if e.StatusCode == http.StatusConflict {
-				err = nil
-			}
-		}
-		return err
-	}, nil)
-}
+	f.bucketOKMu.Lock()
+	defer f.bucketOKMu.Unlock()
+	if f.bucketOK {
+		return nil
+	}
+
+	bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
+	if err != nil {
+		return err
+	}
+
+	if !f.bucketDeleted {
+		exists, err := f.dirExists()
+		if err == nil {
+			f.bucketOK = exists
+		}
+		if err != nil || exists {
+			return err
+		}
+	}
+	/* When delete a bucket, qingstor need about 60 second to sync status;
+	So, need wait for it sync end if we try to operation a just deleted bucket
+	*/
+	wasDeleted := false
+	retries := 0
+	for retries <= 120 {
+		statistics, err := bucketInit.GetStatistics()
+		if statistics == nil || err != nil {
+			break
+		}
+		switch *statistics.Status {
+		case "deleted":
+			fs.Debugf(f, "Wait for qingstor bucket to be deleted, retries: %d", retries)
+			time.Sleep(time.Second * 1)
+			retries++
+			wasDeleted = true
+			continue
+		default:
+			break
+		}
+		break
+	}

+	retries = 0
+	for retries <= 120 {
+		_, err = bucketInit.Put()
+		if e, ok := err.(*qsErr.QingStorError); ok {
+			if e.StatusCode == http.StatusConflict {
+				if wasDeleted {
+					fs.Debugf(f, "Wait for qingstor bucket to be creatable, retries: %d", retries)
+					time.Sleep(time.Second * 1)
+					retries++
+					continue
+				}
+				err = nil
+			}
+		}
+		break
+	}

+	if err == nil {
+		f.bucketOK = true
+		f.bucketDeleted = false
+	}

+	return err
+}
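The hunk above is the heart of this compare: v1.49.5 tracks bucket creation through lib/bucket's Cache, while the older side keeps per-Fs bucketOK/bucketDeleted flags behind a mutex. A sketch of the cache-based pattern, with semantics inferred from the calls visible in this diff (Create, MarkOK, Remove); the closures are placeholders, not a real provider binding:

package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/bucket"
)

func main() {
	cache := bucket.NewCache()

	// Create only invokes the create closure when the bucket isn't already
	// known to exist; the second closure (nil is allowed, as the qingstor
	// side passes) lets it probe for existence first.
	err := cache.Create("example-bucket", func() error {
		fmt.Println("would call the provider's create-bucket API here")
		return nil
	}, nil)
	if err != nil {
		fmt.Println("create failed:", err)
	}

	// After a successful listing the bucket must exist, so mark it OK
	// to skip future creation attempts.
	cache.MarkOK("example-bucket")
}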
-// bucketIsEmpty check if the bucket empty
-func (f *Fs) bucketIsEmpty(bucket string) (bool, error) {
-	bucketInit, err := f.svc.Bucket(bucket, f.zone)
+// dirIsEmpty check if the bucket empty
+func (f *Fs) dirIsEmpty() (bool, error) {
+	bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
 	if err != nil {
 		return true, err
 	}
@@ -805,64 +812,71 @@ func (f *Fs) bucketIsEmpty(bucket string) (bool, error) {

 // Rmdir delete a bucket
 func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-	bucket, directory := f.split(dir)
-	if bucket == "" || directory != "" {
+	f.bucketOKMu.Lock()
+	defer f.bucketOKMu.Unlock()
+	if f.root != "" || dir != "" {
 		return nil
 	}
-	isEmpty, err := f.bucketIsEmpty(bucket)
+
+	isEmpty, err := f.dirIsEmpty()
 	if err != nil {
 		return err
 	}
 	if !isEmpty {
-		// fs.Debugf(f, "The bucket %s you tried to delete not empty.", bucket)
+		fs.Debugf(f, "The bucket %s you tried to delete not empty.", f.bucket)
 		return errors.New("BucketNotEmpty: The bucket you tried to delete is not empty")
 	}
-	return f.cache.Remove(bucket, func() error {
-		// fs.Debugf(f, "Deleting the bucket %s", bucket)
-		bucketInit, err := f.svc.Bucket(bucket, f.zone)
-		if err != nil {
-			return err
-		}
-		retries := 0
-		for retries <= 10 {
-			_, delErr := bucketInit.Delete()
-			if delErr != nil {
-				if e, ok := delErr.(*qsErr.QingStorError); ok {
-					switch e.Code {
-					// The status of "lease" takes a few seconds to "ready" when creating a new bucket
-					// wait for lease status ready
-					case "lease_not_ready":
-						fs.Debugf(f, "QingStor bucket lease not ready, retries: %d", retries)
-						retries++
-						time.Sleep(time.Second * 1)
-						continue
-					default:
-						err = e
-						break
-					}
-				}
-			} else {
-				err = delErr
-			}
-			break
-		}
-		return err
-	})
-}
+
+	fs.Debugf(f, "Tried to delete the bucket %s", f.bucket)
+	bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
+	if err != nil {
+		return err
+	}
+	retries := 0
+	for retries <= 10 {
+		_, delErr := bucketInit.Delete()
+		if delErr != nil {
+			if e, ok := delErr.(*qsErr.QingStorError); ok {
+				switch e.Code {
+				// The status of "lease" takes a few seconds to "ready" when creating a new bucket
+				// wait for lease status ready
+				case "lease_not_ready":
+					fs.Debugf(f, "QingStor bucket lease not ready, retries: %d", retries)
+					retries++
+					time.Sleep(time.Second * 1)
+					continue
+				default:
+					err = e
+					break
+				}
+			}
+		} else {
+			err = delErr
+		}
+		break
+	}
+
+	if err == nil {
+		f.bucketOK = false
+		f.bucketDeleted = true
+	}
+	return err
+}

 // readMetaData gets the metadata if it hasn't already been fetched
 //
 // it also sets the info
 func (o *Object) readMetaData() (err error) {
-	bucket, bucketPath := o.split()
-	bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
+	bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
 	if err != nil {
 		return err
 	}
-	// fs.Debugf(o, "Read metadata of key: %s", key)
-	resp, err := bucketInit.HeadObject(bucketPath, &qs.HeadObjectInput{})
+
+	key := o.fs.root + o.remote
+	fs.Debugf(o, "Read metadata of key: %s", key)
+	resp, err := bucketInit.HeadObject(key, &qs.HeadObjectInput{})
 	if err != nil {
-		// fs.Debugf(o, "Read metadata failed, API Error: %v", err)
+		fs.Debugf(o, "Read metadata failed, API Error: %v", err)
 		if e, ok := err.(*qsErr.QingStorError); ok {
 			if e.StatusCode == http.StatusNotFound {
 				return fs.ErrorObjectNotFound
@@ -924,10 +938,10 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 		return nil
 	}
 	// Copy the object to itself to update the metadata
-	bucket, bucketPath := o.split()
-	sourceKey := path.Join("/", bucket, bucketPath)
+	key := o.fs.root + o.remote
+	sourceKey := path.Join("/", o.fs.bucket, key)

-	bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
+	bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
 	if err != nil {
 		return err
 	}
@@ -936,21 +950,20 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 		XQSCopySource: &sourceKey,
 		ContentType:   &mimeType,
 	}
-	_, err = bucketInit.PutObject(bucketPath, &req)
+	_, err = bucketInit.PutObject(key, &req)

 	return err
 }

 // Open opens the file for read. Call Close() on the returned io.ReadCloser
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
-	bucket, bucketPath := o.split()
-	bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
+	bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
 	if err != nil {
 		return nil, err
 	}

+	key := o.fs.root + o.remote
 	req := qs.GetObjectInput{}
 	fs.FixRangeOption(options, o.size)
 	for _, option := range options {
 		switch option.(type) {
 		case *fs.RangeOption, *fs.SeekOption:
@@ -962,7 +975,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
 			}
 		}
 	}
-	resp, err := bucketInit.GetObject(bucketPath, &req)
+	resp, err := bucketInit.GetObject(key, &req)
 	if err != nil {
 		return nil, err
 	}
@@ -972,21 +985,21 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
 // Update in to the object
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 	// The maximum size of upload object is multipartUploadSize * MaxMultipleParts
-	bucket, bucketPath := o.split()
-	err := o.fs.makeBucket(ctx, bucket)
+	err := o.fs.Mkdir(ctx, "")
 	if err != nil {
 		return err
 	}

+	key := o.fs.root + o.remote
 	// Guess the content type
 	mimeType := fs.MimeType(ctx, src)

 	req := uploadInput{
 		body:        in,
 		qsSvc:       o.fs.svc,
-		bucket:      bucket,
+		bucket:      o.fs.bucket,
 		zone:        o.fs.zone,
-		key:         bucketPath,
+		key:         key,
 		mimeType:    mimeType,
 		partSize:    int64(o.fs.opt.ChunkSize),
 		concurrency: o.fs.opt.UploadConcurrency,
@@ -1010,12 +1023,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

 // Remove this object
 func (o *Object) Remove(ctx context.Context) error {
-	bucket, bucketPath := o.split()
-	bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
+	bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
 	if err != nil {
 		return err
 	}
-	_, err = bucketInit.DeleteObject(bucketPath)
+
+	key := o.fs.root + o.remote
+	_, err = bucketInit.DeleteObject(key)
 	return err
 }

@@ -15,7 +15,7 @@ import (

 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
-	qs "github.com/yunify/qingstor-sdk-go/v3/service"
+	qs "github.com/yunify/qingstor-sdk-go/service"
 )

 const (
515 backend/s3/s3.go
@@ -23,6 +23,7 @@ import (
 	"path"
 	"regexp"
 	"strings"
+	"sync"
 	"time"

 	"github.com/aws/aws-sdk-go/aws"
@@ -45,7 +46,6 @@ import (
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/walk"
-	"github.com/rclone/rclone/lib/bucket"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/rest"
 )
@@ -798,9 +798,10 @@ type Fs struct {
 	features      *fs.Features     // optional features
 	c             *s3.S3           // the connection to the s3 server
 	ses           *session.Session // the s3 session
-	rootBucket    string           // bucket part of root (if any)
-	rootDirectory string           // directory part of root (if any)
-	cache         *bucket.Cache    // cache for bucket creation status
+	bucket        string           // the bucket we are working on
+	bucketOKMu    sync.Mutex       // mutex to protect bucket OK
+	bucketOK      bool             // true if we have created the bucket
+	bucketDeleted bool             // true if we have deleted the bucket
 	pacer         *fs.Pacer        // To pace the API calls
 	srv           *http.Client     // a plain http client
 }
@@ -818,7 +819,6 @@ type Object struct {
 	lastModified time.Time          // Last modified
 	meta         map[string]*string // The object metadata if known - may be nil
 	mimeType     string             // MimeType of object - may be ""
-	storageClass string             // eg GLACIER
 }

 // ------------------------------------------------------------
@@ -830,18 +830,18 @@ func (f *Fs) Name() string {

 // Root of the remote (as passed into NewFs)
 func (f *Fs) Root() string {
-	return f.root
+	if f.root == "" {
+		return f.bucket
+	}
+	return f.bucket + "/" + f.root
 }

 // String converts this Fs to a string
 func (f *Fs) String() string {
-	if f.rootBucket == "" {
-		return fmt.Sprintf("S3 root")
-	}
-	if f.rootDirectory == "" {
-		return fmt.Sprintf("S3 bucket %s", f.rootBucket)
-	}
-	return fmt.Sprintf("S3 bucket %s path %s", f.rootBucket, f.rootDirectory)
+	if f.root == "" {
+		return fmt.Sprintf("S3 bucket %s", f.bucket)
+	}
+	return fmt.Sprintf("S3 bucket %s path %s", f.bucket, f.root)
 }

 // Features returns the optional features of this Fs
@@ -868,16 +868,14 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
 	}
 	// Failing that, if it's a RequestFailure it's probably got an http status code we can check
 	if reqErr, ok := err.(awserr.RequestFailure); ok {
-		// 301 if wrong region for bucket - can only update if running from a bucket
-		if f.rootBucket != "" {
-			if reqErr.StatusCode() == http.StatusMovedPermanently {
-				urfbErr := f.updateRegionForBucket(f.rootBucket)
-				if urfbErr != nil {
-					fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr)
-					return false, err
-				}
-				return true, err
+		// 301 if wrong region for bucket
+		if reqErr.StatusCode() == http.StatusMovedPermanently {
+			urfbErr := f.updateRegionForBucket()
+			if urfbErr != nil {
+				fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr)
+				return false, err
 			}
+			return true, err
 		}
 		for _, e := range retryErrorCodes {
 			if reqErr.StatusCode() == e {
@@ -890,23 +888,21 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
 	return fserrors.ShouldRetry(err), err
 }

-// parsePath parses a remote 'url'
-func parsePath(path string) (root string) {
-	root = strings.Trim(path, "/")
+// Pattern to match a s3 path
+var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
+
+// parseParse parses a s3 'url'
+func s3ParsePath(path string) (bucket, directory string, err error) {
+	parts := matcher.FindStringSubmatch(path)
+	if parts == nil {
+		err = errors.Errorf("couldn't parse bucket out of s3 path %q", path)
+	} else {
+		bucket, directory = parts[1], parts[2]
+		directory = strings.Trim(directory, "/")
+	}
 	return
 }

-// split returns bucket and bucketPath from the rootRelativePath
-// relative to f.root
-func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
-	return bucket.Split(path.Join(f.root, rootRelativePath))
-}
-
-// split returns bucket and bucketPath from the object
-func (o *Object) split() (bucket, bucketPath string) {
-	return o.fs.split(o.remote)
-}

 // s3Connection makes a connection to s3
 func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
 	// Make the auth
@@ -1043,12 +1039,6 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
 	return
 }

-// setRoot changes the root of the Fs
-func (f *Fs) setRoot(root string) {
-	f.root = parsePath(root)
-	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
-}
-
 // NewFs constructs an Fs from the path, bucket:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
@@ -1065,6 +1055,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "s3: upload cutoff")
 	}
+	bucket, directory, err := s3ParsePath(root)
+	if err != nil {
+		return nil, err
+	}
 	if opt.ACL == "" {
 		opt.ACL = "private"
 	}
@@ -1076,39 +1070,38 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, err
 	}
 	f := &Fs{
-		name:  name,
-		opt:   *opt,
-		c:     c,
-		ses:   ses,
-		pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
-		cache: bucket.NewCache(),
-		srv:   fshttp.NewClient(fs.Config),
+		name:   name,
+		root:   directory,
+		opt:    *opt,
+		c:      c,
+		bucket: bucket,
+		ses:    ses,
+		pacer:  fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
+		srv:    fshttp.NewClient(fs.Config),
 	}
-	f.setRoot(root)
 	f.features = (&fs.Features{
-		ReadMimeType:      true,
-		WriteMimeType:     true,
-		BucketBased:       true,
-		BucketBasedRootOK: true,
-		SetTier:           true,
-		GetTier:           true,
+		ReadMimeType:  true,
+		WriteMimeType: true,
+		BucketBased:   true,
 	}).Fill(f)
-	if f.rootBucket != "" && f.rootDirectory != "" {
+	if f.root != "" {
+		f.root += "/"
 		// Check to see if the object exists
 		req := s3.HeadObjectInput{
-			Bucket: &f.rootBucket,
-			Key:    &f.rootDirectory,
+			Bucket: &f.bucket,
+			Key:    &directory,
 		}
 		err = f.pacer.Call(func() (bool, error) {
 			_, err = f.c.HeadObject(&req)
 			return f.shouldRetry(err)
 		})
 		if err == nil {
-			newRoot := path.Dir(f.root)
-			if newRoot == "." {
-				newRoot = ""
+			f.root = path.Dir(directory)
+			if f.root == "." {
+				f.root = ""
 			} else {
 				f.root += "/"
 			}
-			f.setRoot(newRoot)
 			// return an error with an fs which points to the parent
 			return f, fs.ErrorIsFile
 		}
 	}
@@ -1135,7 +1128,6 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Obje
 		o.etag = aws.StringValue(info.ETag)
 		o.bytes = aws.Int64Value(info.Size)
-		o.storageClass = aws.StringValue(info.StorageClass)
 	} else {
 		err := o.readMetaData(ctx) // reads info and meta, returning an error
 		if err != nil {
@@ -1152,9 +1144,9 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 }

 // Gets the bucket location
-func (f *Fs) getBucketLocation(bucket string) (string, error) {
+func (f *Fs) getBucketLocation() (string, error) {
 	req := s3.GetBucketLocationInput{
-		Bucket: &bucket,
+		Bucket: &f.bucket,
 	}
 	var resp *s3.GetBucketLocationOutput
 	var err error
@@ -1170,8 +1162,8 @@ func (f *Fs) getBucketLocation(bucket string) (string, error) {

 // Updates the region for the bucket by reading the region from the
 // bucket then updating the session.
-func (f *Fs) updateRegionForBucket(bucket string) error {
-	region, err := f.getBucketLocation(bucket)
+func (f *Fs) updateRegionForBucket() error {
+	region, err := f.getBucketLocation()
 	if err != nil {
 		return errors.Wrap(err, "reading bucket location failed")
 	}
@@ -1199,18 +1191,15 @@ func (f *Fs) updateRegionForBucket(bucket string) error {
 // listFn is called from list to handle an object.
 type listFn func(remote string, object *s3.Object, isDirectory bool) error

-// list lists the objects into the function supplied from
-// the bucket and directory supplied.  The remote has prefix
-// removed from it and if addBucket is set then it adds the
-// bucket to the start.
+// list the objects into the function supplied
 //
 // dir is the starting directory, "" for root
 //
 // Set recurse to read sub directories
-func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) error {
-	if prefix != "" {
-		prefix += "/"
-	}
-	if directory != "" {
-		directory += "/"
+func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) error {
+	root := f.root
+	if dir != "" {
+		root += dir + "/"
 	}
 	maxKeys := int64(listChunkSize)
 	delimiter := ""
@@ -1221,9 +1210,9 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 	for {
 		// FIXME need to implement ALL loop
 		req := s3.ListObjectsInput{
-			Bucket:    &bucket,
+			Bucket:    &f.bucket,
 			Delimiter: &delimiter,
-			Prefix:    &directory,
+			Prefix:    &root,
 			MaxKeys:   &maxKeys,
 			Marker:    marker,
 		}
@@ -1239,19 +1228,9 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 					err = fs.ErrorDirNotFound
 				}
 			}
-			if f.rootBucket == "" {
-				// if listing from the root ignore wrong region requests returning
-				// empty directory
-				if reqErr, ok := err.(awserr.RequestFailure); ok {
-					// 301 if wrong region for bucket
-					if reqErr.StatusCode() == http.StatusMovedPermanently {
-						fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucket)
-						return nil
-					}
-				}
-			}
 			return err
 		}
+		rootLength := len(f.root)
 		if !recurse {
 			for _, commonPrefix := range resp.CommonPrefixes {
 				if commonPrefix.Prefix == nil {
@@ -1259,14 +1238,11 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 					continue
 				}
 				remote := *commonPrefix.Prefix
-				if !strings.HasPrefix(remote, prefix) {
+				if !strings.HasPrefix(remote, f.root) {
 					fs.Logf(f, "Odd name received %q", remote)
 					continue
 				}
-				remote = remote[len(prefix):]
-				if addBucket {
-					remote = path.Join(bucket, remote)
-				}
+				remote = remote[rootLength:]
 				if strings.HasSuffix(remote, "/") {
 					remote = remote[:len(remote)-1]
 				}
@@ -1277,18 +1253,22 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 			}
 		}
 		for _, object := range resp.Contents {
-			remote := aws.StringValue(object.Key)
-			if !strings.HasPrefix(remote, prefix) {
-				fs.Logf(f, "Odd name received %q", remote)
+			key := aws.StringValue(object.Key)
+			if !strings.HasPrefix(key, f.root) {
+				fs.Logf(f, "Odd name received %q", key)
 				continue
 			}
-			remote = remote[len(prefix):]
-			isDirectory := strings.HasSuffix(remote, "/")
-			if addBucket {
-				remote = path.Join(bucket, remote)
-			}
+			remote := key[rootLength:]
 			// is this a directory marker?
-			if isDirectory && object.Size != nil && *object.Size == 0 {
+			if (strings.HasSuffix(remote, "/") || remote == "") && *object.Size == 0 {
 				if recurse && remote != "" {
 					// add a directory in if --fast-list since will have no prefixes
 					remote = remote[:len(remote)-1]
 					err = fn(remote, &s3.Object{Key: &remote}, true)
 					if err != nil {
 						return err
 					}
 				}
 				continue // skip directory marker
 			}
 			err = fn(remote, object, false)
@@ -1329,10 +1309,20 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *s3.Objec
 	return o, nil
 }

+// mark the bucket as being OK
+func (f *Fs) markBucketOK() {
+	if f.bucket != "" {
+		f.bucketOKMu.Lock()
+		f.bucketOK = true
+		f.bucketDeleted = false
+		f.bucketOKMu.Unlock()
+	}
+}
+
 // listDir lists files and directories to out
-func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	// List the objects and directories
-	err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *s3.Object, isDirectory bool) error {
+	err = f.list(ctx, dir, false, func(remote string, object *s3.Object, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
 		if err != nil {
 			return err
@@ -1346,12 +1336,15 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
 		return nil, err
 	}
 	// bucket must be present if listing succeeded
-	f.cache.MarkOK(bucket)
+	f.markBucketOK()
 	return entries, nil
 }

 // listBuckets lists the buckets to out
-func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
+func (f *Fs) listBuckets(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	if dir != "" {
+		return nil, fs.ErrorListBucketRequired
+	}
 	req := s3.ListBucketsInput{}
 	var resp *s3.ListBucketsOutput
 	err = f.pacer.Call(func() (bool, error) {
@@ -1362,9 +1355,7 @@ func (f *Fs) listBuckets(ctx context.Context, dir string) (entries fs.DirEntries
 		return nil, err
 	}
 	for _, bucket := range resp.Buckets {
-		bucketName := aws.StringValue(bucket.Name)
-		f.cache.MarkOK(bucketName)
-		d := fs.NewDir(bucketName, aws.TimeValue(bucket.CreationDate))
+		d := fs.NewDir(aws.StringValue(bucket.Name), aws.TimeValue(bucket.CreationDate))
 		entries = append(entries, d)
 	}
 	return entries, nil
@@ -1380,14 +1371,10 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	bucket, directory := f.split(dir)
-	if bucket == "" {
-		if directory != "" {
-			return nil, fs.ErrorListBucketRequired
-		}
-		return f.listBuckets(ctx)
+	if f.bucket == "" {
+		return f.listBuckets(ctx, dir)
 	}
-	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
+	return f.listDir(ctx, dir)
 }

 // ListR lists the objects and directories of the Fs starting
@@ -1405,45 +1392,24 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // immediately.
 //
 // Don't implement this unless you have a more efficient way
-// of listing recursively than doing a directory traversal.
+// of listing recursively that doing a directory traversal.
 func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
-	bucket, directory := f.split(dir)
+	if f.bucket == "" {
+		return fs.ErrorListBucketRequired
+	}
 	list := walk.NewListRHelper(callback)
-	listR := func(bucket, directory, prefix string, addBucket bool) error {
-		return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *s3.Object, isDirectory bool) error {
-			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
-			if err != nil {
-				return err
-			}
-			return list.Add(entry)
-		})
-	}
-	if bucket == "" {
-		entries, err := f.listBuckets(ctx)
-		if err != nil {
-			return err
-		}
-		for _, entry := range entries {
-			err = list.Add(entry)
-			if err != nil {
-				return err
-			}
-			bucket := entry.Remote()
-			err = listR(bucket, "", f.rootDirectory, true)
-			if err != nil {
-				return err
-			}
-			// bucket must be present if listing succeeded
-			f.cache.MarkOK(bucket)
-		}
-	} else {
-		err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
-		if err != nil {
-			return err
-		}
-		// bucket must be present if listing succeeded
-		f.cache.MarkOK(bucket)
+	err = f.list(ctx, dir, true, func(remote string, object *s3.Object, isDirectory bool) error {
+		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
+		if err != nil {
+			return err
+		}
+		return list.Add(entry)
+	})
+	if err != nil {
+		return err
 	}
+	// bucket must be present if listing succeeded
+	f.markBucketOK()
 	return list.Flush()
 }

@@ -1465,9 +1431,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 // Check if the bucket exists
 //
 // NB this can return incorrect results if called immediately after bucket deletion
-func (f *Fs) bucketExists(ctx context.Context, bucket string) (bool, error) {
+func (f *Fs) dirExists(ctx context.Context) (bool, error) {
 	req := s3.HeadBucketInput{
-		Bucket: &bucket,
+		Bucket: &f.bucket,
 	}
 	err := f.pacer.Call(func() (bool, error) {
 		_, err := f.c.HeadBucketWithContext(ctx, &req)
@@ -1486,61 +1452,68 @@ func (f *Fs) bucketExists(ctx context.Context, bucket string) (bool, error)

 // Mkdir creates the bucket if it doesn't exist
 func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-	bucket, _ := f.split(dir)
-	return f.makeBucket(ctx, bucket)
-}
-
-// makeBucket creates the bucket if it doesn't exist
-func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
-	return f.cache.Create(bucket, func() error {
-		req := s3.CreateBucketInput{
-			Bucket: &bucket,
-			ACL:    &f.opt.BucketACL,
-		}
-		if f.opt.LocationConstraint != "" {
-			req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
-				LocationConstraint: &f.opt.LocationConstraint,
-			}
-		}
-		err := f.pacer.Call(func() (bool, error) {
-			_, err := f.c.CreateBucketWithContext(ctx, &req)
-			return f.shouldRetry(err)
-		})
-		if err == nil {
-			fs.Infof(f, "Bucket %q created with ACL %q", bucket, f.opt.BucketACL)
-		}
-		if err, ok := err.(awserr.Error); ok {
-			if err.Code() == "BucketAlreadyOwnedByYou" {
-				err = nil
-			}
-		}
-		return err
-	}, func() (bool, error) {
-		return f.bucketExists(ctx, bucket)
-	})
-}
+	f.bucketOKMu.Lock()
+	defer f.bucketOKMu.Unlock()
+	if f.bucketOK {
+		return nil
+	}
+	if !f.bucketDeleted {
+		exists, err := f.dirExists(ctx)
+		if err == nil {
+			f.bucketOK = exists
+		}
+		if err != nil || exists {
+			return err
+		}
+	}
+	req := s3.CreateBucketInput{
+		Bucket: &f.bucket,
+		ACL:    &f.opt.BucketACL,
+	}
+	if f.opt.LocationConstraint != "" {
+		req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
+			LocationConstraint: &f.opt.LocationConstraint,
+		}
+	}
+	err := f.pacer.Call(func() (bool, error) {
+		_, err := f.c.CreateBucketWithContext(ctx, &req)
+		return f.shouldRetry(err)
+	})
+	if err, ok := err.(awserr.Error); ok {
+		if err.Code() == "BucketAlreadyOwnedByYou" {
+			err = nil
+		}
+	}
+	if err == nil {
+		f.bucketOK = true
+		f.bucketDeleted = false
+		fs.Infof(f, "Bucket created with ACL %q", *req.ACL)
+	}
+	return err
+}
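Both Mkdir implementations above solve the same "create the bucket at most once" problem. A stripped-down sketch of the mutex-and-flags variant the older side uses (names are hypothetical and the provider call is stubbed; this is the shape of the pattern, not the backend code):

package main

import (
	"fmt"
	"sync"
)

// lazyBucket guards one-time bucket creation the way the older
// s3/qingstor code does with bucketOKMu/bucketOK/bucketDeleted.
type lazyBucket struct {
	mu      sync.Mutex
	created bool
}

func (l *lazyBucket) ensure(create func() error) error {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.created {
		return nil // fast path: bucket already made (or already seen)
	}
	if err := create(); err != nil {
		return err
	}
	l.created = true
	return nil
}

func main() {
	var b lazyBucket
	for i := 0; i < 3; i++ {
		_ = b.ensure(func() error {
			fmt.Println("create-bucket API called") // printed only once
			return nil
		})
	}
}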
 // Rmdir deletes the bucket if the fs is at the root
 //
 // Returns an error if it isn't empty
 func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-	bucket, directory := f.split(dir)
-	if bucket == "" || directory != "" {
+	f.bucketOKMu.Lock()
+	defer f.bucketOKMu.Unlock()
+	if f.root != "" || dir != "" {
 		return nil
 	}
-	return f.cache.Remove(bucket, func() error {
-		req := s3.DeleteBucketInput{
-			Bucket: &bucket,
-		}
-		err := f.pacer.Call(func() (bool, error) {
-			_, err := f.c.DeleteBucketWithContext(ctx, &req)
-			return f.shouldRetry(err)
-		})
-		if err == nil {
-			fs.Infof(f, "Bucket %q deleted", bucket)
-		}
-		return err
-	})
+	req := s3.DeleteBucketInput{
+		Bucket: &f.bucket,
+	}
+	err := f.pacer.Call(func() (bool, error) {
+		_, err := f.c.DeleteBucketWithContext(ctx, &req)
+		return f.shouldRetry(err)
+	})
+	if err == nil {
+		f.bucketOK = false
+		f.bucketDeleted = true
+		fs.Infof(f, "Bucket deleted")
+	}
+	return err
 }

 // Precision of the remote
@@ -1554,31 +1527,6 @@ func pathEscape(s string) string {
 	return strings.Replace(rest.URLPathEscape(s), "+", "%2B", -1)
 }

-// copy does a server side copy
-//
-// It adds the boiler plate to the req passed in and calls the s3
-// method
-func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string) error {
-	req.Bucket = &dstBucket
-	req.ACL = &f.opt.ACL
-	req.Key = &dstPath
-	source := pathEscape(path.Join(srcBucket, srcPath))
-	req.CopySource = &source
-	if f.opt.ServerSideEncryption != "" {
-		req.ServerSideEncryption = &f.opt.ServerSideEncryption
-	}
-	if f.opt.SSEKMSKeyID != "" {
-		req.SSEKMSKeyId = &f.opt.SSEKMSKeyID
-	}
-	if req.StorageClass == nil && f.opt.StorageClass != "" {
-		req.StorageClass = &f.opt.StorageClass
-	}
-	return f.pacer.Call(func() (bool, error) {
-		_, err := f.c.CopyObjectWithContext(ctx, req)
-		return f.shouldRetry(err)
-	})
-}
-
 // Copy src to this remote using server side copy operations.
 //
 // This is stored with the remote path given
@@ -1589,8 +1537,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
 //
 // If it isn't possible then return fs.ErrorCantCopy
 func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
-	dstBucket, dstPath := f.split(remote)
-	err := f.makeBucket(ctx, dstBucket)
+	err := f.Mkdir(ctx, "")
 	if err != nil {
 		return nil, err
 	}
@@ -1599,11 +1546,29 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
-	srcBucket, srcPath := srcObj.split()
+	srcFs := srcObj.fs
+	key := f.root + remote
+	source := pathEscape(srcFs.bucket + "/" + srcFs.root + srcObj.remote)
 	req := s3.CopyObjectInput{
+		Bucket:            &f.bucket,
+		ACL:               &f.opt.ACL,
+		Key:               &key,
+		CopySource:        &source,
 		MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
 	}
-	err = f.copy(ctx, &req, dstBucket, dstPath, srcBucket, srcPath)
+	if f.opt.ServerSideEncryption != "" {
+		req.ServerSideEncryption = &f.opt.ServerSideEncryption
+	}
+	if f.opt.SSEKMSKeyID != "" {
+		req.SSEKMSKeyId = &f.opt.SSEKMSKeyID
+	}
+	if f.opt.StorageClass != "" {
+		req.StorageClass = &f.opt.StorageClass
+	}
+	err = f.pacer.Call(func() (bool, error) {
+		_, err = f.c.CopyObjectWithContext(ctx, &req)
+		return f.shouldRetry(err)
+	})
 	if err != nil {
 		return nil, err
 	}
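S3's CopySource header is URL-decoded server side, which is why the pathEscape helper kept above escapes the path and then forces "+" to "%2B" (a bare "+" would decode back to a space). A standalone approximation that escapes per segment with the standard library instead of rclone's rest.URLPathEscape (close enough for illustration, not the exact same escaping):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// pathEscape mimics the helper in the diff: escape each path segment,
// keep the "/" separators, and escape "+" explicitly.
func pathEscape(s string) string {
	segments := strings.Split(s, "/")
	for i, seg := range segments {
		segments[i] = strings.Replace(url.PathEscape(seg), "+", "%2B", -1)
	}
	return strings.Join(segments, "/")
}

func main() {
	fmt.Println(pathEscape("bucket/dir name/a+b.txt"))
	// Output: bucket/dir%20name/a%2Bb.txt
}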
@@ -1675,10 +1640,10 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 	if o.meta != nil {
 		return nil
 	}
-	bucket, bucketPath := o.split()
+	key := o.fs.root + o.remote
 	req := s3.HeadObjectInput{
-		Bucket: &bucket,
-		Key:    &bucketPath,
+		Bucket: &o.fs.bucket,
+		Key:    &key,
 	}
 	var resp *s3.HeadObjectOutput
 	err = o.fs.pacer.Call(func() (bool, error) {
@@ -1703,7 +1668,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 	o.etag = aws.StringValue(resp.ETag)
 	o.bytes = size
 	o.meta = resp.Metadata
-	o.storageClass = aws.StringValue(resp.StorageClass)
 	if resp.LastModified == nil {
 		fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
 		o.lastModified = time.Now()
@@ -1754,19 +1718,39 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 		return nil
 	}

-	// Can't update metadata here, so return this error to force a recopy
-	if o.storageClass == "GLACIER" || o.storageClass == "DEEP_ARCHIVE" {
-		return fs.ErrorCantSetModTime
-	}
+	// Guess the content type
+	mimeType := fs.MimeType(ctx, o)

 	// Copy the object to itself to update the metadata
-	bucket, bucketPath := o.split()
+	key := o.fs.root + o.remote
+	sourceKey := o.fs.bucket + "/" + key
+	directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
 	req := s3.CopyObjectInput{
-		ContentType:       aws.String(fs.MimeType(ctx, o)), // Guess the content type
+		Bucket:            &o.fs.bucket,
+		ACL:               &o.fs.opt.ACL,
+		Key:               &key,
+		ContentType:       &mimeType,
+		CopySource:        aws.String(pathEscape(sourceKey)),
 		Metadata:          o.meta,
-		MetadataDirective: aws.String(s3.MetadataDirectiveReplace), // replace metadata with that passed in
+		MetadataDirective: &directive,
 	}
-	return o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath)
+	if o.fs.opt.ServerSideEncryption != "" {
+		req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
+	}
+	if o.fs.opt.SSEKMSKeyID != "" {
+		req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
+	}
+	if o.fs.opt.StorageClass == "GLACIER" || o.fs.opt.StorageClass == "DEEP_ARCHIVE" {
+		return fs.ErrorCantSetModTime
+	}
+	if o.fs.opt.StorageClass != "" {
+		req.StorageClass = &o.fs.opt.StorageClass
+	}
+	err = o.fs.pacer.Call(func() (bool, error) {
+		_, err := o.fs.c.CopyObjectWithContext(ctx, &req)
+		return o.fs.shouldRetry(err)
+	})
+	return err
 }

 // Storable raturns a boolean indicating if this object is storable
@@ -1776,12 +1760,11 @@ func (o *Object) Storable() bool {

 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
-	bucket, bucketPath := o.split()
+	key := o.fs.root + o.remote
 	req := s3.GetObjectInput{
-		Bucket: &bucket,
-		Key:    &bucketPath,
+		Bucket: &o.fs.bucket,
+		Key:    &key,
 	}
 	fs.FixRangeOption(options, o.bytes)
 	for _, option := range options {
 		switch option.(type) {
 		case *fs.RangeOption, *fs.SeekOption:
@@ -1801,7 +1784,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	})
 	if err, ok := err.(awserr.RequestFailure); ok {
 		if err.Code() == "InvalidObjectState" {
-			return nil, errors.Errorf("Object in GLACIER, restore first: bucket=%q, key=%q", bucket, bucketPath)
+			return nil, errors.Errorf("Object in GLACIER, restore first: %v", key)
 		}
 	}
 	if err != nil {
@@ -1812,8 +1795,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

 // Update the Object from in with modTime and size
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	bucket, bucketPath := o.split()
-	err := o.fs.makeBucket(ctx, bucket)
+	err := o.fs.Mkdir(ctx, "")
 	if err != nil {
 		return err
 	}
@@ -1866,11 +1848,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

 	// Guess the content type
 	mimeType := fs.MimeType(ctx, src)

+	key := o.fs.root + o.remote
 	if multipart {
 		req := s3manager.UploadInput{
-			Bucket:      &bucket,
+			Bucket:      &o.fs.bucket,
 			ACL:         &o.fs.opt.ACL,
-			Key:         &bucketPath,
+			Key:         &key,
 			Body:        in,
 			ContentType: &mimeType,
 			Metadata:    metadata,
@@ -1894,9 +1878,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		}
 	} else {
 		req := s3.PutObjectInput{
-			Bucket:      &bucket,
+			Bucket:      &o.fs.bucket,
 			ACL:         &o.fs.opt.ACL,
-			Key:         &bucketPath,
+			Key:         &key,
 			ContentType: &mimeType,
 			Metadata:    metadata,
 		}
||||
@@ -1969,10 +1953,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
bucket, bucketPath := o.split()
|
||||
key := o.fs.root + o.remote
|
||||
req := s3.DeleteObjectInput{
|
||||
Bucket: &bucket,
|
||||
Key: &bucketPath,
|
||||
Bucket: &o.fs.bucket,
|
||||
Key: &key,
|
||||
}
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err := o.fs.c.DeleteObjectWithContext(ctx, &req)
|
||||
@@ -1991,31 +1975,6 @@ func (o *Object) MimeType(ctx context.Context) string {
|
||||
return o.mimeType
|
||||
}
|
||||
|
||||
// SetTier performs changing storage class
|
||||
func (o *Object) SetTier(tier string) (err error) {
|
||||
ctx := context.TODO()
|
||||
tier = strings.ToUpper(tier)
|
||||
bucket, bucketPath := o.split()
|
||||
req := s3.CopyObjectInput{
|
||||
MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
|
||||
StorageClass: aws.String(tier),
|
||||
}
|
||||
err = o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.storageClass = tier
|
||||
return err
|
||||
}
|
||||
|
||||
// GetTier returns storage class as string
|
||||
func (o *Object) GetTier() string {
|
||||
if o.storageClass == "" {
|
||||
return "STANDARD"
|
||||
}
|
||||
return o.storageClass
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
@@ -2024,6 +1983,4 @@ var (
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.GetTierer = &Object{}
|
||||
_ fs.SetTierer = &Object{}
|
||||
)
|
||||
|
||||
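The var block at the end of this hunk is Go's compile-time interface check idiom: assigning a typed nil to the blank identifier forces the compiler to prove the type satisfies the interface, which is why dropping SetTier/GetTier also drops the fs.SetTierer and fs.GetTierer lines. A minimal, self-contained sketch of the pattern (Mover and Backend are illustrative names, not rclone types):

package main

// Mover is a stand-in interface for this sketch.
type Mover interface {
	Move(src, dst string) error
}

type Backend struct{}

// Move satisfies Mover; delete this method and the var line below fails to compile.
func (b *Backend) Move(src, dst string) error { return nil }

// Costs nothing at runtime, but makes the compiler verify that *Backend
// implements Mover, exactly like `var _ fs.SetTierer = &Object{}` above.
var _ Mover = (*Backend)(nil)

func main() {}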
@@ -11,9 +11,8 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
-		RemoteName:  "TestS3:",
-		NilObject:   (*Object)(nil),
-		TiersToTest: []string{"STANDARD", "STANDARD_IA"},
+		RemoteName: "TestS3:",
+		NilObject:  (*Object)(nil),
 		ChunkedUpload: fstests.ChunkedUploadConfig{
 			MinChunkSize: minChunkSize,
 		},
@@ -36,8 +36,7 @@ import (
 )

 const (
-	connectionsPerSecond    = 10 // don't make more than this many ssh connections/s
-	hashCommandNotSupported = "none"
+	connectionsPerSecond = 10 // don't make more than this many ssh connections/s
 )

 var (
@@ -128,16 +127,6 @@ Home directory can be found in a shared folder called "home"
 			Default:  true,
 			Help:     "Set the modified time on the remote if set.",
 			Advanced: true,
 		}, {
-			Name:     "md5sum_command",
-			Default:  "",
-			Help:     "The command used to read md5 hashes. Leave blank for autodetect.",
-			Advanced: true,
-		}, {
-			Name:     "sha1sum_command",
-			Default:  "",
-			Help:     "The command used to read sha1 hashes. Leave blank for autodetect.",
-			Advanced: true,
 		}},
 	}
 	fs.Register(fsi)
@@ -157,17 +146,14 @@ type Options struct {
 	AskPassword  bool   `config:"ask_password"`
 	PathOverride string `config:"path_override"`
 	SetModTime   bool   `config:"set_modtime"`
-	Md5sumCommand  string `config:"md5sum_command"`
-	Sha1sumCommand string `config:"sha1sum_command"`
 }

 // Fs stores the interface to the remote SFTP files
 type Fs struct {
 	name string
 	root string
-	opt      Options          // parsed options
-	m        configmap.Mapper // config
-	features *fs.Features     // optional features
+	opt      Options      // parsed options
+	features *fs.Features // optional features
 	config    *ssh.ClientConfig
 	url       string
 	mkdirLock *stringLock
@@ -435,17 +421,16 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
 	}

-	return NewFsWithConnection(ctx, name, root, m, opt, sshConfig)
+	return NewFsWithConnection(ctx, name, root, opt, sshConfig)
 }

 // NewFsWithConnection creates a new Fs object from the name and root and a ssh.ClientConfig. It connects to
 // the host specified in the ssh.ClientConfig
-func NewFsWithConnection(ctx context.Context, name string, root string, m configmap.Mapper, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
+func NewFsWithConnection(ctx context.Context, name string, root string, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
 	f := &Fs{
 		name:      name,
 		root:      root,
 		opt:       *opt,
-		m:         m,
 		config:    sshConfig,
 		url:       "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root,
 		mkdirLock: newStringLock(),
@@ -771,79 +756,45 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	return nil
 }

-// run runds cmd on the remote end returning standard output
-func (f *Fs) run(cmd string) ([]byte, error) {
-	c, err := f.getSftpConnection()
-	if err != nil {
-		return nil, errors.Wrap(err, "run: get SFTP connection")
-	}
-	defer f.putSftpConnection(&c, err)
-
-	session, err := c.sshClient.NewSession()
-	if err != nil {
-		return nil, errors.Wrap(err, "run: get SFTP sessiion")
-	}
-	defer func() {
-		_ = session.Close()
-	}()
-
-	var stdout, stderr bytes.Buffer
-	session.Stdout = &stdout
-	session.Stderr = &stderr
-
-	err = session.Run(cmd)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to run %q: %s", cmd, stderr.Bytes())
-	}
-
-	return stdout.Bytes(), nil
-}
-
 // Hashes returns the supported hash types of the filesystem
 func (f *Fs) Hashes() hash.Set {
-	if f.opt.DisableHashCheck {
-		return hash.Set(hash.None)
-	}
-
 	if f.cachedHashes != nil {
 		return *f.cachedHashes
 	}

-	// look for a hash command which works
-	checkHash := func(commands []string, expected string, hashCommand *string, changed *bool) bool {
-		if *hashCommand == hashCommandNotSupported {
-			return false
-		}
-		if *hashCommand != "" {
-			return true
-		}
-		*changed = true
-		for _, command := range commands {
-			output, err := f.run(command)
-			if err != nil {
-				continue
-			}
-			output = bytes.TrimSpace(output)
-			fs.Debugf(f, "checking %q command: %q", command, output)
-			if parseHash(output) == expected {
-				*hashCommand = command
-				return true
-			}
-		}
-		*hashCommand = hashCommandNotSupported
-		return false
+	if f.opt.DisableHashCheck {
+		return hash.Set(hash.None)
 	}

-	changed := false
-	md5Works := checkHash([]string{"md5sum", "md5 -r"}, "d41d8cd98f00b204e9800998ecf8427e", &f.opt.Md5sumCommand, &changed)
-	sha1Works := checkHash([]string{"sha1sum", "sha1 -r"}, "da39a3ee5e6b4b0d3255bfef95601890afd80709", &f.opt.Sha1sumCommand, &changed)
-
-	if changed {
-		f.m.Set("md5sum_command", f.opt.Md5sumCommand)
-		f.m.Set("sha1sum_command", f.opt.Sha1sumCommand)
+	c, err := f.getSftpConnection()
+	if err != nil {
+		fs.Errorf(f, "Couldn't get SSH connection to figure out Hashes: %v", err)
+		return hash.Set(hash.None)
 	}
+	defer f.putSftpConnection(&c, err)
+	session, err := c.sshClient.NewSession()
+	if err != nil {
+		return hash.Set(hash.None)
+	}
+	sha1Output, _ := session.Output("echo 'abc' | sha1sum")
+	expectedSha1 := "03cfd743661f07975fa2f1220c5194cbaff48451"
+	_ = session.Close()
+
+	session, err = c.sshClient.NewSession()
+	if err != nil {
+		return hash.Set(hash.None)
+	}
+	md5Output, _ := session.Output("echo 'abc' | md5sum")
+	expectedMd5 := "0bee89b07a248e27c83fc3d5951213c1"
+	_ = session.Close()
+
+	sha1Works := parseHash(sha1Output) == expectedSha1
+	md5Works := parseHash(md5Output) == expectedMd5

 	set := hash.NewHashSet()
 	if !sha1Works && !md5Works {
 		set.Add(hash.None)
 	}
 	if sha1Works {
 		set.Add(hash.SHA1)
 	}
@@ -851,12 +802,26 @@ func (f *Fs) Hashes() hash.Set {
 		set.Add(hash.MD5)
 	}

+	_ = session.Close()
 	f.cachedHashes = &set
 	return set
 }

 // About gets usage stats
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	c, err := f.getSftpConnection()
 	if err != nil {
 		return nil, errors.Wrap(err, "About get SFTP connection")
 	}
+	session, err := c.sshClient.NewSession()
+	f.putSftpConnection(&c, err)
+	if err != nil {
+		return nil, errors.Wrap(err, "About put SFTP connection")
+	}
+
+	var stdout, stderr bytes.Buffer
+	session.Stdout = &stdout
+	session.Stderr = &stderr
 	escapedPath := shellEscape(f.root)
 	if f.opt.PathOverride != "" {
 		escapedPath = shellEscape(path.Join(f.opt.PathOverride, f.root))
@@ -864,12 +829,14 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	if len(escapedPath) == 0 {
 		escapedPath = "/"
 	}
-	stdout, err := f.run("df -k " + escapedPath)
+	err = session.Run("df -k " + escapedPath)
 	if err != nil {
-		return nil, errors.Wrap(err, "your remote may not support About")
+		_ = session.Close()
+		return nil, errors.Wrap(err, "About invocation of df failed. Your remote may not support about.")
 	}
+	_ = session.Close()

-	usageTotal, usageUsed, usageAvail := parseUsage(stdout)
+	usageTotal, usageUsed, usageAvail := parseUsage(stdout.Bytes())
 	usage := &fs.Usage{}
 	if usageTotal >= 0 {
 		usage.Total = fs.NewUsageValue(usageTotal)
@@ -904,27 +871,23 @@ func (o *Object) Remote() string {
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
 func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
-	if o.fs.opt.DisableHashCheck {
-		return "", nil
-	}
-	_ = o.fs.Hashes()
-
 	var hashCmd string
 	if r == hash.MD5 {
 		if o.md5sum != nil {
 			return *o.md5sum, nil
 		}
-		hashCmd = o.fs.opt.Md5sumCommand
+		hashCmd = "md5sum"
 	} else if r == hash.SHA1 {
 		if o.sha1sum != nil {
 			return *o.sha1sum, nil
 		}
-		hashCmd = o.fs.opt.Sha1sumCommand
+		hashCmd = "sha1sum"
 	} else {
 		return "", hash.ErrUnsupported
 	}
-	if hashCmd == "" || hashCmd == hashCommandNotSupported {
-		return "", hash.ErrUnsupported
+
+	if o.fs.opt.DisableHashCheck {
+		return "", nil
 	}

 	c, err := o.fs.getSftpConnection()
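The v1.49.5 side of the Hashes hunk detects a working remote hash command by running each candidate and comparing its output against the known digest of empty input (d41d8cd98f00b204e9800998ecf8427e is the md5 of the empty string). A standalone sketch of that detection idea, run locally rather than over SSH for brevity (detectHashCommand is a hypothetical helper, not rclone's API):

package main

import (
	"bytes"
	"fmt"
	"os/exec"
	"strings"
)

// detectHashCommand tries each candidate command with empty stdin and accepts
// the first one that prints the well-known digest of the empty string. The
// candidate names are the same ones the diff above probes.
func detectHashCommand(candidates []string, emptyDigest string) string {
	for _, cmd := range candidates {
		parts := strings.Fields(cmd)
		c := exec.Command(parts[0], parts[1:]...)
		c.Stdin = bytes.NewReader(nil) // hash of empty input
		out, err := c.Output()
		if err != nil {
			continue
		}
		// Output usually looks like "<digest>  -"; take the first field.
		if fields := strings.Fields(string(out)); len(fields) > 0 && fields[0] == emptyDigest {
			return cmd
		}
	}
	return "" // none worked
}

func main() {
	md5Cmd := detectHashCommand([]string{"md5sum", "md5 -r"}, "d41d8cd98f00b204e9800998ecf8427e")
	fmt.Println("md5 command:", md5Cmd)
}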
@@ -8,8 +8,10 @@ import (
 	"fmt"
 	"io"
 	"path"
+	"regexp"
 	"strconv"
 	"strings"
+	"sync"
 	"time"

 	"github.com/ncw/swift"
@@ -22,9 +24,7 @@ import (
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/fs/walk"
-	"github.com/rclone/rclone/lib/bucket"
 	"github.com/rclone/rclone/lib/pacer"
-	"github.com/rclone/rclone/lib/readers"
 )

 // Constants
@@ -207,16 +207,17 @@ type Options struct {

 // Fs represents a remote swift server
 type Fs struct {
-	name             string            // name of this remote
-	root             string            // the path we are working on if any
-	features         *fs.Features      // optional features
-	opt              Options           // options for this backend
-	c                *swift.Connection // the connection to the swift server
-	rootContainer    string            // container part of root (if any)
-	rootDirectory    string            // directory part of root (if any)
-	cache            *bucket.Cache     // cache of container status
-	noCheckContainer bool              // don't check the container before creating it
-	pacer            *fs.Pacer         // To pace the API calls
+	name              string            // name of this remote
+	root              string            // the path we are working on if any
+	features          *fs.Features      // optional features
+	opt               Options           // options for this backend
+	c                 *swift.Connection // the connection to the swift server
+	container         string            // the container we are working on
+	containerOKMu     sync.Mutex        // mutex to protect container OK
+	containerOK       bool              // true if we have created the container
+	segmentsContainer string            // container to store the segments (if any) in
+	noCheckContainer  bool              // don't check the container before creating it
+	pacer             *fs.Pacer         // To pace the API calls
 }

 // Object describes a swift object
@@ -241,18 +242,18 @@ func (f *Fs) Name() string {

 // Root of the remote (as passed into NewFs)
 func (f *Fs) Root() string {
-	return f.root
+	if f.root == "" {
+		return f.container
+	}
+	return f.container + "/" + f.root
 }

 // String converts this Fs to a string
 func (f *Fs) String() string {
-	if f.rootContainer == "" {
-		return fmt.Sprintf("Swift root")
+	if f.root == "" {
+		return fmt.Sprintf("Swift container %s", f.container)
 	}
-	if f.rootDirectory == "" {
-		return fmt.Sprintf("Swift container %s", f.rootContainer)
-	}
-	return fmt.Sprintf("Swift container %s path %s", f.rootContainer, f.rootDirectory)
+	return fmt.Sprintf("Swift container %s path %s", f.container, f.root)
 }

 // Features returns the optional features of this Fs
@@ -311,23 +312,21 @@ func shouldRetryHeaders(headers swift.Headers, err error) (bool, error) {
 	return shouldRetry(err)
 }

-// parsePath parses a remote 'url'
-func parsePath(path string) (root string) {
-	root = strings.Trim(path, "/")
+// Pattern to match a swift path
+var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
+
+// parseParse parses a swift 'url'
+func parsePath(path string) (container, directory string, err error) {
+	parts := matcher.FindStringSubmatch(path)
+	if parts == nil {
+		err = errors.Errorf("couldn't find container in swift path %q", path)
+	} else {
+		container, directory = parts[1], parts[2]
+		directory = strings.Trim(directory, "/")
+	}
 	return
 }

-// split returns container and containerPath from the rootRelativePath
-// relative to f.root
-func (f *Fs) split(rootRelativePath string) (container, containerPath string) {
-	return bucket.Split(path.Join(f.root, rootRelativePath))
-}
-
-// split returns container and containerPath from the object
-func (o *Object) split() (container, containerPath string) {
-	return o.fs.split(o.remote)
-}
-
 // swiftConnection makes a connection to swift
 func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
 	c := &swift.Connection{
@@ -410,48 +409,47 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
 	return
 }

-// setRoot changes the root of the Fs
-func (f *Fs) setRoot(root string) {
-	f.root = parsePath(root)
-	f.rootContainer, f.rootDirectory = bucket.Split(f.root)
-}
-
 // NewFsWithConnection constructs an Fs from the path, container:path
 // and authenticated connection.
 //
 // if noCheckContainer is set then the Fs won't check the container
 // exists before creating it.
 func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
-	f := &Fs{
-		name:             name,
-		opt:              *opt,
-		c:                c,
-		noCheckContainer: noCheckContainer,
-		pacer:            fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
-		cache:            bucket.NewCache(),
+	container, directory, err := parsePath(root)
+	if err != nil {
+		return nil, err
+	}
+	f := &Fs{
+		name:              name,
+		opt:               *opt,
+		c:                 c,
+		container:         container,
+		segmentsContainer: container + "_segments",
+		root:              directory,
+		noCheckContainer:  noCheckContainer,
+		pacer:             fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
 	}
-	f.setRoot(root)
 	f.features = (&fs.Features{
-		ReadMimeType:      true,
-		WriteMimeType:     true,
-		BucketBased:       true,
-		BucketBasedRootOK: true,
+		ReadMimeType:  true,
+		WriteMimeType: true,
+		BucketBased:   true,
 	}).Fill(f)
-	if f.rootContainer != "" && f.rootDirectory != "" {
+	if f.root != "" {
+		f.root += "/"
 		// Check to see if the object exists - ignoring directory markers
 		var info swift.Object
-		var err error
 		err = f.pacer.Call(func() (bool, error) {
 			var rxHeaders swift.Headers
-			info, rxHeaders, err = f.c.Object(f.rootContainer, f.rootDirectory)
+			info, rxHeaders, err = f.c.Object(container, directory)
 			return shouldRetryHeaders(rxHeaders, err)
 		})
 		if err == nil && info.ContentType != directoryMarkerContentType {
-			newRoot := path.Dir(f.root)
-			if newRoot == "." {
-				newRoot = ""
+			f.root = path.Dir(directory)
+			if f.root == "." {
+				f.root = ""
+			} else {
+				f.root += "/"
 			}
-			f.setRoot(newRoot)
 			// return an error with an fs which points to the parent
 			return f, fs.ErrorIsFile
 		}
@@ -519,26 +517,23 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 type listFn func(remote string, object *swift.Object, isDirectory bool) error

 // listContainerRoot lists the objects into the function supplied from
-// the container and directory supplied. The remote has prefix
-// removed from it and if addContainer is set then it adds the
-// container to the start.
+// the container and root supplied
 //
 // Set recurse to read sub directories
-func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer bool, recurse bool, fn listFn) error {
-	if prefix != "" {
-		prefix += "/"
-	}
-	if directory != "" {
-		directory += "/"
+func (f *Fs) listContainerRoot(container, root string, dir string, recurse bool, fn listFn) error {
+	prefix := root
+	if dir != "" {
+		prefix += dir + "/"
 	}
 	// Options for ObjectsWalk
 	opts := swift.ObjectsOpts{
-		Prefix: directory,
+		Prefix: prefix,
 		Limit:  listChunks,
 	}
 	if !recurse {
 		opts.Delimiter = '/'
 	}
+	rootLength := len(root)
 	return f.c.ObjectsWalk(container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
 		var objects []swift.Object
 		var err error
@@ -563,10 +558,7 @@ func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer
 			// duplicate directories. Ignore them here.
 				continue
 			}
-			remote := object.Name[len(prefix):]
-			if addContainer {
-				remote = path.Join(container, remote)
-			}
+			remote := object.Name[rootLength:]
 			err = fn(remote, object, isDirectory)
 			if err != nil {
 				break
@@ -580,8 +572,8 @@ func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer
 type addEntryFn func(fs.DirEntry) error

 // list the objects into the function supplied
-func (f *Fs) list(container, directory, prefix string, addContainer bool, recurse bool, fn addEntryFn) error {
-	err := f.listContainerRoot(container, directory, prefix, addContainer, recurse, func(remote string, object *swift.Object, isDirectory bool) (err error) {
+func (f *Fs) list(dir string, recurse bool, fn addEntryFn) error {
+	err := f.listContainerRoot(f.container, f.root, dir, recurse, func(remote string, object *swift.Object, isDirectory bool) (err error) {
 		if isDirectory {
 			remote = strings.TrimRight(remote, "/")
 			d := fs.NewDir(remote, time.Time{}).SetSize(object.Bytes)
@@ -605,13 +597,22 @@ func (f *Fs) list(container, directory, prefix string, addContainer bool, recurs
 		return err
 	}

+// mark the container as being OK
+func (f *Fs) markContainerOK() {
+	if f.container != "" {
+		f.containerOKMu.Lock()
+		f.containerOK = true
+		f.containerOKMu.Unlock()
+	}
+}
+
 // listDir lists a single directory
-func (f *Fs) listDir(container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
-	if container == "" {
+func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
+	if f.container == "" {
 		return nil, fs.ErrorListBucketRequired
 	}
 	// List the objects
-	err = f.list(container, directory, prefix, addContainer, false, func(entry fs.DirEntry) error {
+	err = f.list(dir, false, func(entry fs.DirEntry) error {
 		entries = append(entries, entry)
 		return nil
 	})
@@ -619,12 +620,15 @@ func (f *Fs) listDir(container, directory, prefix string, addContainer bool) (en
 		return nil, err
 	}
 	// container must be present if listing succeeded
-	f.cache.MarkOK(container)
+	f.markContainerOK()
 	return entries, nil
 }

 // listContainers lists the containers
-func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) {
+func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
+	if dir != "" {
+		return nil, fs.ErrorListBucketRequired
+	}
 	var containers []swift.Container
 	err = f.pacer.Call(func() (bool, error) {
 		containers, err = f.c.ContainersAll(nil)
@@ -634,7 +638,6 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
 		return nil, errors.Wrap(err, "container listing failed")
 	}
 	for _, container := range containers {
-		f.cache.MarkOK(container.Name)
 		d := fs.NewDir(container.Name, time.Time{}).SetSize(container.Bytes).SetItems(container.Count)
 		entries = append(entries, d)
 	}
@@ -651,14 +654,10 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	container, directory := f.split(dir)
-	if container == "" {
-		if directory != "" {
-			return nil, fs.ErrorListBucketRequired
-		}
-		return f.listContainers(ctx)
+	if f.container == "" {
+		return f.listContainers(dir)
 	}
-	return f.listDir(container, directory, f.rootDirectory, f.rootContainer == "")
+	return f.listDir(dir)
 }

 // ListR lists the objects and directories of the Fs starting
@@ -676,41 +675,20 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // immediately.
 //
 // Don't implement this unless you have a more efficient way
-// of listing recursively than doing a directory traversal.
+// of listing recursively that doing a directory traversal.
 func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
-	container, directory := f.split(dir)
+	if f.container == "" {
+		return errors.New("container needed for recursive list")
+	}
 	list := walk.NewListRHelper(callback)
-	listR := func(container, directory, prefix string, addContainer bool) error {
-		return f.list(container, directory, prefix, addContainer, true, func(entry fs.DirEntry) error {
-			return list.Add(entry)
-		})
-	}
-	if container == "" {
-		entries, err := f.listContainers(ctx)
-		if err != nil {
-			return err
-		}
-		for _, entry := range entries {
-			err = list.Add(entry)
-			if err != nil {
-				return err
-			}
-			container := entry.Remote()
-			err = listR(container, "", f.rootDirectory, true)
-			if err != nil {
-				return err
-			}
-			// container must be present if listing succeeded
-			f.cache.MarkOK(container)
-		}
-	} else {
-		err = listR(container, directory, f.rootDirectory, f.rootContainer == "")
-		if err != nil {
-			return err
-		}
-		// container must be present if listing succeeded
-		f.cache.MarkOK(container)
+	err = f.list(dir, true, func(entry fs.DirEntry) error {
+		return list.Add(entry)
+	})
+	if err != nil {
+		return err
 	}
+	// container must be present if listing succeeded
+	f.markContainerOK()
 	return list.Flush()
 }
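The restored parsePath splits the first path segment off as the container with the regexp ^/*([^/]*)(.*)$. A runnable sketch of just that parsing step, using the same pattern and the standard library in place of rclone's error helpers:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same pattern as the diff: leading slashes, then the container,
// then everything else as the directory.
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)

func parsePath(p string) (container, directory string, err error) {
	parts := matcher.FindStringSubmatch(p)
	if parts == nil {
		return "", "", fmt.Errorf("couldn't find container in swift path %q", p)
	}
	container, directory = parts[1], parts[2]
	return container, strings.Trim(directory, "/"), nil
}

func main() {
	c, d, _ := parsePath("bucket/a/b/c")
	fmt.Println(c, d) // bucket a/b/c
}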
@@ -759,57 +737,57 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt

 // Mkdir creates the container if it doesn't exist
 func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-	container, _ := f.split(dir)
-	return f.makeContainer(ctx, container)
-}
-
-// makeContainer creates the container if it doesn't exist
-func (f *Fs) makeContainer(ctx context.Context, container string) error {
-	return f.cache.Create(container, func() error {
-		// Check to see if container exists first
-		var err error = swift.ContainerNotFound
-		if !f.noCheckContainer {
-			err = f.pacer.Call(func() (bool, error) {
-				var rxHeaders swift.Headers
-				_, rxHeaders, err = f.c.Container(container)
-				return shouldRetryHeaders(rxHeaders, err)
-			})
-		}
-		if err == swift.ContainerNotFound {
-			headers := swift.Headers{}
-			if f.opt.StoragePolicy != "" {
-				headers["X-Storage-Policy"] = f.opt.StoragePolicy
-			}
-			err = f.pacer.Call(func() (bool, error) {
-				err = f.c.ContainerCreate(container, headers)
-				return shouldRetry(err)
-			})
-			if err == nil {
-				fs.Infof(f, "Container %q created", container)
-			}
-		}
-		return err
-	}, nil)
+	f.containerOKMu.Lock()
+	defer f.containerOKMu.Unlock()
+	if f.containerOK {
+		return nil
+	}
+	// if we are at the root, then it is OK
+	if f.container == "" {
+		return nil
+	}
+	// Check to see if container exists first
+	var err error = swift.ContainerNotFound
+	if !f.noCheckContainer {
+		err = f.pacer.Call(func() (bool, error) {
+			var rxHeaders swift.Headers
+			_, rxHeaders, err = f.c.Container(f.container)
+			return shouldRetryHeaders(rxHeaders, err)
+		})
+	}
+	if err == swift.ContainerNotFound {
+		headers := swift.Headers{}
+		if f.opt.StoragePolicy != "" {
+			headers["X-Storage-Policy"] = f.opt.StoragePolicy
+		}
+		err = f.pacer.Call(func() (bool, error) {
+			err = f.c.ContainerCreate(f.container, headers)
+			return shouldRetry(err)
+		})
+	}
+	if err == nil {
+		f.containerOK = true
+	}
+	return err
 }

 // Rmdir deletes the container if the fs is at the root
 //
 // Returns an error if it isn't empty
 func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-	container, directory := f.split(dir)
-	if container == "" || directory != "" {
+	f.containerOKMu.Lock()
+	defer f.containerOKMu.Unlock()
+	if f.root != "" || dir != "" {
 		return nil
 	}
-	err := f.cache.Remove(container, func() error {
-		err := f.pacer.Call(func() (bool, error) {
-			err := f.c.ContainerDelete(container)
-			return shouldRetry(err)
-		})
-		if err == nil {
-			fs.Infof(f, "Container %q removed", container)
-		}
-		return err
+	var err error
+	err = f.pacer.Call(func() (bool, error) {
+		err = f.c.ContainerDelete(f.container)
+		return shouldRetry(err)
+	})
+	if err == nil {
+		f.containerOK = false
+	}
 	return err
 }

@@ -828,7 +806,7 @@ func (f *Fs) Purge(ctx context.Context) error {
 	go func() {
 		delErr <- operations.DeleteFiles(ctx, toBeDeleted)
 	}()
-	err := f.list(f.rootContainer, f.rootDirectory, f.rootDirectory, f.rootContainer == "", true, func(entry fs.DirEntry) error {
+	err := f.list("", true, func(entry fs.DirEntry) error {
 		if o, ok := entry.(*Object); ok {
 			toBeDeleted <- o
 		}
@@ -855,8 +833,7 @@ func (f *Fs) Purge(ctx context.Context) error {
 //
 // If it isn't possible then return fs.ErrorCantCopy
 func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
-	dstContainer, dstPath := f.split(remote)
-	err := f.makeContainer(ctx, dstContainer)
+	err := f.Mkdir(ctx, "")
 	if err != nil {
 		return nil, err
 	}
@@ -865,10 +842,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
-	srcContainer, srcPath := srcObj.split()
+	srcFs := srcObj.fs
 	err = f.pacer.Call(func() (bool, error) {
 		var rxHeaders swift.Headers
-		rxHeaders, err = f.c.ObjectCopy(srcContainer, srcPath, dstContainer, dstPath, nil)
+		rxHeaders, err = f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
 		return shouldRetryHeaders(rxHeaders, err)
 	})
 	if err != nil {
@@ -977,9 +954,8 @@ func (o *Object) readMetaData() (err error) {
 	}
 	var info swift.Object
 	var h swift.Headers
-	container, containerPath := o.split()
 	err = o.fs.pacer.Call(func() (bool, error) {
-		info, h, err = o.fs.c.Object(container, containerPath)
+		info, h, err = o.fs.c.Object(o.fs.container, o.fs.root+o.remote)
 		return shouldRetryHeaders(h, err)
 	})
 	if err != nil {
@@ -1036,9 +1012,8 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 			newHeaders[k] = v
 		}
 	}
-	container, containerPath := o.split()
 	return o.fs.pacer.Call(func() (bool, error) {
-		err = o.fs.c.ObjectUpdate(container, containerPath, newHeaders)
+		err = o.fs.c.ObjectUpdate(o.fs.container, o.fs.root+o.remote, newHeaders)
 		return shouldRetry(err)
 	})
 }
@@ -1056,10 +1031,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	fs.FixRangeOption(options, o.size)
 	headers := fs.OpenOptionHeaders(options)
 	_, isRanging := headers["Range"]
-	container, containerPath := o.split()
 	err = o.fs.pacer.Call(func() (bool, error) {
 		var rxHeaders swift.Headers
-		in, rxHeaders, err = o.fs.c.ObjectOpen(container, containerPath, !isRanging, headers)
+		in, rxHeaders, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, !isRanging, headers)
 		return shouldRetryHeaders(rxHeaders, err)
 	})
 	return
@@ -1077,20 +1051,20 @@ func min(x, y int64) int64 {
 //
 // if except is passed in then segments with that prefix won't be deleted
 func (o *Object) removeSegments(except string) error {
-	container, containerPath := o.split()
-	segmentsContainer := container + "_segments"
-	err := o.fs.listContainerRoot(segmentsContainer, containerPath, "", false, true, func(remote string, object *swift.Object, isDirectory bool) error {
+	segmentsRoot := o.fs.root + o.remote + "/"
+	err := o.fs.listContainerRoot(o.fs.segmentsContainer, segmentsRoot, "", true, func(remote string, object *swift.Object, isDirectory bool) error {
 		if isDirectory {
 			return nil
 		}
 		if except != "" && strings.HasPrefix(remote, except) {
-			// fs.Debugf(o, "Ignoring current segment file %q in container %q", segmentsRoot+remote, segmentsContainer)
+			// fs.Debugf(o, "Ignoring current segment file %q in container %q", segmentsRoot+remote, o.fs.segmentsContainer)
 			return nil
 		}
-		fs.Debugf(o, "Removing segment file %q in container %q", remote, segmentsContainer)
+		segmentPath := segmentsRoot + remote
+		fs.Debugf(o, "Removing segment file %q in container %q", segmentPath, o.fs.segmentsContainer)
 		var err error
 		return o.fs.pacer.Call(func() (bool, error) {
-			err = o.fs.c.ObjectDelete(segmentsContainer, remote)
+			err = o.fs.c.ObjectDelete(o.fs.segmentsContainer, segmentPath)
 			return shouldRetry(err)
 		})
 	})
@@ -1099,11 +1073,11 @@ func (o *Object) removeSegments(except string) error {
 	}
 	// remove the segments container if empty, ignore errors
 	err = o.fs.pacer.Call(func() (bool, error) {
-		err = o.fs.c.ContainerDelete(segmentsContainer)
+		err = o.fs.c.ContainerDelete(o.fs.segmentsContainer)
 		return shouldRetry(err)
 	})
 	if err == nil {
-		fs.Debugf(o, "Removed empty container %q", segmentsContainer)
+		fs.Debugf(o, "Removed empty container %q", o.fs.segmentsContainer)
 	}
 	return nil
 }
@@ -1128,13 +1102,11 @@ func urlEncode(str string) string {
 // updateChunks updates the existing object using chunks to a separate
 // container. It returns a string which prefixes current segments.
 func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
-	container, containerPath := o.split()
-	segmentsContainer := container + "_segments"
 	// Create the segmentsContainer if it doesn't exist
 	var err error
 	err = o.fs.pacer.Call(func() (bool, error) {
 		var rxHeaders swift.Headers
-		_, rxHeaders, err = o.fs.c.Container(segmentsContainer)
+		_, rxHeaders, err = o.fs.c.Container(o.fs.segmentsContainer)
 		return shouldRetryHeaders(rxHeaders, err)
 	})
 	if err == swift.ContainerNotFound {
@@ -1143,7 +1115,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
 			headers["X-Storage-Policy"] = o.fs.opt.StoragePolicy
 		}
 		err = o.fs.pacer.Call(func() (bool, error) {
-			err = o.fs.c.ContainerCreate(segmentsContainer, headers)
+			err = o.fs.c.ContainerCreate(o.fs.segmentsContainer, headers)
 			return shouldRetry(err)
 		})
 	}
@@ -1154,7 +1126,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
 	left := size
 	i := 0
 	uniquePrefix := fmt.Sprintf("%s/%d", swift.TimeToFloatString(time.Now()), size)
-	segmentsPath := path.Join(containerPath, uniquePrefix)
+	segmentsPath := fmt.Sprintf("%s%s/%s", o.fs.root, o.remote, uniquePrefix)
 	in := bufio.NewReader(in0)
 	segmentInfos := make([]string, 0, ((size / int64(o.fs.opt.ChunkSize)) + 1))
 	for {
@@ -1163,7 +1135,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
 		if left > 0 {
 			return "", err // read less than expected
 		}
-		fs.Debugf(o, "Uploading segments into %q seems done (%v)", segmentsContainer, err)
+		fs.Debugf(o, "Uploading segments into %q seems done (%v)", o.fs.segmentsContainer, err)
 		break
 		}
 		n := int64(o.fs.opt.ChunkSize)
@@ -1174,45 +1146,46 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
 		}
 		segmentReader := io.LimitReader(in, n)
 		segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
-		fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, segmentsContainer)
+		fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, o.fs.segmentsContainer)
 		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
 			var rxHeaders swift.Headers
-			rxHeaders, err = o.fs.c.ObjectPut(segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
+			rxHeaders, err = o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
 			if err == nil {
 				segmentInfos = append(segmentInfos, segmentPath)
 			}
 			return shouldRetryHeaders(rxHeaders, err)
 		})
 		if err != nil {
-			deleteChunks(o, segmentsContainer, segmentInfos)
+			deleteChunks(o, segmentInfos)
 			segmentInfos = nil
 			return "", err
 		}
 		i++
 	}
 	// Upload the manifest
-	headers["X-Object-Manifest"] = urlEncode(fmt.Sprintf("%s/%s", segmentsContainer, segmentsPath))
+	headers["X-Object-Manifest"] = urlEncode(fmt.Sprintf("%s/%s", o.fs.segmentsContainer, segmentsPath))
 	headers["Content-Length"] = "0" // set Content-Length as we know it
 	emptyReader := bytes.NewReader(nil)
+	manifestName := o.fs.root + o.remote
 	err = o.fs.pacer.Call(func() (bool, error) {
 		var rxHeaders swift.Headers
-		rxHeaders, err = o.fs.c.ObjectPut(container, containerPath, emptyReader, true, "", contentType, headers)
+		rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", contentType, headers)
 		return shouldRetryHeaders(rxHeaders, err)
 	})
 	if err != nil {
-		deleteChunks(o, segmentsContainer, segmentInfos)
+		deleteChunks(o, segmentInfos)
 		segmentInfos = nil
 	}
 	return uniquePrefix + "/", err
 }

-func deleteChunks(o *Object, segmentsContainer string, segmentInfos []string) {
+func deleteChunks(o *Object, segmentInfos []string) {
 	if segmentInfos != nil && len(segmentInfos) > 0 {
 		for _, v := range segmentInfos {
-			fs.Debugf(o, "Delete segment file %q on %q", v, segmentsContainer)
-			e := o.fs.c.ObjectDelete(segmentsContainer, v)
+			fs.Debugf(o, "Delete segment file %q on %q", v, o.fs.segmentsContainer)
+			e := o.fs.c.ObjectDelete(o.fs.segmentsContainer, v)
 			if e != nil {
-				fs.Errorf(o, "Error occured in delete segment file %q on %q , error: %q", v, segmentsContainer, e)
+				fs.Errorf(o, "Error occured in delete segment file %q on %q , error: %q", v, o.fs.segmentsContainer, e)
 			}
 		}
 	}
@@ -1222,11 +1195,10 @@ func deleteChunks(o *Object, segmentsContainer string, segmentInfos []string) {
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	container, containerPath := o.split()
-	if container == "" {
-		return fserrors.FatalError(errors.New("can't upload files to the root"))
+	if o.fs.container == "" {
+		return fserrors.FatalError(errors.New("container name needed in remote"))
 	}
-	err := o.fs.makeContainer(ctx, container)
+	err := o.fs.Mkdir(ctx, "")
 	if err != nil {
 		return err
 	}
@@ -1252,17 +1224,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		}
 		o.headers = nil // wipe old metadata
 	} else {
-		var inCount *readers.CountingReader
-		if size >= 0 {
-			headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length if we know it
-		} else {
-			// otherwise count the size for later
-			inCount = readers.NewCountingReader(in)
-			in = inCount
-		}
 		var rxHeaders swift.Headers
 		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-			rxHeaders, err = o.fs.c.ObjectPut(container, containerPath, in, true, "", contentType, headers)
+			rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
 			return shouldRetryHeaders(rxHeaders, err)
 		})
 		if err != nil {
@@ -1275,10 +1242,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		o.md5 = rxHeaders["ETag"]
 		o.contentType = contentType
 		o.headers = headers
-		if inCount != nil {
-			// update the size if streaming from the reader
-			o.size = int64(inCount.BytesRead())
-		}
 	}

 	// If file was a dynamic large object then remove old/all segments
@@ -1295,14 +1258,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

 // Remove an object
 func (o *Object) Remove(ctx context.Context) error {
-	container, containerPath := o.split()
 	isDynamicLargeObject, err := o.isDynamicLargeObject()
 	if err != nil {
 		return err
 	}
 	// Remove file/manifest first
 	err = o.fs.pacer.Call(func() (bool, error) {
-		err = o.fs.c.ObjectDelete(container, containerPath)
+		err = o.fs.c.ObjectDelete(o.fs.container, o.fs.root+o.remote)
 		return shouldRetry(err)
 	})
 	if err != nil {
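The dynamic-large-object code above derives every name from the container and object path: segments live in "<container>_segments" under a unique per-upload prefix, and the manifest's X-Object-Manifest header points at that prefix. A sketch of the naming scheme only (it uses Unix seconds where the real code uses swift.TimeToFloatString, and omits the swift API calls entirely):

package main

import (
	"fmt"
	"time"
)

// segmentNames mirrors the layout in the diff: segments are
// "<root><remote>/<timestamp>/<size>/00000000", "…/00000001", …
// and the manifest header value is "<segments container>/<segments path>".
func segmentNames(container, root, remote string, size int64, nSegments int) (manifest string, segments []string) {
	segmentsContainer := container + "_segments"
	uniquePrefix := fmt.Sprintf("%d/%d", time.Now().Unix(), size) // simplified timestamp
	segmentsPath := fmt.Sprintf("%s%s/%s", root, remote, uniquePrefix)
	for i := 0; i < nSegments; i++ {
		segments = append(segments, fmt.Sprintf("%s/%08d", segmentsPath, i))
	}
	manifest = fmt.Sprintf("%s/%s", segmentsContainer, segmentsPath)
	return manifest, segments
}

func main() {
	m, segs := segmentNames("bucket", "dir/", "big.bin", 3<<20, 2)
	fmt.Println(m)
	for _, s := range segs {
		fmt.Println(s)
	}
}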
@@ -2,19 +2,10 @@
 package swift

 import (
-	"bytes"
-	"context"
-	"io"
 	"testing"

 	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/object"
-	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/fstest/fstests"
-	"github.com/rclone/rclone/lib/random"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
 )

 // TestIntegration runs integration tests against the remote
@@ -30,50 +21,3 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
 }

 var _ fstests.SetUploadChunkSizer = (*Fs)(nil)
-
-// Check that PutStream works with NoChunk as it is the major code
-// deviation
-func (f *Fs) testNoChunk(t *testing.T) {
-	ctx := context.Background()
-	f.opt.NoChunk = true
-	defer func() {
-		f.opt.NoChunk = false
-	}()
-
-	file := fstest.Item{
-		ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
-		Path:    "piped data no chunk.txt",
-		Size:    -1, // use unknown size during upload
-	}
-
-	const contentSize = 100
-
-	contents := random.String(contentSize)
-	buf := bytes.NewBufferString(contents)
-	uploadHash := hash.NewMultiHasher()
-	in := io.TeeReader(buf, uploadHash)
-
-	file.Size = -1
-	obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
-	obj, err := f.Features().PutStream(ctx, in, obji)
-	require.NoError(t, err)
-
-	file.Hashes = uploadHash.Sums()
-	file.Size = int64(contentSize) // use correct size when checking
-	file.Check(t, obj, f.Precision())
-
-	// Re-read the object and check again
-	obj, err = f.NewObject(ctx, file.Path)
-	require.NoError(t, err)
-	file.Check(t, obj, f.Precision())
-
-	// Delete the object
-	assert.NoError(t, obj.Remove(ctx))
-}
-
-// Additional tests that aren't in the framework
-func (f *Fs) InternalTest(t *testing.T) {
-	t.Run("NoChunk", f.testNoChunk)
-}
-
-var _ fstests.InternalTester = (*Fs)(nil)
@@ -21,7 +21,7 @@ import (
 func init() {
 	fsi := &fs.RegInfo{
 		Name:        "union",
-		Description: "Union merges the contents of several remotes",
+		Description: "A stackable unification remote, which can appear to merge the contents of several remotes",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name: "remotes",
@@ -9,7 +9,6 @@ package main

 import (
 	"archive/tar"
-	"compress/bzip2"
 	"compress/gzip"
 	"encoding/json"
 	"flag"
@@ -350,8 +349,6 @@ func untar(srcFile, fileName, extractDir string) {
 			log.Fatalf("Couldn't open gzip: %v", err)
 		}
 		in = gzf
-	} else if srcExt == ".bz2" {
-		in = bzip2.NewReader(f)
 	}

 	tarReader := tar.NewReader(in)
@@ -18,7 +18,6 @@ docs = [
     "docs.md",
     "remote_setup.md",
    "filtering.md",
-    "gui.md",
    "rc.md",
    "overview.md",
    "flags.md",
@@ -48,8 +47,6 @@ docs = [
    "qingstor.md",
    "swift.md",
    "pcloud.md",
-    "premiumizeme.md",
-    "putio.md",
    "sftp.md",
    "union.md",
    "webdav.md",
@@ -1,25 +0,0 @@
-#!/usr/bin/env python3
-"""
-A demo proxy for rclone serve sftp/webdav/ftp etc
-
-This takes the incoming user/pass and converts it into an sftp backend
-running on localhost.
-"""
-
-import sys
-import json
-
-def main():
-    i = json.load(sys.stdin)
-    o = {
-        "type": "sftp",     # type of backend
-        "_root": "",        # root of the fs
-        "_obscure": "pass", # comma sep list of fields to obscure
-        "user": i["user"],
-        "pass": i["pass"],
-        "host": "127.0.0.1",
-    }
-    json.dump(o, sys.stdout, indent="\t")
-
-if __name__ == "__main__":
-    main()
@@ -9,7 +9,6 @@ package cmd

 import (
 	"fmt"
 	"log"
-	"math/rand"
 	"os"
 	"os/exec"
 	"path"
@@ -493,7 +492,6 @@ func AddBackendFlags() {

 // Main runs rclone interpreting flags and commands out of os.Args
 func Main() {
-	rand.Seed(time.Now().Unix())
 	setupRootCommand(Root)
 	AddBackendFlags()
 	if err := Root.Execute(); err != nil {
@@ -1,15 +1,9 @@
 package config

 import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"os"
-	"sort"
+	"errors"

-	"github.com/pkg/errors"
 	"github.com/rclone/rclone/cmd"
-	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/rc"
 	"github.com/spf13/cobra"
@@ -26,9 +20,6 @@ func init() {
 	configCommand.AddCommand(configUpdateCommand)
 	configCommand.AddCommand(configDeleteCommand)
 	configCommand.AddCommand(configPasswordCommand)
-	configCommand.AddCommand(configReconnectCommand)
-	configCommand.AddCommand(configDisconnectCommand)
-	configCommand.AddCommand(configUserInfoCommand)
 }

 var configCommand = &cobra.Command{
@@ -216,99 +207,3 @@ func argsToMap(args []string) (out rc.Params, err error) {
 	}
 	return out, nil
 }
-
-var configReconnectCommand = &cobra.Command{
-	Use:   "reconnect remote:",
-	Short: `Re-authenticates user with remote.`,
-	Long: `
-This reconnects remote: passed in to the cloud storage system.
-
-To disconnect the remote use "rclone config disconnect".
-
-This normally means going through the interactive oauth flow again.
-`,
-	RunE: func(command *cobra.Command, args []string) error {
-		cmd.CheckArgs(1, 1, command, args)
-		fsInfo, configName, _, config, err := fs.ConfigFs(args[0])
-		if err != nil {
-			return err
-		}
-		if fsInfo.Config == nil {
-			return errors.Errorf("%s: doesn't support Reconnect", configName)
-		}
-		fsInfo.Config(configName, config)
-		return nil
-	},
-}
-
-var configDisconnectCommand = &cobra.Command{
-	Use:   "disconnect remote:",
-	Short: `Disconnects user from remote`,
-	Long: `
-This disconnects the remote: passed in to the cloud storage system.
-
-This normally means revoking the oauth token.
-
-To reconnect use "rclone config reconnect".
-`,
-	RunE: func(command *cobra.Command, args []string) error {
-		cmd.CheckArgs(1, 1, command, args)
-		f := cmd.NewFsSrc(args)
-		doDisconnect := f.Features().Disconnect
-		if doDisconnect == nil {
-			return errors.Errorf("%v doesn't support Disconnect", f)
-		}
-		err := doDisconnect(context.Background())
-		if err != nil {
-			return errors.Wrap(err, "Disconnect call failed")
-		}
-		return nil
-	},
-}
-
-var (
-	jsonOutput bool
-)
-
-func init() {
-	configUserInfoCommand.Flags().BoolVar(&jsonOutput, "json", false, "Format output as JSON")
-}
-
-var configUserInfoCommand = &cobra.Command{
-	Use:   "userinfo remote:",
-	Short: `Prints info about logged in user of remote.`,
-	Long: `
-This prints the details of the person logged in to the cloud storage
-system.
-`,
-	RunE: func(command *cobra.Command, args []string) error {
-		cmd.CheckArgs(1, 1, command, args)
-		f := cmd.NewFsSrc(args)
-		doUserInfo := f.Features().UserInfo
-		if doUserInfo == nil {
-			return errors.Errorf("%v doesn't support UserInfo", f)
-		}
-		u, err := doUserInfo(context.Background())
-		if err != nil {
-			return errors.Wrap(err, "UserInfo call failed")
-		}
-		if jsonOutput {
-			out := json.NewEncoder(os.Stdout)
-			out.SetIndent("", "\t")
-			return out.Encode(u)
-		}
-		var keys []string
-		var maxKeyLen int
-		for key := range u {
-			keys = append(keys, key)
-			if len(key) > maxKeyLen {
-				maxKeyLen = len(key)
-			}
-		}
-		sort.Strings(keys)
-		for _, key := range keys {
-			fmt.Printf("%*s: %s\n", maxKeyLen, key, u[key])
-		}
-		return nil
-	},
-}
@@ -300,7 +300,6 @@ func showBackend(name string) {
 	optionsType := "standard"
 	for _, opts := range []fs.Options{standardOptions, advancedOptions} {
 		if len(opts) == 0 {
-			optionsType = "advanced"
 			continue
 		}
 		fmt.Printf("### %s Options\n\n", strings.Title(optionsType))
@@ -18,7 +18,7 @@ import (
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/object"
-	"github.com/rclone/rclone/lib/random"
+	"github.com/rclone/rclone/fstest"
 	"github.com/spf13/cobra"
 )

@@ -118,7 +118,7 @@ func (r *results) Print() {

 // writeFile writes a file with some random contents
 func (r *results) writeFile(path string) (fs.Object, error) {
-	contents := random.String(50)
+	contents := fstest.RandomString(50)
 	src := object.NewStaticObjectInfo(path, time.Now(), int64(len(contents)), true, nil, r.f)
 	return r.f.Put(r.ctx, bytes.NewBufferString(contents), src)
 }
@@ -4,6 +4,7 @@ package mount

 import (
 	"context"
+	"io"
 	"time"

 	"bazil.org/fuse"
@@ -73,6 +74,11 @@ func (f *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenR
 		return nil, translateError(err)
 	}

+	// See if seeking is supported and set FUSE hint accordingly
+	if _, err = handle.Seek(0, io.SeekCurrent); err != nil {
+		resp.Flags |= fuse.OpenNonSeekable
+	}
+
 	return &FileHandle{handle}, nil
 }
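The added lines probe seekability with a zero-offset Seek from the current position: it is a no-op on seekable handles and fails otherwise, which makes it a cheap capability test for the FUSE OpenNonSeekable hint. A standalone sketch of the same probe against any io.Reader:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// isSeekable reports whether r supports seeking, using the same trick as the
// hunk above: type-assert io.Seeker, then try a zero-offset relative Seek.
func isSeekable(r io.Reader) bool {
	s, ok := r.(io.Seeker)
	if !ok {
		return false
	}
	_, err := s.Seek(0, io.SeekCurrent) // moves nothing, just tests support
	return err == nil
}

func main() {
	fmt.Println(isSeekable(bytes.NewReader([]byte("x")))) // true
	fmt.Println(isSeekable(strings.NewReader("x")))       // true
	fmt.Println(isSeekable(io.LimitReader(nil, 0)))       // false: not a Seeker
}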
@@ -70,7 +70,7 @@ func mountOptions(device string) (options []fuse.MountOption) {
 	if len(mountlib.ExtraOptions) > 0 {
 		fs.Errorf(nil, "-o/--option not supported with this FUSE backend")
 	}
-	if len(mountlib.ExtraFlags) > 0 {
+	if len(mountlib.ExtraOptions) > 0 {
 		fs.Errorf(nil, "--fuse-flag not supported with this FUSE backend")
 	}
 	return options
@@ -161,7 +161,10 @@ applications won't work with their files on an rclone mount without
 Caching](#file-caching) section for more info.

 The bucket based remotes (eg Swift, S3, Google Compute Storage, B2,
-Hubic) do not support the concept of empty directories, so empty
+Hubic) won't work from the root - you will need to specify a bucket,
+or a path within the bucket. So ` + "`swift:`" + ` won't work whereas
+` + "`swift:bucket`" + ` will as will ` + "`swift:bucket/path`" + `.
+None of these support the concept of directories, so empty
 directories will have a tendency to disappear once they fall out of
 the directory cache.
@@ -115,7 +115,7 @@ func newRun() *Run {
 	fstest.Initialise()

 	var err error
-	r.fremote, r.fremoteName, r.cleanRemote, err = fstest.RandomRemote()
+	r.fremote, r.fremoteName, r.cleanRemote, err = fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir)
 	if err != nil {
 		log.Fatalf("Failed to open remote %q: %v", *fstest.RemoteName, err)
 	}
cmd/rc/rc.go
@@ -117,16 +117,7 @@ func doCall(path string, in rc.Params) (out rc.Params, err error) {
 	if call == nil {
 		return nil, errors.Errorf("method %q not found", path)
 	}
-	out, err = call.Fn(context.Background(), in)
-	if err != nil {
-		return nil, errors.Wrap(err, "loopback call failed")
-	}
-	// Reshape (serialize then deserialize) the data so it is in the form expected
-	err = rc.Reshape(&out, out)
-	if err != nil {
-		return nil, errors.Wrap(err, "loopback reshape failed")
-	}
-	return out, nil
+	return call.Fn(context.Background(), in)
 }

 // Do HTTP request
@@ -236,7 +227,7 @@ func list() error {
 	if !ok {
 		return errors.New("bad JSON")
 	}
-	fmt.Printf("### %s: %s {#%s}\n\n", info["Path"], info["Title"], info["Path"])
+	fmt.Printf("### %s: %s\n\n", info["Path"], info["Title"])
 	fmt.Printf("%s\n\n", info["Help"])
 	if authRequired := info["AuthRequired"]; authRequired != nil {
 		if authRequired.(bool) {
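The removed Reshape step is described by its own comment as "serialize then deserialize": it round-trips the loopback result so it has the same shape an HTTP caller would see. A sketch of that idea, assuming JSON as the wire format as the comment suggests (reshape here is an illustrative helper, not rclone's rc.Reshape):

package main

import (
	"encoding/json"
	"fmt"
)

// reshape round-trips in through JSON into out, so in-process results take
// the same shape as results decoded from an HTTP response body
// (e.g. ints become float64 inside map[string]interface{}).
func reshape(out interface{}, in interface{}) error {
	b, err := json.Marshal(in)
	if err != nil {
		return err
	}
	return json.Unmarshal(b, out)
}

func main() {
	in := map[string]interface{}{"count": 3}
	var out map[string]interface{}
	if err := reshape(&out, in); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", out["count"]) // float64 after the round trip
}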
@@ -3,14 +3,12 @@ package rcd
|
||||
import (
|
||||
"archive/zip"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/cmd"
|
||||
@@ -19,7 +17,6 @@ import (
|
||||
"github.com/rclone/rclone/fs/rc/rcflags"
|
||||
"github.com/rclone/rclone/fs/rc/rcserver"
|
||||
"github.com/rclone/rclone/lib/errors"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@@ -57,23 +54,6 @@ See the [rc documentation](/rc/) for more info on the rc flags.
|
||||
if err := checkRelease(rcflags.Opt.WebGUIUpdate); err != nil {
|
||||
log.Fatalf("Error while fetching the latest release of rclone-webui-react %v", err)
|
||||
}
|
||||
if rcflags.Opt.NoAuth {
|
||||
rcflags.Opt.NoAuth = false
|
||||
fs.Infof(nil, "Cannot run web-gui without authentication, using default auth")
|
||||
}
|
||||
if rcflags.Opt.HTTPOptions.BasicUser == "" {
|
||||
rcflags.Opt.HTTPOptions.BasicUser = "gui"
|
||||
fs.Infof(nil, "Using default username: %s \n", rcflags.Opt.HTTPOptions.BasicUser)
|
||||
}
|
||||
if rcflags.Opt.HTTPOptions.BasicPass == "" {
|
||||
randomPass, err := random.Password(128)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to make password: %v", err)
|
||||
}
|
||||
rcflags.Opt.HTTPOptions.BasicPass = randomPass
|
||||
fs.Infof(nil, "No password specified. Using random password: %s \n", randomPass)
|
||||
}
|
||||
rcflags.Opt.Serve = true
|
||||
}
|
||||
|
||||
s, err := rcserver.Start(&rcflags.Opt)
|
||||
@@ -90,34 +70,29 @@ See the [rc documentation](/rc/) for more info on the rc flags.
|
||||
|
||||
//checkRelease is a helper function to download and setup latest release of rclone-webui-react
|
||||
func checkRelease(shouldUpdate bool) (err error) {
|
||||
// Get the latest release details
|
||||
WebUIURL, tag, size, err := getLatestReleaseURL()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
zipName := tag + ".zip"
|
||||
cachePath := filepath.Join(config.CacheDir, "webgui")
|
||||
zipPath := filepath.Join(cachePath, zipName)
|
||||
extractPath := filepath.Join(cachePath, "current")
|
||||
oldUpdateExists := exists(extractPath)
|
||||
|
||||
    // if the old update does not exist or a forced update is enforced.
    // TODO: Add hashing to check integrity of the previous update.
    if !oldUpdateExists || shouldUpdate {
        // Get the latest release details
        WebUIURL, tag, size, err := getLatestReleaseURL()
        if err != nil {
            return err
        if !exists(cachePath) {
            if err := os.MkdirAll(cachePath, 755); err != nil {
                fs.Logf(nil, "Error creating cache directory: %s", cachePath)
            }

        zipName := tag + ".zip"
        zipPath := filepath.Join(cachePath, zipName)

        if !exists(cachePath) {
            if err := os.MkdirAll(cachePath, 0755); err != nil {
                fs.Logf(nil, "Error creating cache directory: %s", cachePath)
                return err
            }
        }

    }
    // Load the file
    exists := exists(zipPath)
    // if the zipFile does not exist or forced update is enforced.
    if !exists || shouldUpdate {
        fs.Logf(nil, "A new release for gui is present at "+WebUIURL)
        fs.Logf(nil, "Downloading webgui binary. Please wait. [Size: %s, Path : %s]\n", strconv.Itoa(size), zipPath)

        // download the zip from latest url
        err = downloadFile(zipPath, WebUIURL)
        err := downloadFile(zipPath, WebUIURL)
        if err != nil {
            return err
        }
@@ -181,8 +156,6 @@ func downloadFile(filepath string, url string) error {

// unzip is a helper function to unzip a file specified in src to path dest
func unzip(src, dest string) (err error) {
    dest = filepath.Clean(dest) + string(os.PathSeparator)

    r, err := zip.OpenReader(src)
    if err != nil {
        return err
@@ -195,27 +168,23 @@ func unzip(src, dest string) (err error) {

    // Closure to address file descriptors issue with all the deferred .Close() methods
    extractAndWriteFile := func(f *zip.File) error {
        path := filepath.Join(dest, f.Name)
        // Check for Zip Slip: https://github.com/rclone/rclone/issues/3529
        if !strings.HasPrefix(path, dest) {
            return fmt.Errorf("%s: illegal file path", path)
        }
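The prefix check above is the whole Zip Slip defence: filepath.Join cleans the entry name, so a hostile name full of ".." lands outside dest and loses the prefix. A small standalone illustration (the paths here are made up for the example):

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "strings"
)

func main() {
    dest := filepath.Clean("/tmp/extract") + string(os.PathSeparator)
    for _, name := range []string{"docs/readme.txt", "../../etc/passwd"} {
        path := filepath.Join(dest, name) // Join calls Clean, resolving ".."
        fmt.Printf("%-20s allowed=%v\n", name, strings.HasPrefix(path, dest))
    }
}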

        rc, err := f.Open()
        if err != nil {
            return err
        }
        defer fs.CheckClose(rc, &err)

        path := filepath.Join(dest, f.Name)

        if f.FileInfo().IsDir() {
            if err := os.MkdirAll(path, 0755); err != nil {
            if err := os.MkdirAll(path, f.Mode()); err != nil {
                return err
            }
        } else {
            if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
            if err := os.MkdirAll(filepath.Dir(path), f.Mode()); err != nil {
                return err
            }
            f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
            f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
            if err != nil {
                return err
            }

@@ -1,11 +1,9 @@
package dlna

import (
    "context"
    "encoding/xml"
    "fmt"
    "log"
    "mime"
    "net/http"
    "net/url"
    "os"
@@ -13,7 +11,6 @@ import (
    "path/filepath"
    "regexp"
    "sort"
    "strings"

    "github.com/anacrolix/dms/dlna"
    "github.com/anacrolix/dms/upnp"
@@ -23,39 +20,6 @@ import (
    "github.com/rclone/rclone/vfs"
)

// Add a minimal number of mime types to augment go's built in types
// for environments which don't have access to a mime.types file (eg
// Termux on android)
func init() {
    for _, t := range []struct {
        mimeType   string
        extensions string
    }{
        {"audio/flac", ".flac"},
        {"audio/mpeg", ".mpga,.mpega,.mp2,.mp3,.m4a"},
        {"audio/ogg", ".oga,.ogg,.opus,.spx"},
        {"audio/x-wav", ".wav"},
        {"image/tiff", ".tiff,.tif"},
        {"video/dv", ".dif,.dv"},
        {"video/fli", ".fli"},
        {"video/mpeg", ".mpeg,.mpg,.mpe"},
        {"video/MP2T", ".ts"},
        {"video/mp4", ".mp4"},
        {"video/quicktime", ".qt,.mov"},
        {"video/ogg", ".ogv"},
        {"video/webm", ".webm"},
        {"video/x-msvideo", ".avi"},
        {"video/x-matroska", ".mpv,.mkv"},
    } {
        for _, ext := range strings.Split(t.extensions, ",") {
            err := mime.AddExtensionType(ext, t.mimeType)
            if err != nil {
                panic(err)
            }
        }
    }
}
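mime.AddExtensionType feeds the same table that mime.TypeByExtension reads, which is what makes the lookups later in this file work even without an /etc/mime.types. A quick standalone check (example values only):

package main

import (
    "fmt"
    "mime"
)

func main() {
    if err := mime.AddExtensionType(".mkv", "video/x-matroska"); err != nil {
        panic(err)
    }
    fmt.Println(mime.TypeByExtension(".mkv")) // video/x-matroska
}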

type contentDirectoryService struct {
    *server
    upnp.Eventing
@@ -69,7 +33,7 @@ var mediaMimeTypeRegexp = regexp.MustCompile("^(video|audio|image)/")

// Turns the given entry and DMS host into a UPnP object. A nil object is
// returned if the entry is not of interest.
func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fileInfo vfs.Node, host string) (ret interface{}, err error) {
func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fileInfo os.FileInfo, host string) (ret interface{}, err error) {
    obj := upnpav.Object{
        ID:         cdsObject.ID(),
        Restricted: 1,
@@ -87,15 +51,7 @@ func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fi
        return
    }

    // Read the mime type from the fs.Object if possible,
    // otherwise fall back to working out what it is from the file path.
    var mimeType string
    if o, ok := fileInfo.DirEntry().(fs.Object); ok {
        mimeType = fs.MimeType(context.TODO(), o)
    } else {
        mimeType = fs.MimeTypeFromName(fileInfo.Name())
    }

    mimeType := fs.MimeTypeFromName(fileInfo.Name())
    mediaType := mediaMimeTypeRegexp.FindStringSubmatch(mimeType)
    if mediaType == nil {
        return

@@ -47,9 +47,10 @@ func listInterfaces() []net.Interface {

    var active []net.Interface
    for _, intf := range ifs {
        if intf.Flags&net.FlagUp != 0 && intf.Flags&net.FlagMulticast != 0 && intf.MTU > 0 {
            active = append(active, intf)
        if intf.Flags&net.FlagUp == 0 || intf.MTU <= 0 {
            continue
        }
        active = append(active, intf)
    }
    return active
}

@@ -5,68 +5,30 @@
package ftp

import (
    "bytes"
    "errors"
    "fmt"
    "io"
    "net"
    "os"
    "os/user"
    "runtime"
    "strconv"
    "sync"

    ftp "github.com/goftp/server"
    "github.com/pkg/errors"
    "github.com/rclone/rclone/cmd"
    "github.com/rclone/rclone/cmd/serve/proxy"
    "github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
    "github.com/rclone/rclone/cmd/serve/ftp/ftpflags"
    "github.com/rclone/rclone/cmd/serve/ftp/ftpopt"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/accounting"
    "github.com/rclone/rclone/fs/config/flags"
    "github.com/rclone/rclone/fs/log"
    "github.com/rclone/rclone/fs/rc"
    "github.com/rclone/rclone/vfs"
    "github.com/rclone/rclone/vfs/vfsflags"
    "github.com/spf13/cobra"
    "github.com/spf13/pflag"
)

// Options contains options for the FTP server
type Options struct {
    //TODO add more options
    ListenAddr   string // Port to listen on
    PublicIP     string // Public IP address to advertise for passive connections
    PassivePorts string // Passive ports range
    BasicUser    string // single username for basic auth if not using Htpasswd
    BasicPass    string // password for BasicUser
}

// DefaultOpt is the default values used for Options
var DefaultOpt = Options{
    ListenAddr:   "localhost:2121",
    PublicIP:     "",
    PassivePorts: "30000-32000",
    BasicUser:    "anonymous",
    BasicPass:    "",
}

// Opt is options set by command line flags
var Opt = DefaultOpt

// AddFlags adds flags for ftp
func AddFlags(flagSet *pflag.FlagSet) {
    rc.AddOption("ftp", &Opt)
    flags.StringVarP(flagSet, &Opt.ListenAddr, "addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to.")
    flags.StringVarP(flagSet, &Opt.PublicIP, "public-ip", "", Opt.PublicIP, "Public IP address to advertise for passive connections.")
    flags.StringVarP(flagSet, &Opt.PassivePorts, "passive-port", "", Opt.PassivePorts, "Passive port range to use.")
    flags.StringVarP(flagSet, &Opt.BasicUser, "user", "", Opt.BasicUser, "User name for authentication.")
    flags.StringVarP(flagSet, &Opt.BasicPass, "pass", "", Opt.BasicPass, "Password for authentication. (empty value allows every password)")
}

func init() {
    ftpflags.AddFlags(Command.Flags())
    vfsflags.AddFlags(Command.Flags())
    proxyflags.AddFlags(Command.Flags())
    AddFlags(Command.Flags())
}

// Command definition for cobra
@@ -77,33 +39,12 @@ var Command = &cobra.Command{
rclone serve ftp implements a basic ftp server to serve the
remote over FTP protocol. This can be viewed with an FTP client
or you can make a remote of type ftp to read and write it.

### Server options

Use --addr to specify which IP address and port the server should
listen on, eg --addr 1.2.3.4:8000 or --addr :8080 to listen to all
IPs. By default it only listens on localhost. You can use port
:0 to let the OS choose an available port.

If you set --addr to listen on a public or LAN accessible IP address
then using Authentication is advised - see the next section for info.

#### Authentication

By default this will serve files without needing a login.

You can set a single username and password with the --user and --pass flags.
` + vfs.Help + proxy.Help,
` + ftpopt.Help + vfs.Help,
    Run: func(command *cobra.Command, args []string) {
        var f fs.Fs
        if proxyflags.Opt.AuthProxy == "" {
            cmd.CheckArgs(1, 1, command, args)
            f = cmd.NewFsSrc(args)
        } else {
            cmd.CheckArgs(0, 0, command, args)
        }
        cmd.CheckArgs(1, 1, command, args)
        f := cmd.NewFsSrc(args)
        cmd.Run(false, false, command, func() error {
            s, err := newServer(f, &Opt)
            s, err := newServer(f, &ftpflags.Opt)
            if err != nil {
                return err
            }
@@ -114,17 +55,12 @@ You can set a single username and password with the --user and --pass flags.

// server contains everything to run the server
type server struct {
    f         fs.Fs
    srv       *ftp.Server
    opt       Options
    vfs       *vfs.VFS
    proxy     *proxy.Proxy
    pendingMu sync.Mutex
    pending   map[string]*Driver // pending Driver~s that haven't got their VFS
    f   fs.Fs
    srv *ftp.Server
}

// Make a new FTP to serve the remote
func newServer(f fs.Fs, opt *Options) (*server, error) {
func newServer(f fs.Fs, opt *ftpopt.Options) (*server, error) {
    host, port, err := net.SplitHostPort(opt.ListenAddr)
    if err != nil {
        return nil, errors.New("Failed to parse host:port")
@@ -134,31 +70,27 @@ func newServer(f fs.Fs, opt *Options) (*server, error) {
        return nil, errors.New("Failed to parse host:port")
    }

    s := &server{
        f:       f,
        opt:     *opt,
        pending: make(map[string]*Driver),
    }
    if proxyflags.Opt.AuthProxy != "" {
        s.proxy = proxy.New(&proxyflags.Opt)
    } else {
        s.vfs = vfs.New(f, &vfsflags.Opt)
    }

    ftpopt := &ftp.ServerOpts{
        Name:           "Rclone FTP Server",
        WelcomeMessage: "Welcome to Rclone " + fs.Version + " FTP Server",
        Factory:        s, // implemented by NewDriver method
        Hostname:       host,
        Port:           portNum,
        PublicIp:       opt.PublicIP,
        PassivePorts:   opt.PassivePorts,
        Auth:           s, // implemented by CheckPasswd method
        Logger:         &Logger{},
        WelcomeMessage: "Welcome on Rclone FTP Server",
        Factory: &DriverFactory{
            vfs: vfs.New(f, &vfsflags.Opt),
        },
        Hostname:     host,
        Port:         portNum,
        PublicIp:     opt.PublicIP,
        PassivePorts: opt.PassivePorts,
        Auth: &Auth{
            BasicUser: opt.BasicUser,
            BasicPass: opt.BasicPass,
        },
        Logger: &Logger{},
        //TODO implement a maximum of https://godoc.org/github.com/goftp/server#ServerOpts
    }
    s.srv = ftp.NewServer(ftpopt)
    return s, nil
    return &server{
        f:   f,
        srv: ftp.NewServer(ftpopt),
    }, nil
}

// serve runs the ftp server
@@ -200,106 +132,39 @@ func (l *Logger) PrintResponse(sessionID string, code int, message string) {
    fs.Infof(sessionID, "< %d %s", code, message)
}

// findID finds the connection ID of the calling program. It does
// this in an incredibly hacky way by looking in the stack trace.
//
// callerName should be the name of the function that we are looking
// for with a trailing '('
//
// What is really needed is a change of calling protocol so
// CheckPassword is called with the connection.
func findID(callerName []byte) (string, error) {
    // Dump the stack in this format
    // github.com/rclone/rclone/vendor/github.com/goftp/server.(*Conn).Serve(0xc0000b2680)
    //     /home/ncw/go/src/github.com/rclone/rclone/vendor/github.com/goftp/server/conn.go:116 +0x11d
    buf := make([]byte, 4096)
    n := runtime.Stack(buf, false)
    buf = buf[:n]

    // look for callerName first
    i := bytes.Index(buf, callerName)
    if i < 0 {
        return "", errors.Errorf("findID: caller name not found in:\n%s", buf)
    }
    buf = buf[i+len(callerName):]

    // find next ')'
    i = bytes.IndexByte(buf, ')')
    if i < 0 {
        return "", errors.Errorf("findID: end of args not found in:\n%s", buf)
    }
    buf = buf[:i]

    // trim off first argument
    // find next ','
    i = bytes.IndexByte(buf, ',')
    if i >= 0 {
        buf = buf[:i]
    }

    return string(buf), nil
//Auth struct to handle ftp auth (temporary simple for POC)
type Auth struct {
    BasicUser string
    BasicPass string
}

var connServeFunction = []byte("(*Conn).Serve(")

// CheckPasswd handles auth based on configuration
func (s *server) CheckPasswd(user, pass string) (ok bool, err error) {
    var VFS *vfs.VFS
    if s.proxy != nil {
        VFS, _, err = s.proxy.Call(user, pass)
        if err != nil {
            fs.Infof(nil, "proxy login failed: %v", err)
            return false, nil
        }
        id, err := findID(connServeFunction)
        if err != nil {
            fs.Infof(nil, "proxy login failed: failed to read ID from stack: %v", err)
            return false, nil
        }
        s.pendingMu.Lock()
        d := s.pending[id]
        delete(s.pending, id)
        s.pendingMu.Unlock()
        if d == nil {
            return false, errors.Errorf("proxy login failed: failed to find pending Driver under ID %q", id)
        }
        d.vfs = VFS
    } else {
        ok = s.opt.BasicUser == user && (s.opt.BasicPass == "" || s.opt.BasicPass == pass)
        if !ok {
            fs.Infof(nil, "login failed: bad credentials")
            return false, nil
        }
    }
    return true, nil
//CheckPasswd handles auth based on configuration
func (a *Auth) CheckPasswd(user, pass string) (bool, error) {
    return a.BasicUser == user && (a.BasicPass == "" || a.BasicPass == pass), nil
}

// NewDriver starts a new session for each client connection
func (s *server) NewDriver() (ftp.Driver, error) {
//DriverFactory factory of ftp driver for each session
type DriverFactory struct {
    vfs *vfs.VFS
}

//NewDriver start a new session
func (f *DriverFactory) NewDriver() (ftp.Driver, error) {
    log.Trace("", "Init driver")("")
    d := &Driver{
        s:   s,
        vfs: s.vfs, // this can be nil if proxy set
    }
    return d, nil
    return &Driver{
        vfs: f.vfs,
    }, nil
}

//Driver implementation of ftp server
type Driver struct {
    s    *server
    vfs  *vfs.VFS
    lock sync.Mutex
}

//Init a connection
func (d *Driver) Init(c *ftp.Conn) {
func (d *Driver) Init(*ftp.Conn) {
    defer log.Trace("", "Init session")("")
    if d.s.proxy != nil {
        id := fmt.Sprintf("%p", c)
        d.s.pendingMu.Lock()
        d.s.pending[id] = d
        d.s.pendingMu.Unlock()
    }
}

//Stat get information on file or folder

@@ -8,72 +8,83 @@
package ftp

import (
    "context"
    "fmt"
    "os"
    "os/exec"
    "testing"

    ftp "github.com/goftp/server"
    _ "github.com/rclone/rclone/backend/local"
    "github.com/rclone/rclone/cmd/serve/servetest"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/obscure"
    "github.com/rclone/rclone/cmd/serve/ftp/ftpopt"
    "github.com/rclone/rclone/fstest"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

const (
    testHOST             = "localhost"
    testPORT             = "51780"
    testPASSIVEPORTRANGE = "30000-32000"
    testUSER             = "rclone"
    testPASS             = "password"
)

// TestFTP runs the ftp server then runs the unit tests for the
// ftp remote against it.
func TestFTP(t *testing.T) {
    // Configure and start the server
    start := func(f fs.Fs) (configmap.Simple, func()) {
        opt := DefaultOpt
        opt.ListenAddr = testHOST + ":" + testPORT
        opt.PassivePorts = testPASSIVEPORTRANGE
        opt.BasicUser = testUSER
        opt.BasicPass = testPASS
    opt := ftpopt.DefaultOpt
    opt.ListenAddr = testHOST + ":" + testPORT
    opt.PassivePorts = testPASSIVEPORTRANGE
    opt.BasicUser = "rclone"
    opt.BasicPass = "password"

        w, err := newServer(f, &opt)
        assert.NoError(t, err)
    fstest.Initialise()

        quit := make(chan struct{})
        go func() {
            err := w.serve()
            close(quit)
            if err != ftp.ErrServerClosed {
                assert.NoError(t, err)
            }
        }()
    fremote, _, clean, err := fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir)
    assert.NoError(t, err)
    defer clean()

        // Config for the backend we'll use to connect to the server
        config := configmap.Simple{
            "type": "ftp",
            "host": testHOST,
            "port": testPORT,
            "user": testUSER,
            "pass": obscure.MustObscure(testPASS),
        }
    err = fremote.Mkdir(context.Background(), "")
    assert.NoError(t, err)

        return config, func() {
            err := w.close()
    // Start the server
    w, err := newServer(fremote, &opt)
    assert.NoError(t, err)

    go func() {
        err := w.serve()
        if err != ftp.ErrServerClosed {
            assert.NoError(t, err)
            <-quit
        }
    }()
    defer func() {
        err := w.close()
        assert.NoError(t, err)
    }()

    // Change directory to run the tests
    err = os.Chdir("../../../backend/ftp")
    assert.NoError(t, err, "failed to cd to ftp remote")

    // Run the ftp tests with an on the fly remote
    args := []string{"test"}
    if testing.Verbose() {
        args = append(args, "-v")
    }

    servetest.Run(t, "ftp", start)
}

func TestFindID(t *testing.T) {
    id, err := findID([]byte("TestFindID("))
    require.NoError(t, err)
    // id should be the argument to this function
    assert.Equal(t, fmt.Sprintf("%p", t), id)
    if *fstest.Verbose {
        args = append(args, "-verbose")
    }
    args = append(args, "-list-retries", fmt.Sprint(*fstest.ListRetries))
    args = append(args, "-remote", "ftptest:")
    cmd := exec.Command("go", args...)
    cmd.Env = append(os.Environ(),
        "RCLONE_CONFIG_FTPTEST_TYPE=ftp",
        "RCLONE_CONFIG_FTPTEST_HOST="+testHOST,
        "RCLONE_CONFIG_FTPTEST_PORT="+testPORT,
        "RCLONE_CONFIG_FTPTEST_USER=rclone",
        "RCLONE_CONFIG_FTPTEST_PASS=0HU5Hx42YiLoNGJxppOOP3QTbr-KB_MP", // ./rclone obscure password
    )
    out, err := cmd.CombinedOutput()
    if len(out) != 0 {
        t.Logf("\n----------\n%s----------\n", string(out))
    }
    assert.NoError(t, err, "Running ftp integration tests")
}

28 cmd/serve/ftp/ftpflags/ftpflags.go Normal file
@@ -0,0 +1,28 @@
package ftpflags

import (
    "github.com/rclone/rclone/cmd/serve/ftp/ftpopt"
    "github.com/rclone/rclone/fs/config/flags"
    "github.com/rclone/rclone/fs/rc"
    "github.com/spf13/pflag"
)

// Options set by command line flags
var (
    Opt = ftpopt.DefaultOpt
)

// AddFlagsPrefix adds flags for the ftpopt
func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *ftpopt.Options) {
    rc.AddOption("ftp", &Opt)
    flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to.")
    flags.StringVarP(flagSet, &Opt.PublicIP, prefix+"public-ip", "", Opt.PublicIP, "Public IP address to advertise for passive connections.")
    flags.StringVarP(flagSet, &Opt.PassivePorts, prefix+"passive-port", "", Opt.PassivePorts, "Passive port range to use.")
    flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication.")
    flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication. (empty value allows every password)")
}

// AddFlags adds flags for the ftp server
func AddFlags(flagSet *pflag.FlagSet) {
    AddFlagsPrefix(flagSet, "", &Opt)
}

40 cmd/serve/ftp/ftpopt/ftp_options.go Normal file
@@ -0,0 +1,40 @@
package ftpopt

// Help contains text describing the ftp server to add to the command
// help.
var Help = `
### Server options

Use --addr to specify which IP address and port the server should
listen on, eg --addr 1.2.3.4:8000 or --addr :8080 to listen to all
IPs. By default it only listens on localhost. You can use port
:0 to let the OS choose an available port.

If you set --addr to listen on a public or LAN accessible IP address
then using Authentication is advised - see the next section for info.

#### Authentication

By default this will serve files without needing a login.

You can set a single username and password with the --user and --pass flags.
`

// Options contains options for the ftp server
type Options struct {
    //TODO add more options
    ListenAddr   string // Port to listen on
    PublicIP     string // Public IP address to advertise for passive connections
    PassivePorts string // Passive ports range
    BasicUser    string // single username for basic auth if not using Htpasswd
    BasicPass    string // password for BasicUser
}

// DefaultOpt is the default values used for Options
var DefaultOpt = Options{
    ListenAddr:   "localhost:2121",
    PublicIP:     "",
    PassivePorts: "30000-32000",
    BasicUser:    "anonymous",
    BasicPass:    "",
}
@@ -68,7 +68,7 @@ func newServer(f fs.Fs, opt *httplib.Options) *server {
        f:   f,
        vfs: vfs.New(f, &vfsflags.Opt),
    }
    mux.HandleFunc(s.Opt.BaseURL+"/", s.handler)
    mux.HandleFunc("/", s.handler)
    return s
}

@@ -93,10 +93,7 @@ func (s *server) handler(w http.ResponseWriter, r *http.Request) {
    w.Header().Set("Accept-Ranges", "bytes")
    w.Header().Set("Server", "rclone/"+fs.Version)

    urlPath, ok := s.Path(w, r)
    if !ok {
        return
    }
    urlPath := r.URL.Path
    isDir := strings.HasSuffix(urlPath, "/")
    remote := strings.Trim(urlPath, "/")
    if isDir {

@@ -26,8 +26,6 @@ func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *httplib.Options)
    flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "realm for authentication")
    flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication.")
    flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication.")
    flags.StringVarP(flagSet, &Opt.BaseURL, prefix+"baseurl", "", Opt.BaseURL, "Prefix for URLs - leave blank for root.")

}

// AddFlags adds flags for the httplib

@@ -44,14 +44,6 @@ for a transfer.
--max-header-bytes controls the maximum number of bytes the server will
accept in the HTTP header.

--baseurl controls the URL prefix that rclone serves from. By default
rclone will serve from the root. If you used --baseurl "/rclone" then
rclone would serve from a URL starting with "/rclone/". This is
useful if you wish to proxy rclone serve. Rclone automatically
inserts leading and trailing "/" on --baseurl, so --baseurl "rclone",
--baseurl "/rclone" and --baseurl "/rclone/" are all treated
identically.

#### Authentication

By default this will serve files without needing a login.
@@ -89,7 +81,6 @@ certificate authority certificate.
// Options contains options for the http Server
type Options struct {
    ListenAddr         string        // Port to listen on
    BaseURL            string        // prefix to strip from URLs
    ServerReadTimeout  time.Duration // Timeout for server reading data
    ServerWriteTimeout time.Duration // Timeout for server writing data
    MaxHeaderBytes     int           // Maximum size of request header
@@ -100,15 +91,8 @@ type Options struct {
    Realm     string // realm for authentication
    BasicUser string // single username for basic auth if not using Htpasswd
    BasicPass string // password for BasicUser
    Auth      AuthFn `json:"-"` // custom Auth (not set by command line flags)
}

// AuthFn if used will be used to authenticate user, pass. If an error
// is returned then the user is not authenticated.
//
// If a non nil value is returned then it is added to the context under the key
type AuthFn func(user, pass string) (value interface{}, err error)

// DefaultOpt is the default values used for Options
var DefaultOpt = Options{
    ListenAddr: "localhost:8080",
@@ -133,14 +117,9 @@ type Server struct {

type contextUserType struct{}

// ContextUserKey is a simple context key for storing the username of the request
// ContextUserKey is a simple context key
var ContextUserKey = &contextUserType{}

type contextAuthType struct{}

// ContextAuthKey is a simple context key for storing info returned by AuthFn
var ContextAuthKey = &contextAuthType{}

// singleUserProvider provides the encrypted password for a single user
func (s *Server) singleUserProvider(user, realm string) string {
    if user == s.Opt.BasicUser {
@@ -149,27 +128,6 @@ func (s *Server) singleUserProvider(user, realm string) string {
    return ""
}

// parseAuthorization parses the Authorization header into user, pass
// it returns a boolean as to whether the parse was successful
func parseAuthorization(r *http.Request) (user, pass string, ok bool) {
    authHeader := r.Header.Get("Authorization")
    if authHeader != "" {
        s := strings.SplitN(authHeader, " ", 2)
        if len(s) == 2 && s[0] == "Basic" {
            b, err := base64.StdEncoding.DecodeString(s[1])
            if err == nil {
                parts := strings.SplitN(string(b), ":", 2)
                user = parts[0]
                if len(parts) > 1 {
                    pass = parts[1]
                    ok = true
                }
            }
        }
    }
    return
}
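For reference, the Basic scheme parseAuthorization decodes is nothing more than base64("user:pass"). A standalone round trip (example credentials only):

package main

import (
    "encoding/base64"
    "fmt"
    "net/http"
)

func main() {
    req, _ := http.NewRequest("GET", "http://localhost:8080/", nil)
    req.SetBasicAuth("gui", "secret") // Authorization: Basic Z3VpOnNlY3JldA==
    header := req.Header.Get("Authorization")
    b, _ := base64.StdEncoding.DecodeString(header[len("Basic "):])
    fmt.Println(string(b)) // gui:secret
}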

// NewServer creates an http server. The opt can be nil in which case
// the default options will be used.
func NewServer(handler http.Handler, opt *Options) *Server {
@@ -185,20 +143,17 @@ func NewServer(handler http.Handler, opt *Options) *Server {
    }

    // Use htpasswd if required on everything
    if s.Opt.HtPasswd != "" || s.Opt.BasicUser != "" || s.Opt.Auth != nil {
        var authenticator *auth.BasicAuth
        if s.Opt.Auth == nil {
            var secretProvider auth.SecretProvider
            if s.Opt.HtPasswd != "" {
                fs.Infof(nil, "Using %q as htpasswd storage", s.Opt.HtPasswd)
                secretProvider = auth.HtpasswdFileProvider(s.Opt.HtPasswd)
            } else {
                fs.Infof(nil, "Using --user %s --pass XXXX as authenticated user", s.Opt.BasicUser)
                s.basicPassHashed = string(auth.MD5Crypt([]byte(s.Opt.BasicPass), []byte("dlPL2MqE"), []byte("$1$")))
                secretProvider = s.singleUserProvider
            }
            authenticator = auth.NewBasicAuthenticator(s.Opt.Realm, secretProvider)
    if s.Opt.HtPasswd != "" || s.Opt.BasicUser != "" {
        var secretProvider auth.SecretProvider
        if s.Opt.HtPasswd != "" {
            fs.Infof(nil, "Using %q as htpasswd storage", s.Opt.HtPasswd)
            secretProvider = auth.HtpasswdFileProvider(s.Opt.HtPasswd)
        } else {
            fs.Infof(nil, "Using --user %s --pass XXXX as authenticated user", s.Opt.BasicUser)
            s.basicPassHashed = string(auth.MD5Crypt([]byte(s.Opt.BasicPass), []byte("dlPL2MqE"), []byte("$1$")))
            secretProvider = s.singleUserProvider
        }
        authenticator := auth.NewBasicAuthenticator(s.Opt.Realm, secretProvider)
        oldHandler := handler
        handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            // No auth wanted for OPTIONS method
@@ -206,36 +161,26 @@ func NewServer(handler http.Handler, opt *Options) *Server {
                oldHandler.ServeHTTP(w, r)
                return
            }
            unauthorized := func() {
                w.Header().Set("Content-Type", "text/plain")
                w.Header().Set("WWW-Authenticate", `Basic realm="`+s.Opt.Realm+`"`)
                http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
            }
            user, pass, authValid := parseAuthorization(r)
            if !authValid {
                unauthorized()
                return
            }
            if s.Opt.Auth == nil {
                if username := authenticator.CheckAuth(r); username == "" {
                    fs.Infof(r.URL.Path, "%s: Unauthorized request from %s", r.RemoteAddr, user)
                    unauthorized()
                    return
            if username := authenticator.CheckAuth(r); username == "" {
                authHeader := r.Header.Get(authenticator.Headers.V().Authorization)
                if authHeader != "" {
                    s := strings.SplitN(authHeader, " ", 2)
                    var userName = "UNKNOWN"
                    if len(s) == 2 && s[0] == "Basic" {
                        b, err := base64.StdEncoding.DecodeString(s[1])
                        if err == nil {
                            userName = strings.SplitN(string(b), ":", 2)[0]
                        }
                    }
                    fs.Infof(r.URL.Path, "%s: Unauthorized request from %s", r.RemoteAddr, userName)
                } else {
                    fs.Infof(r.URL.Path, "%s: Basic auth challenge sent", r.RemoteAddr)
                }
                authenticator.RequireAuth(w, r)
            } else {
                // Custom Auth
                value, err := s.Opt.Auth(user, pass)
                if err != nil {
                    fs.Infof(r.URL.Path, "%s: Auth failed from %s: %v", r.RemoteAddr, user, err)
                    unauthorized()
                    return
                }
                if value != nil {
                    r = r.WithContext(context.WithValue(r.Context(), ContextAuthKey, value))
                }
                r = r.WithContext(context.WithValue(r.Context(), ContextUserKey, username))
                oldHandler.ServeHTTP(w, r)
            }
            r = r.WithContext(context.WithValue(r.Context(), ContextUserKey, user))
            oldHandler.ServeHTTP(w, r)
        })
        s.usingAuth = true
    }
@@ -245,12 +190,6 @@ func NewServer(handler http.Handler, opt *Options) *Server {
        log.Fatalf("Need both -cert and -key to use SSL")
    }

    // If a Base URL is set then serve from there
    s.Opt.BaseURL = strings.Trim(s.Opt.BaseURL, "/")
    if s.Opt.BaseURL != "" {
        s.Opt.BaseURL = "/" + s.Opt.BaseURL
    }
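This trim-then-prefix dance is what makes --baseurl "rclone", "/rclone" and "/rclone/" equivalent, as the help text above promises. Pulled out as a standalone sketch:

package main

import (
    "fmt"
    "strings"
)

// normalizeBaseURL mirrors the normalization above: strip any leading
// and trailing slashes, then re-add a single leading one if non-empty.
func normalizeBaseURL(s string) string {
    s = strings.Trim(s, "/")
    if s != "" {
        s = "/" + s
    }
    return s
}

func main() {
    for _, s := range []string{"rclone", "/rclone", "/rclone/", ""} {
        fmt.Printf("%q -> %q\n", s, normalizeBaseURL(s))
    }
}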

    // FIXME make a transport?
    s.httpServer = &http.Server{
        Addr: s.Opt.ListenAddr,
@@ -360,27 +299,10 @@ func (s *Server) URL() string {
        // (i.e. port assigned by operating system)
        addr = s.listener.Addr().String()
    }
    return fmt.Sprintf("%s://%s%s/", proto, addr, s.Opt.BaseURL)
    return fmt.Sprintf("%s://%s/", proto, addr)
}

// UsingAuth returns true if authentication is required
func (s *Server) UsingAuth() bool {
    return s.usingAuth
}

// Path returns the current path with the Prefix stripped
//
// If it returns false, then the path was invalid and the handler
// should exit as the error response has already been sent
func (s *Server) Path(w http.ResponseWriter, r *http.Request) (Path string, ok bool) {
    Path = r.URL.Path
    if s.Opt.BaseURL == "" {
        return Path, true
    }
    if !strings.HasPrefix(Path, s.Opt.BaseURL+"/") {
        http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
        return Path, false
    }
    Path = Path[len(s.Opt.BaseURL):]
    return Path, true
}

@@ -1,270 +0,0 @@
// Package proxy implements a programmable proxy for rclone serve
package proxy

import (
    "bytes"
    "encoding/json"
    "os/exec"
    "strings"
    "time"

    "github.com/pkg/errors"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/cache"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/obscure"
    libcache "github.com/rclone/rclone/lib/cache"
    "github.com/rclone/rclone/vfs"
    "github.com/rclone/rclone/vfs/vfsflags"
    "golang.org/x/crypto/bcrypt"
)

// Help contains text describing how to use the proxy
var Help = strings.Replace(`
### Auth Proxy

If you supply the parameter |--auth-proxy /path/to/program| then
rclone will use that program to generate backends on the fly which
then are used to authenticate incoming requests. This uses a simple
JSON based protocol with input on STDIN and output on STDOUT.

There is an example program
[bin/test_proxy.py](https://github.com/rclone/rclone/blob/master/test_proxy.py)
in the rclone source code.

The program's job is to take a |user| and |pass| on the input and turn
those into the config for a backend on STDOUT in JSON format. This
config will have any default parameters for the backend added, but it
won't use configuration from environment variables or command line
options - it is the job of the proxy program to make a complete
config.

The config generated must have this extra parameter
- |_root| - root to use for the backend

And it may have this parameter
- |_obscure| - comma separated strings for parameters to obscure

For example the program might take this on STDIN

|||
{
    "user": "me",
    "pass": "mypassword"
}
|||

And return this on STDOUT

|||
{
    "type": "sftp",
    "_root": "",
    "_obscure": "pass",
    "user": "me",
    "pass": "mypassword",
    "host": "sftp.example.com"
}
|||

This would mean that an SFTP backend would be created on the fly for
the |user| and |pass| returned in the output to the host given. Note
that since |_obscure| is set to |pass|, rclone will obscure the |pass|
parameter before creating the backend (which is required for sftp
backends).

The program can manipulate the supplied |user| in any way, for example
to proxy to many different sftp backends, you could make the
|user| be |user@example.com| and then set the |host| to |example.com|
in the output and the user to |user|. For security you'd probably want
to restrict the |host| to a limited list.

Note that an internal cache is keyed on |user| so only use that for
configuration, don't use |pass|. This also means that if a user's
password is changed the cache will need to expire (which takes 5 mins)
before it takes effect.

This can be used to build general purpose proxies to any kind of
backend that rclone supports.
`, "|", "`", -1)
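A minimal Go program speaking this protocol might look like the sketch below: read the JSON request on STDIN, answer with a complete backend config on STDOUT. The sftp values are the illustrative ones from the help text above, not a recommendation (the test proxies further down in this diff do the same thing against the local backend):

package main

import (
    "encoding/json"
    "log"
    "os"
)

func main() {
    // The request is a JSON object carrying "user" and "pass".
    var in map[string]string
    if err := json.NewDecoder(os.Stdin).Decode(&in); err != nil {
        log.Fatal(err)
    }
    // Answer with a complete backend config. "_root" is required;
    // "_obscure" asks rclone to obscure "pass" before creating the backend.
    out := map[string]string{
        "type":     "sftp",
        "_root":    "",
        "_obscure": "pass",
        "user":     in["user"],
        "pass":     in["pass"],
        "host":     "sftp.example.com", // fixed host for illustration only
    }
    if err := json.NewEncoder(os.Stdout).Encode(out); err != nil {
        log.Fatal(err)
    }
}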

// Options is options for creating the proxy
type Options struct {
    AuthProxy string
}

// DefaultOpt is the default values used for Opt
var DefaultOpt = Options{
    AuthProxy: "",
}

// Proxy represents a proxy to turn auth requests into a VFS
type Proxy struct {
    cmdLine  []string // broken down command line
    vfsCache *libcache.Cache
    Opt      Options
}

// cacheEntry is what is stored in the vfsCache
type cacheEntry struct {
    vfs    *vfs.VFS // stored VFS
    pwHash []byte   // bcrypt hash of the password
}

// New creates a new proxy with the Options passed in
func New(opt *Options) *Proxy {
    return &Proxy{
        Opt:      *opt,
        cmdLine:  strings.Fields(opt.AuthProxy),
        vfsCache: libcache.New(),
    }
}

// run the proxy command returning a config map
func (p *Proxy) run(in map[string]string) (config configmap.Simple, err error) {
    cmd := exec.Command(p.cmdLine[0], p.cmdLine[1:]...)
    inBytes, err := json.MarshalIndent(in, "", "\t")
    if err != nil {
        return nil, errors.Wrap(err, "Proxy.Call failed to marshal input: %v")
    }
    var stdout, stderr bytes.Buffer
    cmd.Stdin = bytes.NewBuffer(inBytes)
    cmd.Stdout = &stdout
    cmd.Stderr = &stderr
    start := time.Now()
    err = cmd.Run()
    fs.Debugf(nil, "Calling proxy %v", p.cmdLine)
    duration := time.Since(start)
    if err != nil {
        return nil, errors.Wrapf(err, "proxy: failed on %v: %q", p.cmdLine, strings.TrimSpace(string(stderr.Bytes())))
    }
    err = json.Unmarshal(stdout.Bytes(), &config)
    if err != nil {
        return nil, errors.Wrapf(err, "proxy: failed to read output: %q", string(stdout.Bytes()))
    }
    fs.Debugf(nil, "Proxy returned in %v", duration)

    // Obscure any values in the config map that need it
    obscureFields, ok := config.Get("_obscure")
    if ok {
        for _, key := range strings.Split(obscureFields, ",") {
            value, ok := config.Get(key)
            if ok {
                obscuredValue, err := obscure.Obscure(value)
                if err != nil {
                    return nil, errors.Wrap(err, "proxy")
                }
                config.Set(key, obscuredValue)
            }
        }
    }
    return config, nil
}

// call runs the auth proxy and returns a cacheEntry and an error
func (p *Proxy) call(user, pass string, passwordBytes []byte) (value interface{}, err error) {
    // Contact the proxy
    config, err := p.run(map[string]string{
        "user": user,
        "pass": pass,
    })
    if err != nil {
        return nil, err
    }

    // Look for required fields in the answer
    fsName, ok := config.Get("type")
    if !ok {
        return nil, errors.New("proxy: type not set in result")
    }
    root, ok := config.Get("_root")
    if !ok {
        return nil, errors.New("proxy: _root not set in result")
    }

    // Find the backend
    fsInfo, err := fs.Find(fsName)
    if err != nil {
        return nil, errors.Wrapf(err, "proxy: couldn't find backend for %q", fsName)
    }

    // base name of config on user name. This may appear in logs
    name := "proxy-" + user
    fsString := name + ":" + root

    // Look for fs in the VFS cache
    value, err = p.vfsCache.Get(user, func(key string) (value interface{}, ok bool, err error) {
        // Create the Fs from the cache
        f, err := cache.GetFn(fsString, func(fsString string) (fs.Fs, error) {
            // Update the config with the default values
            for i := range fsInfo.Options {
                o := &fsInfo.Options[i]
                if _, found := config.Get(o.Name); !found && o.Default != nil && o.String() != "" {
                    config.Set(o.Name, o.String())
                }
            }
            return fsInfo.NewFs(name, root, config)
        })
        if err != nil {
            return nil, false, err
        }
        pwHash, err := bcrypt.GenerateFromPassword(passwordBytes, bcrypt.DefaultCost)
        if err != nil {
            return nil, false, err
        }
        entry := cacheEntry{
            vfs:    vfs.New(f, &vfsflags.Opt),
            pwHash: pwHash,
        }
        return entry, true, nil
    })
    if err != nil {
        return nil, errors.Wrapf(err, "proxy: failed to create backend")
    }
    return value, nil
}

// Call runs the auth proxy with the given input, returning a *vfs.VFS
// and the key used in the VFS cache.
func (p *Proxy) Call(user, pass string) (VFS *vfs.VFS, vfsKey string, err error) {
    var passwordBytes = []byte(pass)

    // Look in the cache first
    value, ok := p.vfsCache.GetMaybe(user)

    // If not found then call the proxy for a fresh answer
    if !ok {
        value, err = p.call(user, pass, passwordBytes)
        if err != nil {
            return nil, "", err
        }
    }

    // check we got what we were expecting
    entry, ok := value.(cacheEntry)
    if !ok {
        return nil, "", errors.Errorf("proxy: value is not cache entry: %#v", value)
    }

    // Check the password is correct in the cached entry. This
    // prevents an attack where subsequent requests for the same
    // user don't have their auth checked. It does mean that if
    // the password is changed, the user will have to wait for
    // cache expiry (5m) before trying again.
    err = bcrypt.CompareHashAndPassword(entry.pwHash, passwordBytes)
    if err != nil {
        return nil, "", errors.Wrap(err, "proxy: incorrect password")
    }

    return entry.vfs, user, nil
}
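The cache check above relies on the usual bcrypt pairing: GenerateFromPassword at store time, CompareHashAndPassword (which returns nil on a match) at lookup time. In isolation (example password only):

package main

import (
    "fmt"

    "golang.org/x/crypto/bcrypt"
)

func main() {
    hash, err := bcrypt.GenerateFromPassword([]byte("mypassword"), bcrypt.DefaultCost)
    if err != nil {
        panic(err)
    }
    // nil on a match, a non-nil error on a mismatch
    fmt.Println(bcrypt.CompareHashAndPassword(hash, []byte("mypassword")))
    fmt.Println(bcrypt.CompareHashAndPassword(hash, []byte("wrong")))
}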

// Get VFS from the cache using key - returns nil if not found
func (p *Proxy) Get(key string) *vfs.VFS {
    value, ok := p.vfsCache.GetMaybe(key)
    if !ok {
        return nil
    }
    entry := value.(cacheEntry)
    return entry.vfs
}
@@ -1,41 +0,0 @@
// +build ignore

// A simple auth proxy for testing purposes
package main

import (
    "encoding/json"
    "log"
    "os"
)

func main() {
    // Read the input
    var in map[string]string
    err := json.NewDecoder(os.Stdin).Decode(&in)
    if err != nil {
        log.Fatal(err)
    }

    // Write the output
    var out = map[string]string{}
    for k, v := range in {
        switch k {
        case "user":
            v += "-test"
        case "error":
            log.Fatal(v)
        }
        out[k] = v
    }
    if out["type"] == "" {
        out["type"] = "local"
    }
    if out["_root"] == "" {
        out["_root"] = ""
    }
    err = json.NewEncoder(os.Stdout).Encode(&out)
    if err != nil {
        log.Fatal(err)
    }
}
@@ -1,145 +0,0 @@
package proxy

import (
    "strings"
    "testing"

    _ "github.com/rclone/rclone/backend/local"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/obscure"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "golang.org/x/crypto/bcrypt"
)

func TestRun(t *testing.T) {
    opt := DefaultOpt
    cmd := "go run proxy_code.go"
    opt.AuthProxy = cmd
    p := New(&opt)

    t.Run("Normal", func(t *testing.T) {
        config, err := p.run(map[string]string{
            "type": "ftp",
            "user": "me",
            "pass": "pass",
            "host": "127.0.0.1",
        })
        require.NoError(t, err)
        assert.Equal(t, configmap.Simple{
            "type":  "ftp",
            "user":  "me-test",
            "pass":  "pass",
            "host":  "127.0.0.1",
            "_root": "",
        }, config)
    })

    t.Run("Error", func(t *testing.T) {
        config, err := p.run(map[string]string{
            "error": "potato",
        })
        assert.Nil(t, config)
        require.Error(t, err)
        require.Contains(t, err.Error(), "potato")
    })

    t.Run("Obscure", func(t *testing.T) {
        config, err := p.run(map[string]string{
            "type":     "ftp",
            "user":     "me",
            "pass":     "pass",
            "host":     "127.0.0.1",
            "_obscure": "pass,user",
        })
        require.NoError(t, err)
        config["user"] = obscure.MustReveal(config["user"])
        config["pass"] = obscure.MustReveal(config["pass"])
        assert.Equal(t, configmap.Simple{
            "type":     "ftp",
            "user":     "me-test",
            "pass":     "pass",
            "host":     "127.0.0.1",
            "_obscure": "pass,user",
            "_root":    "",
        }, config)
    })

    const testUser = "testUser"
    const testPass = "testPass"

    t.Run("call", func(t *testing.T) {
        // check cache empty
        assert.Equal(t, 0, p.vfsCache.Entries())
        defer p.vfsCache.Clear()

        passwordBytes := []byte(testPass)
        value, err := p.call(testUser, testPass, passwordBytes)
        require.NoError(t, err)
        entry, ok := value.(cacheEntry)
        require.True(t, ok)

        // check hash is correct in entry
        err = bcrypt.CompareHashAndPassword(entry.pwHash, passwordBytes)
        require.NoError(t, err)
        require.NotNil(t, entry.vfs)
        f := entry.vfs.Fs()
        require.NotNil(t, f)
        assert.Equal(t, "proxy-"+testUser, f.Name())
        assert.True(t, strings.HasPrefix(f.String(), "Local file system"))

        // check it is in the cache
        assert.Equal(t, 1, p.vfsCache.Entries())
        cacheValue, ok := p.vfsCache.GetMaybe(testUser)
        assert.True(t, ok)
        assert.Equal(t, value, cacheValue)
    })

    t.Run("Call", func(t *testing.T) {
        // check cache empty
        assert.Equal(t, 0, p.vfsCache.Entries())
        defer p.vfsCache.Clear()

        vfs, vfsKey, err := p.Call(testUser, testPass)
        require.NoError(t, err)
        require.NotNil(t, vfs)
        assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
        assert.Equal(t, testUser, vfsKey)

        // check it is in the cache
        assert.Equal(t, 1, p.vfsCache.Entries())
        cacheValue, ok := p.vfsCache.GetMaybe(testUser)
        assert.True(t, ok)
        cacheEntry, ok := cacheValue.(cacheEntry)
        assert.True(t, ok)
        assert.Equal(t, vfs, cacheEntry.vfs)

        // Test Get works while we have something in the cache
        t.Run("Get", func(t *testing.T) {
            assert.Equal(t, vfs, p.Get(testUser))
            assert.Nil(t, p.Get("unknown"))
        })

        // now try again from the cache
        vfs, vfsKey, err = p.Call(testUser, testPass)
        require.NoError(t, err)
        require.NotNil(t, vfs)
        assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
        assert.Equal(t, testUser, vfsKey)

        // check cache is at the same level
        assert.Equal(t, 1, p.vfsCache.Entries())

        // now try again from the cache but with wrong password
        vfs, vfsKey, err = p.Call(testUser, testPass+"wrong")
        require.Error(t, err)
        require.Contains(t, err.Error(), "incorrect password")
        require.Nil(t, vfs)
        require.Equal(t, "", vfsKey)

        // check cache is at the same level
        assert.Equal(t, 1, p.vfsCache.Entries())

    })

}
@@ -1,18 +0,0 @@
// Package proxyflags implements command line flags to set up a proxy
package proxyflags

import (
    "github.com/rclone/rclone/cmd/serve/proxy"
    "github.com/rclone/rclone/fs/config/flags"
    "github.com/spf13/pflag"
)

// Options set by command line flags
var (
    Opt = proxy.DefaultOpt
)

// AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) {
    flags.StringVarP(flagSet, &Opt.AuthProxy, "auth-proxy", "", Opt.AuthProxy, "A program to use to create the backend from the auth.")
}
@@ -171,7 +171,7 @@ func newServer(f fs.Fs, opt *httplib.Options) *server {
        Server: httplib.NewServer(mux, opt),
        f:      f,
    }
    mux.HandleFunc(s.Opt.BaseURL+"/", s.handler)
    mux.HandleFunc("/", s.handler)
    return s
}

@@ -211,10 +211,7 @@ func (s *server) handler(w http.ResponseWriter, r *http.Request) {
    w.Header().Set("Accept-Ranges", "bytes")
    w.Header().Set("Server", "rclone/"+fs.Version)

    path, ok := s.Path(w, r)
    if !ok {
        return
    }
    path := r.URL.Path
    remote := makeRemote(path)
    fs.Debugf(s.f, "%s %s", r.Method, path)

@@ -35,7 +35,7 @@ func TestRestic(t *testing.T) {

    fstest.Initialise()

    fremote, _, clean, err := fstest.RandomRemote()
    fremote, _, clean, err := fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir)
    assert.NoError(t, err)
    defer clean()

@@ -1,35 +0,0 @@
// +build ignore

// A simple auth proxy for testing purposes
package main

import (
    "encoding/json"
    "log"
    "os"
)

func main() {
    if len(os.Args) < 2 {
        log.Fatalf("Syntax: %s <root>", os.Args[0])
    }
    root := os.Args[1]

    // Read the input
    var in map[string]string
    err := json.NewDecoder(os.Stdin).Decode(&in)
    if err != nil {
        log.Fatal(err)
    }

    // Write the output
    var out = map[string]string{
        "type":     "local",
        "_root":    root,
        "_obscure": "pass",
    }
    err = json.NewEncoder(os.Stdout).Encode(&out)
    if err != nil {
        log.Fatal(err)
    }
}
@@ -1,107 +0,0 @@
// Package servetest provides infrastructure for running loopback
// tests of "rclone serve backend:" against the backend integration
// tests.
package servetest

import (
    "context"
    "fmt"
    "os"
    "os/exec"
    "path/filepath"
    "strings"
    "testing"

    "github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fstest"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

// StartFn describes the callback which should start the server with
// the Fs passed in.
// It should return a config for the backend used to connect to the
// server and a clean up function
type StartFn func(f fs.Fs) (configmap.Simple, func())

// run runs the server then runs the unit tests for the remote against
// it.
func run(t *testing.T, name string, start StartFn, useProxy bool) {
    fstest.Initialise()

    fremote, _, clean, err := fstest.RandomRemote()
    assert.NoError(t, err)
    defer clean()

    err = fremote.Mkdir(context.Background(), "")
    assert.NoError(t, err)

    f := fremote
    if useProxy {
        // If using a proxy don't pass in the backend
        f = nil

        // the backend config will be made by the proxy
        prog, err := filepath.Abs("../servetest/proxy_code.go")
        require.NoError(t, err)
        cmd := "go run " + prog + " " + fremote.Root()

        // FIXME this is untidy setting a global variable!
        proxyflags.Opt.AuthProxy = cmd
        defer func() {
            proxyflags.Opt.AuthProxy = ""
        }()
    }
    config, cleanup := start(f)
    defer cleanup()

    // Change directory to run the tests
    cwd, err := os.Getwd()
    require.NoError(t, err)
    err = os.Chdir("../../../backend/" + name)
    require.NoError(t, err, "failed to cd to "+name+" backend")
    defer func() {
        // Change back to the old directory
        require.NoError(t, os.Chdir(cwd))
    }()

    // Run the backend tests with an on the fly remote
    args := []string{"test"}
    if testing.Verbose() {
        args = append(args, "-v")
    }
    if *fstest.Verbose {
        args = append(args, "-verbose")
    }
    remoteName := name + "test:"
    args = append(args, "-remote", remoteName)
    args = append(args, "-list-retries", fmt.Sprint(*fstest.ListRetries))
    cmd := exec.Command("go", args...)

    // Configure the backend with environment variables
    cmd.Env = os.Environ()
    prefix := "RCLONE_CONFIG_" + strings.ToUpper(remoteName[:len(remoteName)-1]) + "_"
    for k, v := range config {
        cmd.Env = append(cmd.Env, prefix+strings.ToUpper(k)+"="+v)
    }
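The loop above turns each config key into an on-the-fly remote definition via environment variables, the same RCLONE_CONFIG_<REMOTE>_<KEY> convention the FTP test spells out explicitly earlier in this diff. A standalone sketch with made-up values:

package main

import (
    "fmt"
    "strings"
)

func main() {
    remoteName := "ftptest:"
    config := map[string]string{"type": "ftp", "host": "localhost"}
    // Same prefix construction as the servetest run function above.
    prefix := "RCLONE_CONFIG_" + strings.ToUpper(remoteName[:len(remoteName)-1]) + "_"
    for k, v := range config {
        fmt.Println(prefix + strings.ToUpper(k) + "=" + v)
    }
    // Prints e.g. RCLONE_CONFIG_FTPTEST_TYPE=ftp
}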

    // Run the test
    out, err := cmd.CombinedOutput()
    if len(out) != 0 {
        t.Logf("\n----------\n%s----------\n", string(out))
    }
    assert.NoError(t, err, "Running "+name+" integration tests")
}

// Run runs the server then runs the unit tests for the remote against
// it.
func Run(t *testing.T, name string, start StartFn) {
    t.Run("Normal", func(t *testing.T) {
        run(t, name, start, false)
    })
    t.Run("AuthProxy", func(t *testing.T) {
        run(t, name, start, true)
    })
}
@@ -47,6 +47,7 @@ func shellUnEscape(str string) string {

// Info about the current connection
type conn struct {
    vfs      *vfs.VFS
    f        fs.Fs
    handlers sftp.Handlers
    what     string
}
@@ -64,7 +65,7 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (
    fs.Debugf(c.what, "exec command: binary = %q, args = %q", binary, args)
    switch binary {
    case "df":
        about := c.vfs.Fs().Features().About
        about := c.f.Features().About
        if about == nil {
            return errors.New("df not supported")
        }
@@ -97,33 +98,22 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (
        if binary == "sha1sum" {
            ht = hash.SHA1
        }
        var hashSum string
        if args == "" {
            // empty hash for no input
            if ht == hash.MD5 {
                hashSum = "d41d8cd98f00b204e9800998ecf8427e"
            } else {
                hashSum = "da39a3ee5e6b4b0d3255bfef95601890afd80709"
            }
            args = "-"
        } else {
            node, err := c.vfs.Stat(args)
            if err != nil {
                return errors.Wrapf(err, "hash failed finding file %q", args)
            }
            if node.IsDir() {
                return errors.New("can't hash directory")
            }
            o, ok := node.DirEntry().(fs.ObjectInfo)
            if !ok {
                return errors.New("unexpected non file")
            }
            hashSum, err = o.Hash(ctx, ht)
            if err != nil {
                return errors.Wrap(err, "hash failed")
            }
        node, err := c.vfs.Stat(args)
        if err != nil {
            return errors.Wrapf(err, "hash failed finding file %q", args)
        }
        _, err = fmt.Fprintf(out, "%s %s\n", hashSum, args)
        if node.IsDir() {
            return errors.New("can't hash directory")
        }
        o, ok := node.DirEntry().(fs.ObjectInfo)
        if !ok {
            return errors.New("unexpected non file")
        }
        hash, err := o.Hash(ctx, ht)
        if err != nil {
            return errors.Wrap(err, "hash failed")
        }
        _, err = fmt.Fprintf(out, "%s %s\n", hash, args)
        if err != nil {
            return errors.Wrap(err, "send output failed")
        }
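The two hex literals above are the standard MD5 and SHA-1 digests of empty input; a quick standalone check:

package main

import (
    "crypto/md5"
    "crypto/sha1"
    "fmt"
)

func main() {
    fmt.Printf("%x\n", md5.Sum(nil))  // d41d8cd98f00b204e9800998ecf8427e
    fmt.Printf("%x\n", sha1.Sum(nil)) // da39a3ee5e6b4b0d3255bfef95601890afd80709
}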
@@ -131,7 +121,7 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (
     // special cases for rclone command detection
     switch args {
     case "'abc' | md5sum":
-        if c.vfs.Fs().Hashes().Contains(hash.MD5) {
+        if c.f.Hashes().Contains(hash.MD5) {
             _, err = fmt.Fprintf(out, "0bee89b07a248e27c83fc3d5951213c1 -\n")
             if err != nil {
                 return errors.Wrap(err, "send output failed")
@@ -140,7 +130,7 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (
             return errors.New("md5 hash not supported")
         }
     case "'abc' | sha1sum":
-        if c.vfs.Fs().Hashes().Contains(hash.SHA1) {
+        if c.f.Hashes().Contains(hash.SHA1) {
             _, err = fmt.Fprintf(out, "03cfd743661f07975fa2f1220c5194cbaff48451 -\n")
             if err != nil {
                 return errors.Wrap(err, "send output failed")

@@ -178,7 +168,7 @@ func (c *conn) handleChannel(newChannel ssh.NewChannel) {
     }
     defer func() {
         err := channel.Close()
-        if err != nil && err != io.EOF {
+        if err != nil {
             fs.Debugf(c.what, "Failed to close channel: %v", err)
         }
     }()
@@ -229,7 +219,7 @@ func (c *conn) handleChannel(newChannel ssh.NewChannel) {
     server := sftp.NewRequestServer(channel, c.handlers)
     defer func() {
         err := server.Close()
-        if err != nil && err != io.EOF {
+        if err != nil {
             fs.Debugf(c.what, "Failed to close server: %v", err)
         }
     }()

@@ -19,14 +19,14 @@ type vfsHandler struct {
 }
 
 // vfsHandler returns a Handlers object with the test handlers.
-func newVFSHandler(vfs *vfs.VFS) sftp.Handlers {
+func newVFSHandler(vfs *vfs.VFS) (sftp.Handlers, error) {
     v := vfsHandler{VFS: vfs}
     return sftp.Handlers{
         FileGet:  v,
         FilePut:  v,
         FileCmd:  v,
         FileList: v,
-    }
+    }, nil
 }
 
 func (v vfsHandler) Fileread(r *sftp.Request) (io.ReaderAt, error) {

@@ -18,8 +18,7 @@ import (
     "strings"
 
     "github.com/pkg/errors"
-    "github.com/rclone/rclone/cmd/serve/proxy"
-    "github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
+    "github.com/pkg/sftp"
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/config"
     "github.com/rclone/rclone/lib/env"
@@ -34,47 +33,21 @@ type server struct {
     opt      Options
     vfs      *vfs.VFS
     config   *ssh.ServerConfig
+    handlers sftp.Handlers
     listener net.Listener
     waitChan chan struct{} // for waiting on the listener to close
-    proxy    *proxy.Proxy
 }
 
 func newServer(f fs.Fs, opt *Options) *server {
     s := &server{
         f:        f,
+        vfs:      vfs.New(f, &vfsflags.Opt),
         opt:      *opt,
         waitChan: make(chan struct{}),
     }
-    if proxyflags.Opt.AuthProxy != "" {
-        s.proxy = proxy.New(&proxyflags.Opt)
-    } else {
-        s.vfs = vfs.New(f, &vfsflags.Opt)
-    }
     return s
 }
 
-// getVFS gets the vfs from s or the proxy
-func (s *server) getVFS(what string, sshConn *ssh.ServerConn) (VFS *vfs.VFS) {
-    if s.proxy == nil {
-        return s.vfs
-    }
-    if sshConn.Permissions == nil || sshConn.Permissions.Extensions == nil {
-        fs.Infof(what, "SSH Permissions Extensions not found")
-        return nil
-    }
-    key := sshConn.Permissions.Extensions["_vfsKey"]
-    if key == "" {
-        fs.Infof(what, "VFS key not found")
-        return nil
-    }
-    VFS = s.proxy.Get(key)
-    if VFS == nil {
-        fs.Infof(what, "failed to read VFS from cache")
-        return nil
-    }
-    return VFS
-}
-
 func (s *server) acceptConnections() {
     for {
         nConn, err := s.listener.Accept()
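The removed proxy path is a two-step handshake: `PasswordCallback` (in a hunk further down) calls `proxy.Call(user, pass)`, caches the resulting VFS, and stores only a small cache key under `_vfsKey` in the connection's `ssh.Permissions.Extensions`; `getVFS` then redeems that key once the connection is up. A self-contained illustration of the pattern, with a plain map standing in for the proxy cache:

package main

import "fmt"

type vfsLike struct{ user string }

// cache stands in for the proxy's VFS cache (proxy.Get in the code above).
var cache = map[string]*vfsLike{}

// At auth time: exchange credentials for a key and remember the VFS.
// rclone gets both from proxy.Call; the key derivation here is made up.
func passwordCallback(user, pass string) map[string]string {
    key := user + "\x00" + pass
    cache[key] = &vfsLike{user: user}
    return map[string]string{"_vfsKey": key} // travels in ssh.Permissions.Extensions
}

// At connection time: redeem the key, as getVFS does. A nil result means
// "close the unauthenticated connection".
func getVFS(extensions map[string]string) *vfsLike {
    return cache[extensions["_vfsKey"]]
}

func main() {
    ext := passwordCallback("alice", "secret")
    fmt.Println(getVFS(ext).user) // alice
}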
@@ -100,15 +73,11 @@ func (s *server) acceptConnections() {
         go ssh.DiscardRequests(reqs)
 
         c := &conn{
-            what: what,
-            vfs:  s.getVFS(what, sshConn),
+            vfs:      s.vfs,
+            f:        s.f,
+            handlers: s.handlers,
+            what:     what,
         }
-        if c.vfs == nil {
-            fs.Infof(what, "Closing unauthenticated connection (couldn't find VFS)")
-            _ = nConn.Close()
-            continue
-        }
-        c.handlers = newVFSHandler(c.vfs)
 
         // Accept all channels
         go c.handleChannels(chans)
@@ -130,8 +99,8 @@ func (s *server) serve() (err error) {
         fs.Logf(nil, "Loaded %d authorized keys from %q", len(authorizedKeysMap), authKeysFile)
     }
 
-    if !s.opt.NoAuth && len(authorizedKeysMap) == 0 && s.opt.User == "" && s.opt.Pass == "" && s.proxy == nil {
-        return errors.New("no authorization found, use --user/--pass or --authorized-keys or --no-auth or --auth-proxy")
+    if !s.opt.NoAuth && len(authorizedKeysMap) == 0 && s.opt.User == "" && s.opt.Pass == "" {
+        return errors.New("no authorization found, use --user/--pass or --authorized-keys or --no-auth")
     }
 
     // An SSH server is represented by a ServerConfig, which holds
@@ -140,19 +109,7 @@ func (s *server) serve() (err error) {
         ServerVersion: "SSH-2.0-" + fs.Config.UserAgent,
         PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
             fs.Debugf(describeConn(c), "Password login attempt for %s", c.User())
-            if s.proxy != nil {
-                // query the proxy for the config
-                _, vfsKey, err := s.proxy.Call(c.User(), string(pass))
-                if err != nil {
-                    return nil, err
-                }
-                // just return the Key so we can get it back from the cache
-                return &ssh.Permissions{
-                    Extensions: map[string]string{
-                        "_vfsKey": vfsKey,
-                    },
-                }, nil
-            } else if s.opt.User != "" && s.opt.Pass != "" {
+            if s.opt.User != "" && s.opt.Pass != "" {
                 userOK := subtle.ConstantTimeCompare([]byte(c.User()), []byte(s.opt.User))
                 passOK := subtle.ConstantTimeCompare(pass, []byte(s.opt.Pass))
                 if (userOK & passOK) == 1 {
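The surviving branch checks credentials with `subtle.ConstantTimeCompare` and combines the two results with bitwise `&` rather than `&&`, so both comparisons always execute and the response time does not reveal whether the username or the password was the mismatch. A minimal standalone demonstration of the same idea:

package main

import (
    "crypto/subtle"
    "fmt"
)

// checkLogin mirrors the PasswordCallback logic above: both comparisons
// run unconditionally, and only the combined result is branched on.
func checkLogin(user, pass, wantUser, wantPass string) bool {
    userOK := subtle.ConstantTimeCompare([]byte(user), []byte(wantUser))
    passOK := subtle.ConstantTimeCompare([]byte(pass), []byte(wantPass))
    return (userOK & passOK) == 1
}

func main() {
    fmt.Println(checkLogin("admin", "hunter2", "admin", "hunter2")) // true
    fmt.Println(checkLogin("admin", "nope", "admin", "hunter2"))    // false
}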
@@ -163,9 +120,6 @@ func (s *server) serve() (err error) {
         },
         PublicKeyCallback: func(c ssh.ConnMetadata, pubKey ssh.PublicKey) (*ssh.Permissions, error) {
             fs.Debugf(describeConn(c), "Public key login attempt for %s", c.User())
-            if s.proxy != nil {
-                return nil, errors.New("public key login not allowed when using auth proxy")
-            }
             if _, ok := authorizedKeysMap[string(pubKey.Marshal())]; ok {
                 return &ssh.Permissions{
                     // Record the public key used for authentication.
@@ -224,6 +178,11 @@ func (s *server) serve() (err error) {
     }
     fs.Logf(nil, "SFTP server listening on %v\n", s.listener.Addr())
 
+    s.handlers, err = newVFSHandler(s.vfs)
+    if err != nil {
+        return errors.Wrap(err, "serve sftp: failed to create fs")
+    }
+
     go s.acceptConnections()
 
     return nil
@@ -6,9 +6,6 @@ package sftp
 
 import (
     "github.com/rclone/rclone/cmd"
-    "github.com/rclone/rclone/cmd/serve/proxy"
-    "github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/config/flags"
-    "github.com/rclone/rclone/fs/rc"
     "github.com/rclone/rclone/vfs"
@@ -49,7 +46,6 @@ func AddFlags(flagSet *pflag.FlagSet, Opt *Options) {
 
 func init() {
     vfsflags.AddFlags(Command.Flags())
-    proxyflags.AddFlags(Command.Flags())
     AddFlags(Command.Flags(), &Opt)
 }
@@ -88,15 +84,10 @@ reachable externally then supply "--addr :2022" for example.
 
 Note that the default of "--vfs-cache-mode off" is fine for the rclone
 sftp backend, but it may not be with other SFTP clients.
 
-` + vfs.Help + proxy.Help,
+` + vfs.Help,
     Run: func(command *cobra.Command, args []string) {
-        var f fs.Fs
-        if proxyflags.Opt.AuthProxy == "" {
-            cmd.CheckArgs(1, 1, command, args)
-            f = cmd.NewFsSrc(args)
-        } else {
-            cmd.CheckArgs(0, 0, command, args)
-        }
+        cmd.CheckArgs(1, 1, command, args)
+        f := cmd.NewFsSrc(args)
         cmd.Run(false, true, command, func() error {
             s := newServer(f, &Opt)
             err := s.Serve()
@@ -8,15 +8,16 @@
 package sftp
 
 import (
+    "context"
+    "os"
+    "os/exec"
     "strings"
     "testing"
 
     "github.com/pkg/sftp"
     _ "github.com/rclone/rclone/backend/local"
-    "github.com/rclone/rclone/cmd/serve/servetest"
-    "github.com/rclone/rclone/fs"
-    "github.com/rclone/rclone/fs/config/configmap"
     "github.com/rclone/rclone/fs/config/obscure"
+    "github.com/rclone/rclone/fstest"
    "github.com/stretchr/testify/assert"
 )
@@ -37,35 +38,58 @@ var (
 // TestSftp runs the sftp server then runs the unit tests for the
 // sftp remote against it.
 func TestSftp(t *testing.T) {
-    // Configure and start the server
-    start := func(f fs.Fs) (configmap.Simple, func()) {
-        opt := DefaultOpt
-        opt.ListenAddr = testBindAddress
-        opt.User = testUser
-        opt.Pass = testPass
-
-        w := newServer(f, &opt)
-        assert.NoError(t, w.serve())
-
-        // Read the host and port we started on
-        addr := w.Addr()
-        colon := strings.LastIndex(addr, ":")
-
-        // Config for the backend we'll use to connect to the server
-        config := configmap.Simple{
-            "type": "sftp",
-            "user": testUser,
-            "pass": obscure.MustObscure(testPass),
-            "host": addr[:colon],
-            "port": addr[colon+1:],
-        }
-
-        // return a stop function
-        return config, func() {
-            w.Close()
-            w.Wait()
-        }
-    }
-
-    servetest.Run(t, "sftp", start)
+    fstest.Initialise()
+
+    fremote, _, clean, err := fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir)
+    assert.NoError(t, err)
+    defer clean()
+
+    err = fremote.Mkdir(context.Background(), "")
+    assert.NoError(t, err)
+
+    opt := DefaultOpt
+    opt.ListenAddr = testBindAddress
+    opt.User = testUser
+    opt.Pass = testPass
+
+    // Start the server
+    w := newServer(fremote, &opt)
+    assert.NoError(t, w.serve())
+    defer func() {
+        w.Close()
+        w.Wait()
+    }()
+
+    // Change directory to run the tests
+    err = os.Chdir("../../../backend/sftp")
+    assert.NoError(t, err, "failed to cd to sftp backend")
+
+    // Run the sftp tests with an on the fly remote
+    args := []string{"test"}
+    if testing.Verbose() {
+        args = append(args, "-v")
+    }
+    if *fstest.Verbose {
+        args = append(args, "-verbose")
+    }
+    args = append(args, "-remote", "sftptest:")
+    cmd := exec.Command("go", args...)
+    addr := w.Addr()
+    colon := strings.LastIndex(addr, ":")
+    if colon < 0 {
+        panic("need a : in the address: " + addr)
+    }
+    host, port := addr[:colon], addr[colon+1:]
+    cmd.Env = append(os.Environ(),
+        "RCLONE_CONFIG_SFTPTEST_TYPE=sftp",
+        "RCLONE_CONFIG_SFTPTEST_HOST="+host,
+        "RCLONE_CONFIG_SFTPTEST_PORT="+port,
+        "RCLONE_CONFIG_SFTPTEST_USER="+testUser,
+        "RCLONE_CONFIG_SFTPTEST_PASS="+obscure.MustObscure(testPass),
+    )
+    out, err := cmd.CombinedOutput()
+    if len(out) != 0 {
+        t.Logf("\n----------\n%s----------\n", string(out))
+    }
+    assert.NoError(t, err, "Running sftp integration tests")
 }
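The five `RCLONE_CONFIG_SFTPTEST_*` variables define an on-the-fly remote named `sftptest:`, exactly as if it had a section in the config file, and the password goes through `obscure.MustObscure` first because rclone stores passwords in its config in obscured form. A small sketch of the naming rule (values illustrative):

package main

import (
    "fmt"
    "strings"
)

// envName shows the RCLONE_CONFIG_<REMOTE>_<KEY> rule the test relies on:
// remote "sftptest" and key "host" become RCLONE_CONFIG_SFTPTEST_HOST.
func envName(remote, key string) string {
    return "RCLONE_CONFIG_" + strings.ToUpper(remote) + "_" + strings.ToUpper(key)
}

func main() {
    for _, key := range []string{"type", "host", "port", "user", "pass"} {
        fmt.Println(envName("sftptest", key))
    }
}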
@@ -12,11 +12,9 @@ import (
     "github.com/rclone/rclone/cmd/serve/httplib"
     "github.com/rclone/rclone/cmd/serve/httplib/httpflags"
     "github.com/rclone/rclone/cmd/serve/httplib/serve"
-    "github.com/rclone/rclone/cmd/serve/proxy"
-    "github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/hash"
-    "github.com/rclone/rclone/lib/errors"
+    "github.com/rclone/rclone/fs/log"
     "github.com/rclone/rclone/vfs"
     "github.com/rclone/rclone/vfs/vfsflags"
     "github.com/spf13/cobra"
@@ -32,7 +30,6 @@ var (
 func init() {
     httpflags.AddFlags(Command.Flags())
     vfsflags.AddFlags(Command.Flags())
-    proxyflags.AddFlags(Command.Flags())
     Command.Flags().StringVar(&hashName, "etag-hash", "", "Which hash to use for the ETag, or auto or blank for off")
     Command.Flags().BoolVar(&disableGETDir, "disable-dir-list", false, "Disable HTML directory list on GET request for a directory")
 }
@@ -60,15 +57,10 @@ supported hash on the backend or you can use a named hash such as
 
 Use "rclone hashsum" to see the full list.
 
-` + httplib.Help + vfs.Help + proxy.Help,
+` + httplib.Help + vfs.Help,
     RunE: func(command *cobra.Command, args []string) error {
-        var f fs.Fs
-        if proxyflags.Opt.AuthProxy == "" {
-            cmd.CheckArgs(1, 1, command, args)
-            f = cmd.NewFsSrc(args)
-        } else {
-            cmd.CheckArgs(0, 0, command, args)
-        }
+        cmd.CheckArgs(1, 1, command, args)
+        f := cmd.NewFsSrc(args)
         hashType = hash.None
         if hashName == "auto" {
             hashType = f.Hashes().GetOne()
@@ -109,9 +101,8 @@ Use "rclone hashsum" to see the full list.
 type WebDAV struct {
     *httplib.Server
     f             fs.Fs
-    _vfs          *vfs.VFS // don't use directly, use getVFS
+    vfs           *vfs.VFS
     webdavhandler *webdav.Handler
-    proxy         *proxy.Proxy
 }
 
 // check interface
@@ -120,58 +111,21 @@ var _ webdav.FileSystem = (*WebDAV)(nil)
 // Make a new WebDAV to serve the remote
 func newWebDAV(f fs.Fs, opt *httplib.Options) *WebDAV {
     w := &WebDAV{
-        f: f,
+        f:   f,
+        vfs: vfs.New(f, &vfsflags.Opt),
     }
-    if proxyflags.Opt.AuthProxy != "" {
-        w.proxy = proxy.New(&proxyflags.Opt)
-        // override auth
-        copyOpt := *opt
-        copyOpt.Auth = w.auth
-        opt = &copyOpt
-    } else {
-        w._vfs = vfs.New(f, &vfsflags.Opt)
-    }
-    w.Server = httplib.NewServer(http.HandlerFunc(w.handler), opt)
     webdavHandler := &webdav.Handler{
         Prefix:     w.Server.Opt.BaseURL,
         FileSystem: w,
         LockSystem: webdav.NewMemLS(),
         Logger:     w.logRequest, // FIXME
     }
     w.webdavhandler = webdavHandler
+    w.Server = httplib.NewServer(http.HandlerFunc(w.handler), opt)
     return w
 }
 
-// Gets the VFS in use for this request
-func (w *WebDAV) getVFS(ctx context.Context) (VFS *vfs.VFS, err error) {
-    if w._vfs != nil {
-        return w._vfs, nil
-    }
-    value := ctx.Value(httplib.ContextAuthKey)
-    if value == nil {
-        return nil, errors.New("no VFS found in context")
-    }
-    VFS, ok := value.(*vfs.VFS)
-    if !ok {
-        return nil, errors.Errorf("context value is not VFS: %#v", value)
-    }
-    return VFS, nil
-}
-
-// auth does proxy authorization
-func (w *WebDAV) auth(user, pass string) (value interface{}, err error) {
-    VFS, _, err := w.proxy.Call(user, pass)
-    if err != nil {
-        return nil, err
-    }
-    return VFS, err
-}
-
 func (w *WebDAV) handler(rw http.ResponseWriter, r *http.Request) {
-    urlPath, ok := w.Path(rw, r)
-    if !ok {
-        return
-    }
+    urlPath := r.URL.Path
     isDir := strings.HasSuffix(urlPath, "/")
     remote := strings.Trim(urlPath, "/")
     if !disableGETDir && (r.Method == "GET" || r.Method == "HEAD") && isDir {
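The removed `getVFS` encodes the per-request pattern used with the auth proxy: httplib's auth hook stores whatever `auth` returns (here the authenticated user's `*vfs.VFS`) in the request context under `httplib.ContextAuthKey`, and every WebDAV method pulls it back out with a type assertion. A self-contained illustration of that round trip, with a local key type standing in for `httplib.ContextAuthKey`:

package main

import (
    "context"
    "fmt"
)

type ctxKey struct{} // stands in for httplib.ContextAuthKey

type vfsLike struct{ user string }

// The auth middleware would do this once per request after a successful login.
func withVFS(ctx context.Context, v *vfsLike) context.Context {
    return context.WithValue(ctx, ctxKey{}, v)
}

// Each filesystem method then recovers it, as the removed getVFS does.
func vfsFrom(ctx context.Context) (*vfsLike, error) {
    v, ok := ctx.Value(ctxKey{}).(*vfsLike)
    if !ok {
        return nil, fmt.Errorf("no VFS found in context")
    }
    return v, nil
}

func main() {
    ctx := withVFS(context.Background(), &vfsLike{user: "alice"})
    v, err := vfsFrom(ctx)
    fmt.Println(v.user, err) // alice <nil>
}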
@@ -184,14 +138,8 @@ func (w *WebDAV) handler(rw http.ResponseWriter, r *http.Request) {
 // serveDir serves a directory index at dirRemote
 // This is similar to serveDir in serve http.
 func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote string) {
-    VFS, err := w.getVFS(r.Context())
-    if err != nil {
-        http.Error(rw, "Root directory not found", http.StatusNotFound)
-        fs.Errorf(nil, "Failed to serve directory: %v", err)
-        return
-    }
     // List the directory
-    node, err := VFS.Stat(dirRemote)
+    node, err := w.vfs.Stat(dirRemote)
     if err == vfs.ENOENT {
         http.Error(rw, "Directory not found", http.StatusNotFound)
         return
@@ -238,12 +186,8 @@ func (w *WebDAV) logRequest(r *http.Request, err error) {
 
 // Mkdir creates a directory
 func (w *WebDAV) Mkdir(ctx context.Context, name string, perm os.FileMode) (err error) {
-    // defer log.Trace(name, "perm=%v", perm)("err = %v", &err)
-    VFS, err := w.getVFS(ctx)
-    if err != nil {
-        return err
-    }
-    dir, leaf, err := VFS.StatParent(name)
+    defer log.Trace(name, "perm=%v", perm)("err = %v", &err)
+    dir, leaf, err := w.vfs.StatParent(name)
     if err != nil {
         return err
     }
@@ -253,12 +197,8 @@ func (w *WebDAV) Mkdir(ctx context.Context, name string, perm os.FileMode) (err
 
 // OpenFile opens a file or a directory
 func (w *WebDAV) OpenFile(ctx context.Context, name string, flags int, perm os.FileMode) (file webdav.File, err error) {
-    // defer log.Trace(name, "flags=%v, perm=%v", flags, perm)("err = %v", &err)
-    VFS, err := w.getVFS(ctx)
-    if err != nil {
-        return nil, err
-    }
-    f, err := VFS.OpenFile(name, flags, perm)
+    defer log.Trace(name, "flags=%v, perm=%v", flags, perm)("err = %v", &err)
+    f, err := w.vfs.OpenFile(name, flags, perm)
     if err != nil {
         return nil, err
     }
@@ -267,12 +207,8 @@ func (w *WebDAV) OpenFile(ctx context.Context, name string, flags int, perm os.FileMode) (file webdav.File, err error) {
 
 // RemoveAll removes a file or a directory and its contents
 func (w *WebDAV) RemoveAll(ctx context.Context, name string) (err error) {
-    // defer log.Trace(name, "")("err = %v", &err)
-    VFS, err := w.getVFS(ctx)
-    if err != nil {
-        return err
-    }
-    node, err := VFS.Stat(name)
+    defer log.Trace(name, "")("err = %v", &err)
+    node, err := w.vfs.Stat(name)
     if err != nil {
         return err
     }
@@ -285,22 +221,14 @@ func (w *WebDAV) RemoveAll(ctx context.Context, name string) (err error) {
 
 // Rename a file or a directory
 func (w *WebDAV) Rename(ctx context.Context, oldName, newName string) (err error) {
-    // defer log.Trace(oldName, "newName=%q", newName)("err = %v", &err)
-    VFS, err := w.getVFS(ctx)
-    if err != nil {
-        return err
-    }
-    return VFS.Rename(oldName, newName)
+    defer log.Trace(oldName, "newName=%q", newName)("err = %v", &err)
+    return w.vfs.Rename(oldName, newName)
 }
 
 // Stat returns info about the file or directory
 func (w *WebDAV) Stat(ctx context.Context, name string) (fi os.FileInfo, err error) {
-    // defer log.Trace(name, "")("fi=%+v, err = %v", &fi, &err)
-    VFS, err := w.getVFS(ctx)
-    if err != nil {
-        return nil, err
-    }
-    fi, err = VFS.Stat(name)
+    defer log.Trace(name, "")("fi=%+v, err = %v", &fi, &err)
+    fi, err = w.vfs.Stat(name)
     if err != nil {
         return nil, err
     }
@@ -342,7 +270,7 @@ type FileInfo struct {
 
 // ETag returns an ETag for the FileInfo
 func (fi FileInfo) ETag(ctx context.Context) (etag string, err error) {
-    // defer log.Trace(fi, "")("etag=%q, err=%v", &etag, &err)
+    defer log.Trace(fi, "")("etag=%q, err=%v", &etag, &err)
     if hashType == hash.None {
         return "", webdav.ErrNotImplemented
     }
@@ -365,7 +293,7 @@ func (fi FileInfo) ETag(ctx context.Context) (etag string, err error) {
 
 // ContentType returns a content type for the FileInfo
 func (fi FileInfo) ContentType(ctx context.Context) (contentType string, err error) {
-    // defer log.Trace(fi, "")("etag=%q, err=%v", &contentType, &err)
+    defer log.Trace(fi, "")("etag=%q, err=%v", &contentType, &err)
     node, ok := (fi.FileInfo).(vfs.Node)
     if !ok {
         fs.Errorf(fi, "Expecting vfs.Node, got %T", fi.FileInfo)
@@ -8,22 +8,21 @@
 package webdav
 
 import (
     "context"
     "flag"
     "io/ioutil"
     "net/http"
     "os"
+    "os/exec"
     "strings"
     "testing"
     "time"
 
     _ "github.com/rclone/rclone/backend/local"
     "github.com/rclone/rclone/cmd/serve/httplib"
-    "github.com/rclone/rclone/cmd/serve/servetest"
     "github.com/rclone/rclone/fs"
-    "github.com/rclone/rclone/fs/config/configmap"
-    "github.com/rclone/rclone/fs/config/obscure"
     "github.com/rclone/rclone/fs/filter"
     "github.com/rclone/rclone/fs/hash"
+    "github.com/rclone/rclone/fstest"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
     "golang.org/x/net/webdav"
@@ -31,8 +30,6 @@ import (
 
 const (
     testBindAddress = "localhost:0"
-    testUser        = "user"
-    testPass        = "pass"
 )
 
 // check interfaces
@@ -45,34 +42,50 @@ var (
 // TestWebDav runs the webdav server then runs the unit tests for the
 // webdav remote against it.
 func TestWebDav(t *testing.T) {
-    // Configure and start the server
-    start := func(f fs.Fs) (configmap.Simple, func()) {
-        opt := httplib.DefaultOpt
-        opt.ListenAddr = testBindAddress
-        opt.BasicUser = testUser
-        opt.BasicPass = testPass
-        hashType = hash.MD5
-
-        // Start the server
-        w := newWebDAV(f, &opt)
-        assert.NoError(t, w.serve())
-
-        // Config for the backend we'll use to connect to the server
-        config := configmap.Simple{
-            "type":   "webdav",
-            "vendor": "other",
-            "url":    w.Server.URL(),
-            "user":   testUser,
-            "pass":   obscure.MustObscure(testPass),
-        }
-
-        return config, func() {
-            w.Close()
-            w.Wait()
-        }
-    }
-
-    servetest.Run(t, "webdav", start)
+    opt := httplib.DefaultOpt
+    opt.ListenAddr = testBindAddress
+    hashType = hash.MD5
+
+    fstest.Initialise()
+
+    fremote, _, clean, err := fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir)
+    assert.NoError(t, err)
+    defer clean()
+
+    err = fremote.Mkdir(context.Background(), "")
+    assert.NoError(t, err)
+
+    // Start the server
+    w := newWebDAV(fremote, &opt)
+    assert.NoError(t, w.serve())
+    defer func() {
+        w.Close()
+        w.Wait()
+    }()
+
+    // Change directory to run the tests
+    err = os.Chdir("../../../backend/webdav")
+    assert.NoError(t, err, "failed to cd to webdav remote")
+
+    // Run the webdav tests with an on the fly remote
+    args := []string{"test"}
+    if testing.Verbose() {
+        args = append(args, "-v")
+    }
+    if *fstest.Verbose {
+        args = append(args, "-verbose")
+    }
+    args = append(args, "-remote", "webdavtest:")
+    cmd := exec.Command("go", args...)
+    cmd.Env = append(os.Environ(),
+        "RCLONE_CONFIG_WEBDAVTEST_TYPE=webdav",
+        "RCLONE_CONFIG_WEBDAVTEST_URL="+w.Server.URL(),
+        "RCLONE_CONFIG_WEBDAVTEST_VENDOR=other",
+    )
+    out, err := cmd.CombinedOutput()
+    if len(out) != 0 {
+        t.Logf("\n----------\n%s----------\n", string(out))
+    }
+    assert.NoError(t, err, "Running webdav integration tests")
 }
 
 // Test serve http functionality in serve webdav
@@ -84,6 +97,10 @@ var (
 )
 
 func TestHTTPFunction(t *testing.T) {
+    // cd to correct directory for testing
+    err := os.Chdir("../../cmd/serve/webdav")
+    assert.NoError(t, err, "failed to cd to webdav cmd directory")
+
     // exclude files called hidden.txt and directories called hidden
     require.NoError(t, filter.Active.AddRule("- hidden.txt"))
     require.NoError(t, filter.Active.AddRule("- hidden/**"))
@@ -6,7 +6,10 @@ date: "2017-09-25"
 groups: ["about"]
 ---
 
-# Rclone - rsync for cloud storage
+Rclone
+======
+
+[](https://rclone.org/)
 
 Rclone is a command line program to sync files and directories to and from:
 
@@ -17,7 +20,6 @@ Rclone is a command line program to sync files and directories to and from:
 * {{< provider name="Backblaze B2" home="https://www.backblaze.com/b2/cloud-storage.html" config="/b2/" >}}
 * {{< provider name="Box" home="https://www.box.com/" config="/box/" >}}
 * {{< provider name="Ceph" home="http://ceph.com/" config="/s3/#ceph" >}}
-* {{< provider name="C14" home="https://www.online.net/en/storage/c14-cold-storage" config="/sftp/#c14" >}}
 * {{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
 * {{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
 * {{< provider name="Dropbox" home="https://www.dropbox.com/" config="/dropbox/" >}}
@@ -42,11 +44,10 @@ Rclone is a command line program to sync files and directories to and from:
 * {{< provider name="Oracle Cloud Storage" home="https://cloud.oracle.com/storage-opc" config="/swift/" >}}
 * {{< provider name="ownCloud" home="https://owncloud.org/" config="/webdav/#owncloud" >}}
 * {{< provider name="pCloud" home="https://www.pcloud.com/" config="/pcloud/" >}}
-* {{< provider name="premiumize.me" home="https://premiumize.me/" config="/premiumizeme/" >}}
-* {{< provider name="put.io" home="https://put.io/" config="/putio/" >}}
+* {{< provider name="put.io" home="https://put.io/" config="/webdav/#put-io" >}}
 * {{< provider name="QingStor" home="https://www.qingcloud.com/products/storage" config="/qingstor/" >}}
 * {{< provider name="Rackspace Cloud Files" home="https://www.rackspace.com/cloud/files" config="/swift/" >}}
-* {{< provider name="rsync.net" home="https://rsync.net/products/rclone.html" config="/sftp/#rsync-net" >}}
+* {{< provider name="rsync.net" home="https://rsync.net/products/rclone.html" config="/sftp/" >}}
 * {{< provider name="Scaleway" home="https://www.scaleway.com/object-storage/" config="/s3/#scaleway" >}}
 * {{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SFTP" config="/sftp/" >}}
 * {{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}
@@ -69,11 +70,10 @@ Features
 * Optional FUSE mount ([rclone mount](/commands/rclone_mount/))
 * Multi-threaded downloads to local disk
 * Can [serve](/commands/rclone_serve/) local or remote files over [HTTP](/commands/rclone_serve_http/)/[WebDav](/commands/rclone_serve_webdav/)/[FTP](/commands/rclone_serve_ftp/)/[SFTP](/commands/rclone_serve_sftp/)/[dlna](/commands/rclone_serve_dlna/)
-* Experimental [Web based GUI](/gui/)
 
 Links
 
 * <i class="fa fa-home"></i> [Home page](https://rclone.org/)
-* <i class="fab fa-github"></i> [GitHub project page for source and bug tracker](https://github.com/rclone/rclone)
+* <i class="fa fa-github"></i> [GitHub project page for source and bug tracker](https://github.com/rclone/rclone)
 * <i class="fa fa-comments"></i> [Rclone Forum](https://forum.rclone.org)
-* <i class="fas fa-cloud-download-alt"></i>[Downloads](/downloads/)
+* <i class="fa fa-cloud-download"></i>[Downloads](/downloads/)
@@ -41,11 +41,51 @@ n/s/q> n
 name> remote
 Type of storage to configure.
 Choose a number from below, or type in your own value
-[snip]
-XX / Alias for an existing remote
+ 1 / Alias for an existing remote
    \ "alias"
-[snip]
-Storage> alias
+ 2 / Amazon Drive
+   \ "amazon cloud drive"
+ 3 / Amazon S3 (also Dreamhost, Ceph, Minio)
+   \ "s3"
+ 4 / Backblaze B2
+   \ "b2"
+ 5 / Box
+   \ "box"
+ 6 / Cache a remote
+   \ "cache"
+ 7 / Dropbox
+   \ "dropbox"
+ 8 / Encrypt/Decrypt a remote
+   \ "crypt"
+ 9 / FTP Connection
+   \ "ftp"
+10 / Google Cloud Storage (this is not Google Drive)
+   \ "google cloud storage"
+11 / Google Drive
+   \ "drive"
+12 / Hubic
+   \ "hubic"
+13 / Local Disk
+   \ "local"
+14 / Microsoft Azure Blob Storage
+   \ "azureblob"
+15 / Microsoft OneDrive
+   \ "onedrive"
+16 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
+   \ "swift"
+17 / Pcloud
+   \ "pcloud"
+18 / QingCloud Object Storage
+   \ "qingstor"
+19 / SSH/SFTP Connection
+   \ "sftp"
+20 / Webdav
+   \ "webdav"
+21 / Yandex Disk
+   \ "yandex"
+22 / http Connection
+   \ "http"
+Storage> 1
 Remote or path to alias.
 Can be "myremote:path/to/dir", "myremote:bucket", "myremote:" or "/local/path".
 remote> /mnt/storage/backup
@@ -4,7 +4,7 @@ description: "Rclone docs for Amazon Drive"
 date: "2017-06-10"
 ---
 
-<i class="fab fa-amazon"></i> Amazon Drive
+<i class="fa fa-amazon"></i> Amazon Drive
 -----------------------------------------
 
 Amazon Drive, formerly known as Amazon Cloud Drive, is a cloud storage
@@ -65,11 +65,35 @@ n/r/c/s/q> n
 name> remote
 Type of storage to configure.
 Choose a number from below, or type in your own value
-[snip]
-XX / Amazon Drive
+ 1 / Amazon Drive
    \ "amazon cloud drive"
-[snip]
-Storage> amazon cloud drive
+ 2 / Amazon S3 (also Dreamhost, Ceph, Minio)
+   \ "s3"
+ 3 / Backblaze B2
+   \ "b2"
+ 4 / Dropbox
+   \ "dropbox"
+ 5 / Encrypt/Decrypt a remote
+   \ "crypt"
+ 6 / FTP Connection
+   \ "ftp"
+ 7 / Google Cloud Storage (this is not Google Drive)
+   \ "google cloud storage"
+ 8 / Google Drive
+   \ "drive"
+ 9 / Hubic
+   \ "hubic"
+10 / Local Disk
+   \ "local"
+11 / Microsoft OneDrive
+   \ "onedrive"
+12 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
+   \ "swift"
+13 / SSH/SFTP Connection
+   \ "sftp"
+14 / Yandex Disk
+   \ "yandex"
+Storage> 1
 Amazon Application Client Id - required.
 client_id> your client ID goes here
 Amazon Application Client Secret - required.
@@ -266,7 +266,7 @@ Contributors
 * Aleksandar Jankovic <office@ajankovic.com>
 * Maran <maran@protonmail.com>
 * nguyenhuuluan434 <nguyenhuuluan434@gmail.com>
-* Laura Hausmann <zotan@zotan.pw> <laura@hausmann.dev>
+* Laura Hausmann <zotan@zotan.pw>
 * yparitcher <y@paritcher.com>
 * AbelThar <abela.tharen@gmail.com>
 * Matti Niemenmaa <matti.niemenmaa+git@iki.fi>
@@ -277,10 +277,3 @@ Contributors
 * EliEron <subanimehd@gmail.com>
 * justina777 <chiahuei.lin@gmail.com>
 * Chaitanya Bankanhal <bchaitanya15@gmail.com>
-* Michał Matczuk <michal@scylladb.com>
-* Macavirus <macavirus@zoho.com>
-* Abhinav Sharma <abhi18av@users.noreply.github.com>
-* ginvine <34869051+ginvine@users.noreply.github.com>
-* Patrick Wang <mail6543210@yahoo.com.tw>
-* Cenk Alti <cenkalti@gmail.com>
-* Andreas Chlupka <andy@chlupka.com>
@@ -4,7 +4,7 @@ description: "Rclone docs for Microsoft Azure Blob Storage"
 date: "2017-07-30"
 ---
 
-<i class="fab fa-windows"></i> Microsoft Azure Blob Storage
+<i class="fa fa-windows"></i> Microsoft Azure Blob Storage
 -----------------------------------------
 
 Paths are specified as `remote:container` (or `remote:` for the `lsd`
@@ -27,10 +27,40 @@ n/s/q> n
 name> remote
 Type of storage to configure.
 Choose a number from below, or type in your own value
-[snip]
-XX / Microsoft Azure Blob Storage
+ 1 / Amazon Drive
+   \ "amazon cloud drive"
+ 2 / Amazon S3 (also Dreamhost, Ceph, Minio)
+   \ "s3"
+ 3 / Backblaze B2
+   \ "b2"
+ 4 / Box
+   \ "box"
+ 5 / Dropbox
+   \ "dropbox"
+ 6 / Encrypt/Decrypt a remote
+   \ "crypt"
+ 7 / FTP Connection
+   \ "ftp"
+ 8 / Google Cloud Storage (this is not Google Drive)
+   \ "google cloud storage"
+ 9 / Google Drive
+   \ "drive"
+10 / Hubic
+   \ "hubic"
+11 / Local Disk
+   \ "local"
+12 / Microsoft Azure Blob Storage
    \ "azureblob"
-[snip]
+13 / Microsoft OneDrive
+   \ "onedrive"
+14 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
+   \ "swift"
+15 / SSH/SFTP Connection
+   \ "sftp"
+16 / Yandex Disk
+   \ "yandex"
+17 / http Connection
+   \ "http"
 Storage> azureblob
 Storage Account Name
 account> account_name
@@ -145,7 +175,7 @@ Here are the standard options specific to azureblob (Microsoft Azure Blob Storage).
 
 #### --azureblob-account
 
-Storage Account Name (leave blank to use SAS URL or Emulator)
+Storage Account Name (leave blank to use connection string or SAS URL)
 
 - Config: account
 - Env Var: RCLONE_AZUREBLOB_ACCOUNT
@@ -154,7 +184,7 @@ Storage Account Name (leave blank to use SAS URL or Emulator)
 
 #### --azureblob-key
 
-Storage Account Key (leave blank to use SAS URL or Emulator)
+Storage Account Key (leave blank to use connection string or SAS URL)
 
 - Config: key
 - Env Var: RCLONE_AZUREBLOB_KEY
@@ -164,22 +194,13 @@ Storage Account Key (leave blank to use SAS URL or Emulator)
 #### --azureblob-sas-url
 
 SAS URL for container level access only
-(leave blank if using account/key or Emulator)
+(leave blank if using account/key or connection string)
 
 - Config: sas_url
 - Env Var: RCLONE_AZUREBLOB_SAS_URL
 - Type: string
 - Default: ""
 
-#### --azureblob-use-emulator
-
-Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)
-
-- Config: use_emulator
-- Env Var: RCLONE_AZUREBLOB_USE_EMULATOR
-- Type: bool
-- Default: false
-
 ### Advanced Options
 
 Here are the advanced options specific to azureblob (Microsoft Azure Blob Storage).
@@ -30,11 +30,33 @@ n/q> n
 name> remote
 Type of storage to configure.
 Choose a number from below, or type in your own value
-[snip]
-XX / Backblaze B2
+ 1 / Amazon Drive
+   \ "amazon cloud drive"
+ 2 / Amazon S3 (also Dreamhost, Ceph, Minio)
+   \ "s3"
+ 3 / Backblaze B2
    \ "b2"
-[snip]
-Storage> b2
+ 4 / Dropbox
+   \ "dropbox"
+ 5 / Encrypt/Decrypt a remote
+   \ "crypt"
+ 6 / Google Cloud Storage (this is not Google Drive)
+   \ "google cloud storage"
+ 7 / Google Drive
+   \ "drive"
+ 8 / Hubic
+   \ "hubic"
+ 9 / Local Disk
+   \ "local"
+10 / Microsoft OneDrive
+   \ "onedrive"
+11 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
+   \ "swift"
+12 / SSH/SFTP Connection
+   \ "sftp"
+13 / Yandex Disk
+   \ "yandex"
+Storage> 3
 Account ID or Application Key ID
 account> 123456789abc
 Application Key
@@ -425,7 +447,6 @@ Custom endpoint for downloads.
 
 This is usually set to a Cloudflare CDN URL as Backblaze offers
 free egress for data downloaded through the Cloudflare network.
-This is probably only useful for a public bucket.
 Leave blank if you want to use the endpoint provided by Backblaze.
 
 - Config: download_url
@@ -433,17 +454,5 @@ Leave blank if you want to use the endpoint provided by Backblaze.
 - Type: string
 - Default: ""
 
-#### --b2-download-auth-duration
-
-Time before the authorization token will expire in s or suffix ms|s|m|h|d.
-
-The duration before the download authorization token will expire.
-The minimum value is 1 second. The maximum value is one week.
-
-- Config: download_auth_duration
-- Env Var: RCLONE_B2_DOWNLOAD_AUTH_DURATION
-- Type: Duration
-- Default: 1w
-
 <!--- autogenerated options stop -->
@@ -29,10 +29,38 @@ n/s/q> n
 name> remote
 Type of storage to configure.
 Choose a number from below, or type in your own value
-[snip]
-XX / Box
+ 1 / Amazon Drive
+   \ "amazon cloud drive"
+ 2 / Amazon S3 (also Dreamhost, Ceph, Minio)
+   \ "s3"
+ 3 / Backblaze B2
+   \ "b2"
+ 4 / Box
    \ "box"
-[snip]
+ 5 / Dropbox
+   \ "dropbox"
+ 6 / Encrypt/Decrypt a remote
+   \ "crypt"
+ 7 / FTP Connection
+   \ "ftp"
+ 8 / Google Cloud Storage (this is not Google Drive)
+   \ "google cloud storage"
+ 9 / Google Drive
+   \ "drive"
+10 / Hubic
+   \ "hubic"
+11 / Local Disk
+   \ "local"
+12 / Microsoft OneDrive
+   \ "onedrive"
+13 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
+   \ "swift"
+14 / SSH/SFTP Connection
+   \ "sftp"
+15 / Yandex Disk
+   \ "yandex"
+16 / http Connection
+   \ "http"
 Storage> box
 Box App Client Id - leave blank normally.
 client_id>
@@ -30,11 +30,11 @@ n/r/c/s/q> n
 name> test-cache
 Type of storage to configure.
 Choose a number from below, or type in your own value
-[snip]
-XX / Cache a remote
+...
+ 5 / Cache a remote
    \ "cache"
-[snip]
-Storage> cache
+...
+Storage> 5
 Remote to cache.
 Normally should contain a ':' and a path, eg "myremote:path/to/dir",
 "myremote:bucket" or maybe "myremote:" (not recommended).