1
0
mirror of https://github.com/rclone/rclone.git synced 2025-12-16 16:23:22 +00:00

Compare commits

..

18 Commits

Author SHA1 Message Date
Nick Craig-Wood
85882fa2de build: unify uploading in CI and add artifacts to Azure Pipelines
- remove compile_all step as we compile everything in the release step
- rename travis_beta to beta
- split into upload_beta and make depend on rclone
- use latest OSes in pipelines build
- Add workaround for cmd/mount tests lockup from #3154
2019-08-06 16:43:50 +01:00
negative0
27a075e9fc rcd: Removed the shorthand for webgui. Shorthand is reserved for rsync compatibility. 2019-08-06 12:50:31 +01:00
Nick Craig-Wood
5065c422b4 lib/random: unify random string generation into random.String
This was factored from fstest as we were including the testing
environment into the main binary because of it.

This was causing opening the browser to fail because of 8243ff8bc8.
2019-08-06 12:44:08 +01:00
Nick Craig-Wood
72d5b11d1b serve restic: rename test file to avoid it being linked into main binary 2019-08-06 12:42:52 +01:00
Nick Craig-Wood
526a3347ac rcd: Fix permissions problems on cache directory with web gui download 2019-08-06 12:06:57 +01:00
Nick Craig-Wood
23910ba53b servetest: add tests for --auth-proxy 2019-08-06 11:43:42 +01:00
Nick Craig-Wood
ee7101e6af serve: factor out common testing parts for ftp, sftp and webdav tests 2019-08-06 11:43:42 +01:00
Nick Craig-Wood
36c1b37dd9 serve webdav: support --auth-proxy 2019-08-06 11:43:42 +01:00
Nick Craig-Wood
72782bdda6 serve ftp: implement --auth-proxy 2019-08-06 11:43:42 +01:00
Nick Craig-Wood
b94eef16c1 serve ftp: refactor to bring into line with other serve commands 2019-08-06 11:43:42 +01:00
Nick Craig-Wood
d75fbe4852 serve sftp: implement auth proxy 2019-08-06 11:43:42 +01:00
Nick Craig-Wood
e6ab237fcd serve: add auth proxy infrastructure 2019-08-06 11:43:42 +01:00
Nick Craig-Wood
a7eec91d69 vfs: add Fs() method to return underlying fs.Fs 2019-08-06 11:43:42 +01:00
Nick Craig-Wood
b3e94b018c cache: factor fs cache into lib/cache 2019-08-06 11:43:42 +01:00
Nick Craig-Wood
ca0e9ea55d build: add Azure Pipelines build status to README 2019-08-06 10:46:36 +01:00
Nick Craig-Wood
53e3c2e263 build: add azure pipelines build 2019-08-06 10:31:32 +01:00
Nick Craig-Wood
02eb747d71 serve http/webdav/restic: implement --prefix - fixes #3398
--prefix enables the servers to serve from a non root prefix.  This
enables easier proxying.
2019-08-06 10:30:48 +01:00
Chaitanya Bankanhal
d51a970932 rcd: Change URL after webgui move to rclone organization 2019-08-05 16:22:40 +01:00
47 changed files with 1731 additions and 632 deletions

View File

@@ -46,4 +46,4 @@ artifacts:
- path: build/*-v*.zip - path: build/*-v*.zip
deploy_script: deploy_script:
- IF "%APPVEYOR_REPO_NAME%" == "rclone/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload - IF "%APPVEYOR_REPO_NAME%" == "rclone/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make upload_beta

View File

@@ -84,7 +84,6 @@ matrix:
- BUILD_FLAGS='-exclude "^(windows|darwin|linux)/"' - BUILD_FLAGS='-exclude "^(windows|darwin|linux)/"'
script: script:
- make - make
- make compile_all
- go: 1.12.x - go: 1.12.x
name: macOS name: macOS
os: osx os: osx
@@ -120,9 +119,11 @@ matrix:
deploy: deploy:
provider: script provider: script
script: make travis_beta script:
- make beta
- [[ "$TRAVIS_PULL_REQUEST" == "false" ]] && make upload_beta
skip_cleanup: true skip_cleanup: true
on: on:
repo: rclone/rclone repo: rclone/rclone
all_branches: true all_branches: true
condition: $TRAVIS_PULL_REQUEST == false && $DEPLOY == true condition: $DEPLOY == true

View File

@@ -17,7 +17,10 @@ ifneq ($(TAG),$(LAST_TAG))
endif endif
GO_VERSION := $(shell go version) GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ ) GO_FILES := $(shell go list ./... | grep -v /vendor/ )
BETA_PATH := $(BRANCH_PATH)$(TAG) ifdef BETA_SUBDIR
BETA_SUBDIR := /$(BETA_SUBDIR)
endif
BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/ BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := memstore:beta-rclone-org BETA_UPLOAD_ROOT := memstore:beta-rclone-org
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH) BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
@@ -142,7 +145,7 @@ upload_github:
cross: doc cross: doc
go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG) go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
beta: test_beta:
go run bin/cross-compile.go $(BUILDTAGS) $(TAG) go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
rclone -v copy build/ memstore:pub-rclone-org/$(TAG) rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
@echo Beta release ready at https://pub.rclone.org/$(TAG)/ @echo Beta release ready at https://pub.rclone.org/$(TAG)/
@@ -153,13 +156,6 @@ log_since_last_release:
compile_all: compile_all:
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG) go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
appveyor_upload:
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
@echo Beta release ready at $(BETA_URL)
circleci_upload: circleci_upload:
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds ./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifndef BRANCH_PATH ifndef BRANCH_PATH
@@ -167,15 +163,17 @@ ifndef BRANCH_PATH
endif endif
@echo Beta release ready at $(BETA_URL)/testbuilds @echo Beta release ready at $(BETA_URL)/testbuilds
travis_beta: beta:
ifeq ($(TRAVIS_OS_NAME),linux) ifeq (linux,$(filter linux,$(subst Linux,linux,$(TRAVIS_OS_NAME) $(AGENT_OS))))
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz' go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
endif endif
git log $(LAST_TAG).. > /tmp/git-log.txt git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG) go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
upload_beta: rclone
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD) rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT) rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
endif endif
@echo Beta release ready at $(BETA_URL) @echo Beta release ready at $(BETA_URL)

View File

@@ -10,6 +10,7 @@
[![Build Status](https://travis-ci.org/rclone/rclone.svg?branch=master)](https://travis-ci.org/rclone/rclone) [![Build Status](https://travis-ci.org/rclone/rclone.svg?branch=master)](https://travis-ci.org/rclone/rclone)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/rclone/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/rclone/rclone) [![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/rclone/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/rclone/rclone)
[![Build Status](https://dev.azure.com/rclone/rclone/_apis/build/status/rclone.rclone?branchName=master)](https://dev.azure.com/rclone/rclone/_build/latest?definitionId=2&branchName=master)
[![CircleCI](https://circleci.com/gh/rclone/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/rclone/rclone/tree/master) [![CircleCI](https://circleci.com/gh/rclone/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/rclone/rclone/tree/master)
[![Go Report Card](https://goreportcard.com/badge/github.com/rclone/rclone)](https://goreportcard.com/report/github.com/rclone/rclone) [![Go Report Card](https://goreportcard.com/badge/github.com/rclone/rclone)](https://goreportcard.com/report/github.com/rclone/rclone)
[![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone) [![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone)

View File

@@ -1,16 +1,33 @@
--- ---
# Azure pipelines build for rclone # Azure pipelines build for rclone
# Parts stolen shamelessly from all round the Internet, especially Caddy # Parts stolen shamelessly from all round the Internet, especially Caddy
# -*- compile-command: "yamllint -f parsable azure-pipelines.yml" -*-
trigger: trigger:
branches:
include:
- '*'
tags: tags:
include: include:
- '*' - '*'
variables:
GOROOT: $(gorootDir)/go
GOPATH: $(system.defaultWorkingDirectory)/gopath
GOCACHE: $(system.defaultWorkingDirectory)/gocache
GOBIN: $(GOPATH)/bin
GOMAXPROCS: 8 # workaround for cmd/mount tests locking up - see #3154
modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)'
GO111MODULE: 'off'
GOTAGS: cmount
GO_LATEST: false
CPATH: ''
GO_INSTALL_ARCH: amd64
strategy: strategy:
matrix: matrix:
linux: linux:
imageName: ubuntu-16.04 imageName: ubuntu-latest
gorootDir: /usr/local gorootDir: /usr/local
GO_VERSION: latest GO_VERSION: latest
GOTAGS: cmount GOTAGS: cmount
@@ -19,67 +36,63 @@ strategy:
MAKE_QUICKTEST: true MAKE_QUICKTEST: true
DEPLOY: true DEPLOY: true
mac: mac:
imageName: macos-10.13 imageName: macos-latest
gorootDir: /usr/local gorootDir: /usr/local
GO_VERSION: latest GO_VERSION: latest
GOTAGS: "" # cmount doesn't work on osx travis for some reason GOTAGS: "" # cmount doesn't work on osx travis for some reason
BUILD_FLAGS: '-include "^darwin/" -cgo' BUILD_FLAGS: '-include "^darwin/" -cgo'
MAKE_QUICKTEST: true MAKE_QUICKTEST: true
RACEMAKE_QUICKTEST: true MAKE_RACEQUICKTEST: true
DEPLOY: true DEPLOY: true
windows: windows_amd64:
imageName: windows-2019 imageName: windows-latest
gorootDir: C:\ gorootDir: C:\
GO_VERSION: latest GO_VERSION: latest
BUILD_FLAGS: '-include "^windows/amd64" -cgo' # 386 doesn't build yet BUILD_FLAGS: '-include "^windows/amd64" -cgo'
MAKE_QUICKTEST: true
DEPLOY: true
windows_386:
imageName: windows-latest
gorootDir: C:\
GO_VERSION: latest
GO_INSTALL_ARCH: 386
BUILD_FLAGS: '-include "^windows/386" -cgo'
MAKE_QUICKTEST: true MAKE_QUICKTEST: true
DEPLOY: true DEPLOY: true
other_os: other_os:
imageName: ubuntu-16.04 imageName: ubuntu-latest
gorootDir: /usr/local gorootDir: /usr/local
GO_VERSION: latest GO_VERSION: latest
BUILD_FLAGS: '-exclude "^(windows|darwin|linux)/"' BUILD_FLAGS: '-exclude "^(windows|darwin|linux)/"'
MAKE_COMPILE_ALL: true
DEPLOY: true DEPLOY: true
modules_race: modules_race:
imageName: ubuntu-16.04 imageName: ubuntu-latest
gorootDir: /usr/local gorootDir: /usr/local
GO_VERSION: latest GO_VERSION: latest
GO111MODULE: on GO111MODULE: on
GOPROXY: https://proxy.golang.org GOPROXY: https://proxy.golang.org
MAKE_QUICKTEST: true MAKE_QUICKTEST: true
RACEMAKE_QUICKTEST: true MAKE_RACEQUICKTEST: true
go1.9: go1.9:
imageName: ubuntu-16.04 imageName: ubuntu-latest
gorootDir: /usr/local gorootDir: /usr/local
GOCACHE: '' # build caching only came in go1.10 GOCACHE: '' # build caching only came in go1.10
GO_VERSION: go1.9.7 GO_VERSION: go1.9.7
MAKE_QUICKTEST: true MAKE_QUICKTEST: true
go1.10: go1.10:
imageName: ubuntu-16.04 imageName: ubuntu-latest
gorootDir: /usr/local gorootDir: /usr/local
GO_VERSION: go1.10.8 GO_VERSION: go1.10.8
MAKE_QUICKTEST: true MAKE_QUICKTEST: true
go1.11: go1.11:
imageName: ubuntu-16.04 imageName: ubuntu-latest
gorootDir: /usr/local gorootDir: /usr/local
GO_VERSION: go1.11.8 GO_VERSION: go1.11.12
MAKE_QUICKTEST: true MAKE_QUICKTEST: true
pool: pool:
vmImage: $(imageName) vmImage: $(imageName)
variables:
GOROOT: $(gorootDir)/go
GOPATH: $(system.defaultWorkingDirectory)/gopath
GOCACHE: $(system.defaultWorkingDirectory)/gocache
GOBIN: $(GOPATH)/bin
modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)'
GO111MODULE: 'off'
GOTAGS: cmount
GO_LATEST: false
CPATH: ''
steps: steps:
- bash: | - bash: |
latestGo=$(curl "https://golang.org/VERSION?m=text") latestGo=$(curl "https://golang.org/VERSION?m=text")
@@ -102,20 +115,11 @@ steps:
- task: CacheBeta@0 - task: CacheBeta@0
continueOnError: true continueOnError: true
inputs: inputs:
key: go-build-cache | $(Agent.JobName) key: go-build-cache | "$(Agent.JobName)"
path: $(GOCACHE) path: $(GOCACHE)
displayName: Cache go build displayName: Cache go build
condition: ne( variables['GOCACHE'], '' ) condition: ne( variables['GOCACHE'], '' )
- bash: |
mkdir -p $(GOCACHE)
echo "not empty" > $(GOCACHE)/not_empty.txt
echo "GOCACHE=" $(GOCACHE)
ls -R $(GOCACHE)
continueOnError: true
displayName: Create cache dir
condition: ne( variables['GOCACHE'], '' )
# Install Libraries (varies by platform) # Install Libraries (varies by platform)
- bash: | - bash: |
@@ -134,32 +138,43 @@ steps:
displayName: Install Libraries on macOS displayName: Install Libraries on macOS
- powershell: | - powershell: |
choco install -y winfsp zip make $ProgressPreference = 'SilentlyContinue'
choco install -y winfsp zip
Write-Host "##vso[task.setvariable variable=CPATH]C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" Write-Host "##vso[task.setvariable variable=CPATH]C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
if ($env:GO_INSTALL_ARCH -eq "386") {
choco install -y mingw --forcex86 --force
Write-Host "##vso[task.prependpath]C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
}
# Copy mingw32-make.exe to make.exe so the same command line
# can be used on Windows as on macOS and Linux
$path = (get-command mingw32-make.exe).Path
Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
condition: eq( variables['Agent.OS'], 'Windows_NT' ) condition: eq( variables['Agent.OS'], 'Windows_NT' )
displayName: Install Libraries on Windows displayName: Install Libraries on Windows
# Install Go (this varies by platform) # Install Go (this varies by platform)
- bash: | - bash: |
wget "https://dl.google.com/go/$(GO_VERSION).linux-amd64.tar.gz" wget "https://dl.google.com/go/$(GO_VERSION).linux-$(GO_INSTALL_ARCH).tar.gz"
sudo mkdir $(gorootDir) sudo mkdir $(gorootDir)
sudo chown ${USER}:${USER} $(gorootDir) sudo chown ${USER}:${USER} $(gorootDir)
tar -C $(gorootDir) -xzf "$(GO_VERSION).linux-amd64.tar.gz" tar -C $(gorootDir) -xzf "$(GO_VERSION).linux-$(GO_INSTALL_ARCH).tar.gz"
condition: eq( variables['Agent.OS'], 'Linux' ) condition: eq( variables['Agent.OS'], 'Linux' )
displayName: Install Go on Linux displayName: Install Go on Linux
- bash: | - bash: |
wget "https://dl.google.com/go/$(GO_VERSION).darwin-amd64.tar.gz" wget "https://dl.google.com/go/$(GO_VERSION).darwin-$(GO_INSTALL_ARCH).tar.gz"
sudo tar -C $(gorootDir) -xzf "$(GO_VERSION).darwin-amd64.tar.gz" sudo tar -C $(gorootDir) -xzf "$(GO_VERSION).darwin-$(GO_INSTALL_ARCH).tar.gz"
condition: eq( variables['Agent.OS'], 'Darwin' ) condition: eq( variables['Agent.OS'], 'Darwin' )
displayName: Install Go on macOS displayName: Install Go on macOS
- powershell: | - powershell: |
Write-Host "Downloading Go... (please be patient, I am very slow)" $ProgressPreference = 'SilentlyContinue'
(New-Object System.Net.WebClient).DownloadFile("https://dl.google.com/go/$(GO_VERSION).windows-amd64.zip", "$(GO_VERSION).windows-amd64.zip") Write-Host "Downloading Go $(GO_VERSION) for $(GO_INSTALL_ARCH)"
Write-Host "Extracting Go... (I'm slow too)" (New-Object System.Net.WebClient).DownloadFile("https://dl.google.com/go/$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip", "$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip")
Expand-Archive "$(GO_VERSION).windows-amd64.zip" -DestinationPath "$(gorootDir)" Write-Host "Extracting Go"
Expand-Archive "$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip" -DestinationPath "$(gorootDir)"
condition: eq( variables['Agent.OS'], 'Windows_NT' ) condition: eq( variables['Agent.OS'], 'Windows_NT' )
displayName: Install Go on Windows displayName: Install Go on Windows
@@ -170,17 +185,16 @@ steps:
printf "Go version: $(go version)\n" printf "Go version: $(go version)\n"
printf "\n\nGo environment:\n\n" printf "\n\nGo environment:\n\n"
go env go env
printf "\n\nSystem environment:\n\n"
env
printf "\n\nRclone environment:\n\n" printf "\n\nRclone environment:\n\n"
make vars make vars
printf "\n\nSystem environment:\n\n"
env
workingDirectory: '$(modulePath)' workingDirectory: '$(modulePath)'
displayName: Print Go version and environment displayName: Print Go version and environment
# Run Tests # Run Tests
- bash: | - bash: |
make
make quicktest make quicktest
workingDirectory: '$(modulePath)' workingDirectory: '$(modulePath)'
displayName: Run tests displayName: Run tests
@@ -190,7 +204,7 @@ steps:
make racequicktest make racequicktest
workingDirectory: '$(modulePath)' workingDirectory: '$(modulePath)'
displayName: Race test displayName: Race test
condition: eq( variables['RACEMAKE_QUICKTEST'], 'true' ) condition: eq( variables['MAKE_RACEQUICKTEST'], 'true' )
- bash: | - bash: |
make build_dep make build_dep
@@ -200,13 +214,21 @@ steps:
condition: eq( variables['MAKE_CHECK'], 'true' ) condition: eq( variables['MAKE_CHECK'], 'true' )
- bash: | - bash: |
make compile_all make beta
workingDirectory: '$(modulePath)' workingDirectory: '$(modulePath)'
displayName: Compile all architectures test displayName: Do release build
condition: eq( variables['MAKE_COMPILE_ALL'], 'true' ) condition: eq( variables['DEPLOY'], 'true' )
- bash: | - bash: |
make vars # FIXME travis_beta make upload_beta
env:
RCLONE_CONFIG_PASS: $(RCLONE_CONFIG_PASS)
BETA_SUBDIR: 'azure_pipelines' # FIXME remove when removing travis/appveyor
workingDirectory: '$(modulePath)' workingDirectory: '$(modulePath)'
displayName: Deploy built binaries displayName: Upload built binaries
condition: and( eq( variables['DEPLOY'], 'true' ), ne( variables['Build.Reason'], 'PullRequest' ) ) condition: and( eq( variables['DEPLOY'], 'true' ), ne( variables['Build.Reason'], 'PullRequest' ) )
- publish: $(modulePath)/build
artifact: "rclone-build-$(Agent.JobName)"
displayName: Publish built binaries
condition: eq( variables['DEPLOY'], 'true' )

View File

@@ -33,6 +33,7 @@ import (
"github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/vfs" "github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags" "github.com/rclone/rclone/vfs/vfsflags"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@@ -355,8 +356,8 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
testData2, err = base64.StdEncoding.DecodeString(cryptedText2Base64) testData2, err = base64.StdEncoding.DecodeString(cryptedText2Base64)
require.NoError(t, err) require.NoError(t, err)
} else { } else {
testData1 = []byte(fstest.RandomString(100)) testData1 = []byte(random.String(100))
testData2 = []byte(fstest.RandomString(200)) testData2 = []byte(random.String(200))
} }
// write the object // write the object

View File

@@ -13,6 +13,7 @@ import (
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@@ -55,7 +56,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
t.Run("CreateAlbum", func(t *testing.T) { t.Run("CreateAlbum", func(t *testing.T) {
albumName := "album/rclone-test-" + fstest.RandomString(24) albumName := "album/rclone-test-" + random.String(24)
err = f.Mkdir(ctx, albumName) err = f.Mkdir(ctx, albumName)
require.NoError(t, err) require.NoError(t, err)
remote := albumName + "/" + fileNameAlbum remote := albumName + "/" + fileNameAlbum

25
bin/test_proxy.py Executable file
View File

@@ -0,0 +1,25 @@
#!/usr/bin/env python3
"""
A demo proxy for rclone serve sftp/webdav/ftp etc
This takes the incoming user/pass and converts it into an sftp backend
running on localhost.
"""
import sys
import json
def main():
i = json.load(sys.stdin)
o = {
"type": "sftp", # type of backend
"_root": "", # root of the fs
"_obscure": "pass", # comma sep list of fields to obscure
"user": i["user"],
"pass": i["pass"],
"host": "127.0.0.1",
}
json.dump(o, sys.stdout, indent="\t")
if __name__ == "__main__":
main()

View File

@@ -9,6 +9,7 @@ package cmd
import ( import (
"fmt" "fmt"
"log" "log"
"math/rand"
"os" "os"
"os/exec" "os/exec"
"path" "path"
@@ -492,6 +493,7 @@ func AddBackendFlags() {
// Main runs rclone interpreting flags and commands out of os.Args // Main runs rclone interpreting flags and commands out of os.Args
func Main() { func Main() {
rand.Seed(time.Now().Unix())
setupRootCommand(Root) setupRootCommand(Root)
AddBackendFlags() AddBackendFlags()
if err := Root.Execute(); err != nil { if err := Root.Execute(); err != nil {

View File

@@ -18,7 +18,7 @@ import (
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/lib/random"
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
@@ -118,7 +118,7 @@ func (r *results) Print() {
// writeFile writes a file with some random contents // writeFile writes a file with some random contents
func (r *results) writeFile(path string) (fs.Object, error) { func (r *results) writeFile(path string) (fs.Object, error) {
contents := fstest.RandomString(50) contents := random.String(50)
src := object.NewStaticObjectInfo(path, time.Now(), int64(len(contents)), true, nil, r.f) src := object.NewStaticObjectInfo(path, time.Now(), int64(len(contents)), true, nil, r.f)
return r.f.Put(r.ctx, bytes.NewBufferString(contents), src) return r.f.Put(r.ctx, bytes.NewBufferString(contents), src)
} }

View File

@@ -82,7 +82,7 @@ func checkRelease(shouldUpdate bool) (err error) {
extractPath := filepath.Join(cachePath, "current") extractPath := filepath.Join(cachePath, "current")
if !exists(cachePath) { if !exists(cachePath) {
if err := os.MkdirAll(cachePath, 755); err != nil { if err := os.MkdirAll(cachePath, 0755); err != nil {
fs.Logf(nil, "Error creating cache directory: %s", cachePath) fs.Logf(nil, "Error creating cache directory: %s", cachePath)
} }
} }
@@ -177,14 +177,14 @@ func unzip(src, dest string) (err error) {
path := filepath.Join(dest, f.Name) path := filepath.Join(dest, f.Name)
if f.FileInfo().IsDir() { if f.FileInfo().IsDir() {
if err := os.MkdirAll(path, f.Mode()); err != nil { if err := os.MkdirAll(path, 0755); err != nil {
return err return err
} }
} else { } else {
if err := os.MkdirAll(filepath.Dir(path), f.Mode()); err != nil { if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
return err return err
} }
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()) f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil { if err != nil {
return err return err
} }

View File

@@ -5,30 +5,68 @@
package ftp package ftp
import ( import (
"errors" "bytes"
"fmt" "fmt"
"io" "io"
"net" "net"
"os" "os"
"os/user" "os/user"
"runtime"
"strconv" "strconv"
"sync" "sync"
ftp "github.com/goftp/server" ftp "github.com/goftp/server"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/ftp/ftpflags" "github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/ftp/ftpopt" "github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs" "github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags" "github.com/rclone/rclone/vfs/vfsflags"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/pflag"
) )
// Options contains options for the http Server
type Options struct {
//TODO add more options
ListenAddr string // Port to listen on
PublicIP string // Passive ports range
PassivePorts string // Passive ports range
BasicUser string // single username for basic auth if not using Htpasswd
BasicPass string // password for BasicUser
}
// DefaultOpt is the default values used for Options
var DefaultOpt = Options{
ListenAddr: "localhost:2121",
PublicIP: "",
PassivePorts: "30000-32000",
BasicUser: "anonymous",
BasicPass: "",
}
// Opt is options set by command line flags
var Opt = DefaultOpt
// AddFlags adds flags for ftp
func AddFlags(flagSet *pflag.FlagSet) {
rc.AddOption("ftp", &Opt)
flags.StringVarP(flagSet, &Opt.ListenAddr, "addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to.")
flags.StringVarP(flagSet, &Opt.PublicIP, "public-ip", "", Opt.PublicIP, "Public IP address to advertise for passive connections.")
flags.StringVarP(flagSet, &Opt.PassivePorts, "passive-port", "", Opt.PassivePorts, "Passive port range to use.")
flags.StringVarP(flagSet, &Opt.BasicUser, "user", "", Opt.BasicUser, "User name for authentication.")
flags.StringVarP(flagSet, &Opt.BasicPass, "pass", "", Opt.BasicPass, "Password for authentication. (empty value allow every password)")
}
func init() { func init() {
ftpflags.AddFlags(Command.Flags())
vfsflags.AddFlags(Command.Flags()) vfsflags.AddFlags(Command.Flags())
proxyflags.AddFlags(Command.Flags())
AddFlags(Command.Flags())
} }
// Command definition for cobra // Command definition for cobra
@@ -39,12 +77,33 @@ var Command = &cobra.Command{
rclone serve ftp implements a basic ftp server to serve the rclone serve ftp implements a basic ftp server to serve the
remote over FTP protocol. This can be viewed with a ftp client remote over FTP protocol. This can be viewed with a ftp client
or you can make a remote of type ftp to read and write it. or you can make a remote of type ftp to read and write it.
` + ftpopt.Help + vfs.Help,
### Server options
Use --addr to specify which IP address and port the server should
listen on, eg --addr 1.2.3.4:8000 or --addr :8080 to listen to all
IPs. By default it only listens on localhost. You can use port
:0 to let the OS choose an available port.
If you set --addr to listen on a public or LAN accessible IP address
then using Authentication is advised - see the next section for info.
#### Authentication
By default this will serve files without needing a login.
You can set a single username and password with the --user and --pass flags.
` + vfs.Help + proxy.Help,
Run: func(command *cobra.Command, args []string) { Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args) var f fs.Fs
f := cmd.NewFsSrc(args) if proxyflags.Opt.AuthProxy == "" {
cmd.CheckArgs(1, 1, command, args)
f = cmd.NewFsSrc(args)
} else {
cmd.CheckArgs(0, 0, command, args)
}
cmd.Run(false, false, command, func() error { cmd.Run(false, false, command, func() error {
s, err := newServer(f, &ftpflags.Opt) s, err := newServer(f, &Opt)
if err != nil { if err != nil {
return err return err
} }
@@ -55,12 +114,17 @@ or you can make a remote of type ftp to read and write it.
// server contains everything to run the server // server contains everything to run the server
type server struct { type server struct {
f fs.Fs f fs.Fs
srv *ftp.Server srv *ftp.Server
opt Options
vfs *vfs.VFS
proxy *proxy.Proxy
pendingMu sync.Mutex
pending map[string]*Driver // pending Driver~s that haven't got their VFS
} }
// Make a new FTP to serve the remote // Make a new FTP to serve the remote
func newServer(f fs.Fs, opt *ftpopt.Options) (*server, error) { func newServer(f fs.Fs, opt *Options) (*server, error) {
host, port, err := net.SplitHostPort(opt.ListenAddr) host, port, err := net.SplitHostPort(opt.ListenAddr)
if err != nil { if err != nil {
return nil, errors.New("Failed to parse host:port") return nil, errors.New("Failed to parse host:port")
@@ -70,27 +134,31 @@ func newServer(f fs.Fs, opt *ftpopt.Options) (*server, error) {
return nil, errors.New("Failed to parse host:port") return nil, errors.New("Failed to parse host:port")
} }
s := &server{
f: f,
opt: *opt,
pending: make(map[string]*Driver),
}
if proxyflags.Opt.AuthProxy != "" {
s.proxy = proxy.New(&proxyflags.Opt)
} else {
s.vfs = vfs.New(f, &vfsflags.Opt)
}
ftpopt := &ftp.ServerOpts{ ftpopt := &ftp.ServerOpts{
Name: "Rclone FTP Server", Name: "Rclone FTP Server",
WelcomeMessage: "Welcome on Rclone FTP Server", WelcomeMessage: "Welcome to Rclone " + fs.Version + " FTP Server",
Factory: &DriverFactory{ Factory: s, // implemented by NewDriver method
vfs: vfs.New(f, &vfsflags.Opt), Hostname: host,
}, Port: portNum,
Hostname: host, PublicIp: opt.PublicIP,
Port: portNum, PassivePorts: opt.PassivePorts,
PublicIp: opt.PublicIP, Auth: s, // implemented by CheckPasswd method
PassivePorts: opt.PassivePorts, Logger: &Logger{},
Auth: &Auth{
BasicUser: opt.BasicUser,
BasicPass: opt.BasicPass,
},
Logger: &Logger{},
//TODO implement a maximum of https://godoc.org/github.com/goftp/server#ServerOpts //TODO implement a maximum of https://godoc.org/github.com/goftp/server#ServerOpts
} }
return &server{ s.srv = ftp.NewServer(ftpopt)
f: f, return s, nil
srv: ftp.NewServer(ftpopt),
}, nil
} }
// serve runs the ftp server // serve runs the ftp server
@@ -132,39 +200,106 @@ func (l *Logger) PrintResponse(sessionID string, code int, message string) {
fs.Infof(sessionID, "< %d %s", code, message) fs.Infof(sessionID, "< %d %s", code, message)
} }
//Auth struct to handle ftp auth (temporary simple for POC) // findID finds the connection ID of the calling program. It does
type Auth struct { // this in an incredibly hacky way by looking in the stack trace.
BasicUser string //
BasicPass string // callerName should be the name of the function that we are looking
// for with a trailing '('
//
// What is really needed is a change of calling protocol so
// CheckPassword is called with the connection.
func findID(callerName []byte) (string, error) {
// Dump the stack in this format
// github.com/rclone/rclone/vendor/github.com/goftp/server.(*Conn).Serve(0xc0000b2680)
// /home/ncw/go/src/github.com/rclone/rclone/vendor/github.com/goftp/server/conn.go:116 +0x11d
buf := make([]byte, 4096)
n := runtime.Stack(buf, false)
buf = buf[:n]
// look for callerName first
i := bytes.Index(buf, callerName)
if i < 0 {
return "", errors.Errorf("findID: caller name not found in:\n%s", buf)
}
buf = buf[i+len(callerName):]
// find next ')'
i = bytes.IndexByte(buf, ')')
if i < 0 {
return "", errors.Errorf("findID: end of args not found in:\n%s", buf)
}
buf = buf[:i]
// trim off first argument
// find next ','
i = bytes.IndexByte(buf, ',')
if i >= 0 {
buf = buf[:i]
}
return string(buf), nil
} }
//CheckPasswd handle auth based on configuration var connServeFunction = []byte("(*Conn).Serve(")
func (a *Auth) CheckPasswd(user, pass string) (bool, error) {
return a.BasicUser == user && (a.BasicPass == "" || a.BasicPass == pass), nil // CheckPasswd handle auth based on configuration
func (s *server) CheckPasswd(user, pass string) (ok bool, err error) {
var VFS *vfs.VFS
if s.proxy != nil {
VFS, _, err = s.proxy.Call(user, pass)
if err != nil {
fs.Infof(nil, "proxy login failed: %v", err)
return false, nil
}
id, err := findID(connServeFunction)
if err != nil {
fs.Infof(nil, "proxy login failed: failed to read ID from stack: %v", err)
return false, nil
}
s.pendingMu.Lock()
d := s.pending[id]
delete(s.pending, id)
s.pendingMu.Unlock()
if d == nil {
return false, errors.Errorf("proxy login failed: failed to find pending Driver under ID %q", id)
}
d.vfs = VFS
} else {
ok = s.opt.BasicUser == user && (s.opt.BasicPass == "" || s.opt.BasicPass == pass)
if !ok {
fs.Infof(nil, "login failed: bad credentials")
return false, nil
}
}
return true, nil
} }
//DriverFactory factory of ftp driver for each session // NewDriver starts a new session for each client connection
type DriverFactory struct { func (s *server) NewDriver() (ftp.Driver, error) {
vfs *vfs.VFS
}
//NewDriver start a new session
func (f *DriverFactory) NewDriver() (ftp.Driver, error) {
log.Trace("", "Init driver")("") log.Trace("", "Init driver")("")
return &Driver{ d := &Driver{
vfs: f.vfs, s: s,
}, nil vfs: s.vfs, // this can be nil if proxy set
}
return d, nil
} }
//Driver implementation of ftp server //Driver implementation of ftp server
type Driver struct { type Driver struct {
s *server
vfs *vfs.VFS vfs *vfs.VFS
lock sync.Mutex lock sync.Mutex
} }
//Init a connection //Init a connection
func (d *Driver) Init(*ftp.Conn) { func (d *Driver) Init(c *ftp.Conn) {
defer log.Trace("", "Init session")("") defer log.Trace("", "Init session")("")
if d.s.proxy != nil {
id := fmt.Sprintf("%p", c)
d.s.pendingMu.Lock()
d.s.pending[id] = d
d.s.pendingMu.Unlock()
}
} }
//Stat get information on file or folder //Stat get information on file or folder

View File

@@ -8,83 +8,72 @@
package ftp package ftp
import ( import (
"context"
"fmt" "fmt"
"os"
"os/exec"
"testing" "testing"
ftp "github.com/goftp/server" ftp "github.com/goftp/server"
_ "github.com/rclone/rclone/backend/local" _ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/ftp/ftpopt" "github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
const ( const (
testHOST = "localhost" testHOST = "localhost"
testPORT = "51780" testPORT = "51780"
testPASSIVEPORTRANGE = "30000-32000" testPASSIVEPORTRANGE = "30000-32000"
testUSER = "rclone"
testPASS = "password"
) )
// TestFTP runs the ftp server then runs the unit tests for the // TestFTP runs the ftp server then runs the unit tests for the
// ftp remote against it. // ftp remote against it.
func TestFTP(t *testing.T) { func TestFTP(t *testing.T) {
opt := ftpopt.DefaultOpt // Configure and start the server
opt.ListenAddr = testHOST + ":" + testPORT start := func(f fs.Fs) (configmap.Simple, func()) {
opt.PassivePorts = testPASSIVEPORTRANGE opt := DefaultOpt
opt.BasicUser = "rclone" opt.ListenAddr = testHOST + ":" + testPORT
opt.BasicPass = "password" opt.PassivePorts = testPASSIVEPORTRANGE
opt.BasicUser = testUSER
opt.BasicPass = testPASS
fstest.Initialise() w, err := newServer(f, &opt)
fremote, _, clean, err := fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir)
assert.NoError(t, err)
defer clean()
err = fremote.Mkdir(context.Background(), "")
assert.NoError(t, err)
// Start the server
w, err := newServer(fremote, &opt)
assert.NoError(t, err)
go func() {
err := w.serve()
if err != ftp.ErrServerClosed {
assert.NoError(t, err)
}
}()
defer func() {
err := w.close()
assert.NoError(t, err) assert.NoError(t, err)
}()
// Change directory to run the tests quit := make(chan struct{})
err = os.Chdir("../../../backend/ftp") go func() {
assert.NoError(t, err, "failed to cd to ftp remote") err := w.serve()
close(quit)
if err != ftp.ErrServerClosed {
assert.NoError(t, err)
}
}()
// Run the ftp tests with an on the fly remote // Config for the backend we'll use to connect to the server
args := []string{"test"} config := configmap.Simple{
if testing.Verbose() { "type": "ftp",
args = append(args, "-v") "host": testHOST,
"port": testPORT,
"user": testUSER,
"pass": obscure.MustObscure(testPASS),
}
return config, func() {
err := w.close()
assert.NoError(t, err)
<-quit
}
} }
if *fstest.Verbose {
args = append(args, "-verbose") servetest.Run(t, "ftp", start)
} }
args = append(args, "-list-retries", fmt.Sprint(*fstest.ListRetries))
args = append(args, "-remote", "ftptest:") func TestFindID(t *testing.T) {
cmd := exec.Command("go", args...) id, err := findID([]byte("TestFindID("))
cmd.Env = append(os.Environ(), require.NoError(t, err)
"RCLONE_CONFIG_FTPTEST_TYPE=ftp", // id should be the argument to this function
"RCLONE_CONFIG_FTPTEST_HOST="+testHOST, assert.Equal(t, fmt.Sprintf("%p", t), id)
"RCLONE_CONFIG_FTPTEST_PORT="+testPORT,
"RCLONE_CONFIG_FTPTEST_USER=rclone",
"RCLONE_CONFIG_FTPTEST_PASS=0HU5Hx42YiLoNGJxppOOP3QTbr-KB_MP", // ./rclone obscure password
)
out, err := cmd.CombinedOutput()
if len(out) != 0 {
t.Logf("\n----------\n%s----------\n", string(out))
}
assert.NoError(t, err, "Running ftp integration tests")
} }

View File

@@ -1,28 +0,0 @@
package ftpflags
import (
"github.com/rclone/rclone/cmd/serve/ftp/ftpopt"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/rc"
"github.com/spf13/pflag"
)
// Options set by command line flags
var (
Opt = ftpopt.DefaultOpt
)
// AddFlagsPrefix adds flags for the ftpopt
func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *ftpopt.Options) {
rc.AddOption("ftp", &Opt)
flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to.")
flags.StringVarP(flagSet, &Opt.PublicIP, prefix+"public-ip", "", Opt.PublicIP, "Public IP address to advertise for passive connections.")
flags.StringVarP(flagSet, &Opt.PassivePorts, prefix+"passive-port", "", Opt.PassivePorts, "Passive port range to use.")
flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication.")
flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication. (empty value allow every password)")
}
// AddFlags adds flags for the httplib
func AddFlags(flagSet *pflag.FlagSet) {
AddFlagsPrefix(flagSet, "", &Opt)
}

View File

@@ -1,40 +0,0 @@
package ftpopt
// Help contains text describing the http server to add to the command
// help.
var Help = `
### Server options
Use --addr to specify which IP address and port the server should
listen on, eg --addr 1.2.3.4:8000 or --addr :8080 to listen to all
IPs. By default it only listens on localhost. You can use port
:0 to let the OS choose an available port.
If you set --addr to listen on a public or LAN accessible IP address
then using Authentication is advised - see the next section for info.
#### Authentication
By default this will serve files without needing a login.
You can set a single username and password with the --user and --pass flags.
`
// Options contains options for the http Server
type Options struct {
//TODO add more options
ListenAddr string // Port to listen on
PublicIP string // Passive ports range
PassivePorts string // Passive ports range
BasicUser string // single username for basic auth if not using Htpasswd
BasicPass string // password for BasicUser
}
// DefaultOpt is the default values used for Options
var DefaultOpt = Options{
ListenAddr: "localhost:2121",
PublicIP: "",
PassivePorts: "30000-32000",
BasicUser: "anonymous",
BasicPass: "",
}

View File

@@ -68,7 +68,7 @@ func newServer(f fs.Fs, opt *httplib.Options) *server {
f: f, f: f,
vfs: vfs.New(f, &vfsflags.Opt), vfs: vfs.New(f, &vfsflags.Opt),
} }
mux.HandleFunc("/", s.handler) mux.HandleFunc(s.Opt.Prefix+"/", s.handler)
return s return s
} }
@@ -93,7 +93,10 @@ func (s *server) handler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Accept-Ranges", "bytes") w.Header().Set("Accept-Ranges", "bytes")
w.Header().Set("Server", "rclone/"+fs.Version) w.Header().Set("Server", "rclone/"+fs.Version)
urlPath := r.URL.Path urlPath, ok := s.Path(w, r)
if !ok {
return
}
isDir := strings.HasSuffix(urlPath, "/") isDir := strings.HasSuffix(urlPath, "/")
remote := strings.Trim(urlPath, "/") remote := strings.Trim(urlPath, "/")
if isDir { if isDir {

View File

@@ -26,6 +26,9 @@ func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *httplib.Options)
flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "realm for authentication") flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "realm for authentication")
flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication.") flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication.")
flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication.") flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication.")
if prefix == "" {
flags.StringVarP(flagSet, &Opt.Prefix, prefix+"prefix", "", Opt.Prefix, "Prefix for URLs.")
}
} }
// AddFlags adds flags for the httplib // AddFlags adds flags for the httplib

View File

@@ -44,6 +44,11 @@ for a transfer.
--max-header-bytes controls the maximum number of bytes the server will --max-header-bytes controls the maximum number of bytes the server will
accept in the HTTP header. accept in the HTTP header.
--prefix controls the URL prefix that rclone serves from. By default
rclone will serve from the root. If you used --prefix "rclone" then
rclone would serve from a URL starting with "/rclone/". This is
useful if you wish to proxy rclone serve.
#### Authentication #### Authentication
By default this will serve files without needing a login. By default this will serve files without needing a login.
@@ -81,6 +86,7 @@ certificate authority certificate.
// Options contains options for the http Server // Options contains options for the http Server
type Options struct { type Options struct {
ListenAddr string // Port to listen on ListenAddr string // Port to listen on
Prefix string // prefix to strip from URLs
ServerReadTimeout time.Duration // Timeout for server reading data ServerReadTimeout time.Duration // Timeout for server reading data
ServerWriteTimeout time.Duration // Timeout for server writing data ServerWriteTimeout time.Duration // Timeout for server writing data
MaxHeaderBytes int // Maximum size of request header MaxHeaderBytes int // Maximum size of request header
@@ -91,8 +97,15 @@ type Options struct {
Realm string // realm for authentication Realm string // realm for authentication
BasicUser string // single username for basic auth if not using Htpasswd BasicUser string // single username for basic auth if not using Htpasswd
BasicPass string // password for BasicUser BasicPass string // password for BasicUser
Auth AuthFn // custom Auth (not set by command line flags)
} }
// AuthFn if used will be used to authenticate user, pass. If an error
// is returned then the user is not authenticated.
//
// If a non nil value is returned then it is added to the context under the key
type AuthFn func(user, pass string) (value interface{}, err error)
// DefaultOpt is the default values used for Options // DefaultOpt is the default values used for Options
var DefaultOpt = Options{ var DefaultOpt = Options{
ListenAddr: "localhost:8080", ListenAddr: "localhost:8080",
@@ -117,9 +130,14 @@ type Server struct {
type contextUserType struct{} type contextUserType struct{}
// ContextUserKey is a simple context key // ContextUserKey is a simple context key for storing the username of the request
var ContextUserKey = &contextUserType{} var ContextUserKey = &contextUserType{}
type contextAuthType struct{}
// ContextAuthKey is a simple context key for storing info returned by AuthFn
var ContextAuthKey = &contextAuthType{}
// singleUserProvider provides the encrypted password for a single user // singleUserProvider provides the encrypted password for a single user
func (s *Server) singleUserProvider(user, realm string) string { func (s *Server) singleUserProvider(user, realm string) string {
if user == s.Opt.BasicUser { if user == s.Opt.BasicUser {
@@ -128,6 +146,27 @@ func (s *Server) singleUserProvider(user, realm string) string {
return "" return ""
} }
// parseAuthorization parses the Authorization header into user, pass
// it returns a boolean as to whether the parse was successful
func parseAuthorization(r *http.Request) (user, pass string, ok bool) {
authHeader := r.Header.Get("Authorization")
if authHeader != "" {
s := strings.SplitN(authHeader, " ", 2)
if len(s) == 2 && s[0] == "Basic" {
b, err := base64.StdEncoding.DecodeString(s[1])
if err == nil {
parts := strings.SplitN(string(b), ":", 2)
user = parts[0]
if len(parts) > 1 {
pass = parts[1]
ok = true
}
}
}
}
return
}
// NewServer creates an http server. The opt can be nil in which case // NewServer creates an http server. The opt can be nil in which case
// the default options will be used. // the default options will be used.
func NewServer(handler http.Handler, opt *Options) *Server { func NewServer(handler http.Handler, opt *Options) *Server {
@@ -143,17 +182,20 @@ func NewServer(handler http.Handler, opt *Options) *Server {
} }
// Use htpasswd if required on everything // Use htpasswd if required on everything
if s.Opt.HtPasswd != "" || s.Opt.BasicUser != "" { if s.Opt.HtPasswd != "" || s.Opt.BasicUser != "" || s.Opt.Auth != nil {
var secretProvider auth.SecretProvider var authenticator *auth.BasicAuth
if s.Opt.HtPasswd != "" { if s.Opt.Auth == nil {
fs.Infof(nil, "Using %q as htpasswd storage", s.Opt.HtPasswd) var secretProvider auth.SecretProvider
secretProvider = auth.HtpasswdFileProvider(s.Opt.HtPasswd) if s.Opt.HtPasswd != "" {
} else { fs.Infof(nil, "Using %q as htpasswd storage", s.Opt.HtPasswd)
fs.Infof(nil, "Using --user %s --pass XXXX as authenticated user", s.Opt.BasicUser) secretProvider = auth.HtpasswdFileProvider(s.Opt.HtPasswd)
s.basicPassHashed = string(auth.MD5Crypt([]byte(s.Opt.BasicPass), []byte("dlPL2MqE"), []byte("$1$"))) } else {
secretProvider = s.singleUserProvider fs.Infof(nil, "Using --user %s --pass XXXX as authenticated user", s.Opt.BasicUser)
s.basicPassHashed = string(auth.MD5Crypt([]byte(s.Opt.BasicPass), []byte("dlPL2MqE"), []byte("$1$")))
secretProvider = s.singleUserProvider
}
authenticator = auth.NewBasicAuthenticator(s.Opt.Realm, secretProvider)
} }
authenticator := auth.NewBasicAuthenticator(s.Opt.Realm, secretProvider)
oldHandler := handler oldHandler := handler
handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// No auth wanted for OPTIONS method // No auth wanted for OPTIONS method
@@ -161,26 +203,36 @@ func NewServer(handler http.Handler, opt *Options) *Server {
oldHandler.ServeHTTP(w, r) oldHandler.ServeHTTP(w, r)
return return
} }
if username := authenticator.CheckAuth(r); username == "" { unauthorized := func() {
authHeader := r.Header.Get(authenticator.Headers.V().Authorization) w.Header().Set("Content-Type", "text/plain")
if authHeader != "" { w.Header().Set("WWW-Authenticate", `Basic realm="`+s.Opt.Realm+`"`)
s := strings.SplitN(authHeader, " ", 2) http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
var userName = "UNKNOWN"
if len(s) == 2 && s[0] == "Basic" {
b, err := base64.StdEncoding.DecodeString(s[1])
if err == nil {
userName = strings.SplitN(string(b), ":", 2)[0]
}
}
fs.Infof(r.URL.Path, "%s: Unauthorized request from %s", r.RemoteAddr, userName)
} else {
fs.Infof(r.URL.Path, "%s: Basic auth challenge sent", r.RemoteAddr)
}
authenticator.RequireAuth(w, r)
} else {
r = r.WithContext(context.WithValue(r.Context(), ContextUserKey, username))
oldHandler.ServeHTTP(w, r)
} }
user, pass, authValid := parseAuthorization(r)
if !authValid {
unauthorized()
return
}
if s.Opt.Auth == nil {
if username := authenticator.CheckAuth(r); username == "" {
fs.Infof(r.URL.Path, "%s: Unauthorized request from %s", r.RemoteAddr, user)
unauthorized()
return
}
} else {
// Custom Auth
value, err := s.Opt.Auth(user, pass)
if err != nil {
fs.Infof(r.URL.Path, "%s: Auth failed from %s: %v", r.RemoteAddr, user, err)
unauthorized()
return
}
if value != nil {
r = r.WithContext(context.WithValue(r.Context(), ContextAuthKey, value))
}
}
r = r.WithContext(context.WithValue(r.Context(), ContextUserKey, user))
oldHandler.ServeHTTP(w, r)
}) })
s.usingAuth = true s.usingAuth = true
} }
@@ -190,6 +242,14 @@ func NewServer(handler http.Handler, opt *Options) *Server {
log.Fatalf("Need both -cert and -key to use SSL") log.Fatalf("Need both -cert and -key to use SSL")
} }
// If a Path is set then serve from there
if strings.HasSuffix(s.Opt.Prefix, "/") {
s.Opt.Prefix = s.Opt.Prefix[:len(s.Opt.Prefix)-1]
}
if s.Opt.Prefix != "" && !strings.HasPrefix(s.Opt.Prefix, "/") {
s.Opt.Prefix = "/" + s.Opt.Prefix
}
// FIXME make a transport? // FIXME make a transport?
s.httpServer = &http.Server{ s.httpServer = &http.Server{
Addr: s.Opt.ListenAddr, Addr: s.Opt.ListenAddr,
@@ -299,10 +359,27 @@ func (s *Server) URL() string {
// (i.e. port assigned by operating system) // (i.e. port assigned by operating system)
addr = s.listener.Addr().String() addr = s.listener.Addr().String()
} }
return fmt.Sprintf("%s://%s/", proto, addr) return fmt.Sprintf("%s://%s%s/", proto, addr, s.Opt.Prefix)
} }
// UsingAuth returns true if authentication is required // UsingAuth returns true if authentication is required
func (s *Server) UsingAuth() bool { func (s *Server) UsingAuth() bool {
return s.usingAuth return s.usingAuth
} }
// Path returns the current path with the Prefix stripped
//
// If it returns false, then the path was invalid and the handler
// should exit as the error response has already been sent
func (s *Server) Path(w http.ResponseWriter, r *http.Request) (Path string, ok bool) {
Path = r.URL.Path
if s.Opt.Prefix == "" {
return Path, true
}
if !strings.HasPrefix(Path, s.Opt.Prefix+"/") {
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
return Path, false
}
Path = Path[len(s.Opt.Prefix):]
return Path, true
}

270
cmd/serve/proxy/proxy.go Normal file
View File

@@ -0,0 +1,270 @@
// Package proxy implements a programmable proxy for rclone serve
package proxy
import (
"bytes"
"encoding/json"
"os/exec"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
libcache "github.com/rclone/rclone/lib/cache"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"golang.org/x/crypto/bcrypt"
)
// Help contains text describing how to use the proxy
var Help = strings.Replace(`
### Auth Proxy
If you supply the parameter |--auth-proxy /path/to/program| then
rclone will use that program to generate backends on the fly which
then are used to authenticate incoming requests. This uses a simple
JSON based protocl with input on STDIN and output on STDOUT.
There is an example program
[bin/test_proxy.py](https://github.com/rclone/rclone/blob/master/test_proxy.py)
in the rclone source code.
The program's job is to take a |user| and |pass| on the input and turn
those into the config for a backend on STDOUT in JSON format. This
config will have any default parameters for the backend added, but it
won't use configuration from environment variables or command line
options - it is the job of the proxy program to make a complete
config.
This config generated must have this extra parameter
- |_root| - root to use for the backend
And it may have this parameter
- |_obscure| - comma separated strings for parameters to obscure
For example the program might take this on STDIN
|||
{
"user": "me",
"pass": "mypassword"
}
|||
And return this on STDOUT
|||
{
"type": "sftp",
"_root": "",
"_obscure": "pass",
"user": "me",
"pass": "mypassword",
"host": "sftp.example.com"
}
|||
This would mean that an SFTP backend would be created on the fly for
the |user| and |pass| returned in the output to the host given. Note
that since |_obscure| is set to |pass|, rclone will obscure the |pass|
parameter before creating the backend (which is required for sftp
backends).
The progam can manipulate the supplied |user| in any way, for example
to make proxy to many different sftp backends, you could make the
|user| be |user@example.com| and then set the |host| to |example.com|
in the output and the user to |user|. For security you'd probably want
to restrict the |host| to a limited list.
Note that an internal cache is keyed on |user| so only use that for
configuration, don't use |pass|. This also means that if a user's
password is changed the cache will need to expire (which takes 5 mins)
before it takes effect.
This can be used to build general purpose proxies to any kind of
backend that rclone supports.
`, "|", "`", -1)
// Options is options for creating the proxy
type Options struct {
AuthProxy string
}
// DefaultOpt is the default values uses for Opt
var DefaultOpt = Options{
AuthProxy: "",
}
// Proxy represents a proxy to turn auth requests into a VFS
type Proxy struct {
cmdLine []string // broken down command line
vfsCache *libcache.Cache
Opt Options
}
// cacheEntry is what is stored in the vfsCache
type cacheEntry struct {
vfs *vfs.VFS // stored VFS
pwHash []byte // bcrypt hash of the password
}
// New creates a new proxy with the Options passed in
func New(opt *Options) *Proxy {
return &Proxy{
Opt: *opt,
cmdLine: strings.Fields(opt.AuthProxy),
vfsCache: libcache.New(),
}
}
// run the proxy command returning a config map
func (p *Proxy) run(in map[string]string) (config configmap.Simple, err error) {
cmd := exec.Command(p.cmdLine[0], p.cmdLine[1:]...)
inBytes, err := json.MarshalIndent(in, "", "\t")
if err != nil {
return nil, errors.Wrap(err, "Proxy.Call failed to marshal input: %v")
}
var stdout, stderr bytes.Buffer
cmd.Stdin = bytes.NewBuffer(inBytes)
cmd.Stdout = &stdout
cmd.Stderr = &stderr
start := time.Now()
err = cmd.Run()
fs.Debugf(nil, "Calling proxy %v", p.cmdLine)
duration := time.Since(start)
if err != nil {
return nil, errors.Wrapf(err, "proxy: failed on %v: %q", p.cmdLine, strings.TrimSpace(string(stderr.Bytes())))
}
err = json.Unmarshal(stdout.Bytes(), &config)
if err != nil {
return nil, errors.Wrapf(err, "proxy: failed to read output: %q", string(stdout.Bytes()))
}
fs.Debugf(nil, "Proxy returned in %v", duration)
// Obscure any values in the config map that need it
obscureFields, ok := config.Get("_obscure")
if ok {
for _, key := range strings.Split(obscureFields, ",") {
value, ok := config.Get(key)
if ok {
obscuredValue, err := obscure.Obscure(value)
if err != nil {
return nil, errors.Wrap(err, "proxy")
}
config.Set(key, obscuredValue)
}
}
}
return config, nil
}
// call runs the auth proxy and returns a cacheEntry and an error
func (p *Proxy) call(user, pass string, passwordBytes []byte) (value interface{}, err error) {
// Contact the proxy
config, err := p.run(map[string]string{
"user": user,
"pass": pass,
})
if err != nil {
return nil, err
}
// Look for required fields in the answer
fsName, ok := config.Get("type")
if !ok {
return nil, errors.New("proxy: type not set in result")
}
root, ok := config.Get("_root")
if !ok {
return nil, errors.New("proxy: _root not set in result")
}
// Find the backend
fsInfo, err := fs.Find(fsName)
if err != nil {
return nil, errors.Wrapf(err, "proxy: couldn't find backend for %q", fsName)
}
// base name of config on user name. This may appear in logs
name := "proxy-" + user
fsString := name + ":" + root
// Look for fs in the VFS cache
value, err = p.vfsCache.Get(user, func(key string) (value interface{}, ok bool, err error) {
// Create the Fs from the cache
f, err := cache.GetFn(fsString, func(fsString string) (fs.Fs, error) {
// Update the config with the default values
for i := range fsInfo.Options {
o := &fsInfo.Options[i]
if _, found := config.Get(o.Name); !found && o.Default != nil && o.String() != "" {
config.Set(o.Name, o.String())
}
}
return fsInfo.NewFs(name, root, config)
})
if err != nil {
return nil, false, err
}
pwHash, err := bcrypt.GenerateFromPassword(passwordBytes, bcrypt.DefaultCost)
if err != nil {
return nil, false, err
}
entry := cacheEntry{
vfs: vfs.New(f, &vfsflags.Opt),
pwHash: pwHash,
}
return entry, true, nil
})
if err != nil {
return nil, errors.Wrapf(err, "proxy: failed to create backend")
}
return value, nil
}
// Call runs the auth proxy with the given input, returning a *vfs.VFS
// and the key used in the VFS cache.
func (p *Proxy) Call(user, pass string) (VFS *vfs.VFS, vfsKey string, err error) {
var passwordBytes = []byte(pass)
// Look in the cache first
value, ok := p.vfsCache.GetMaybe(user)
// If not found then call the proxy for a fresh answer
if !ok {
value, err = p.call(user, pass, passwordBytes)
if err != nil {
return nil, "", err
}
}
// check we got what we were expecting
entry, ok := value.(cacheEntry)
if !ok {
return nil, "", errors.Errorf("proxy: value is not cache entry: %#v", value)
}
// Check the password is correct in the cached entry. This
// prevents an attack where subsequent requests for the same
// user don't have their auth checked. It does mean that if
// the password is changed, the user will have to wait for
// cache expiry (5m) before trying again.
err = bcrypt.CompareHashAndPassword(entry.pwHash, passwordBytes)
if err != nil {
return nil, "", errors.Wrap(err, "proxy: incorrect password")
}
return entry.vfs, user, nil
}
// Get VFS from the cache using key - returns nil if not found
func (p *Proxy) Get(key string) *vfs.VFS {
value, ok := p.vfsCache.GetMaybe(key)
if !ok {
return nil
}
entry := value.(cacheEntry)
return entry.vfs
}

View File

@@ -0,0 +1,41 @@
// +build ignore
// A simple auth proxy for testing purposes
package main
import (
"encoding/json"
"log"
"os"
)
func main() {
// Read the input
var in map[string]string
err := json.NewDecoder(os.Stdin).Decode(&in)
if err != nil {
log.Fatal(err)
}
// Write the output
var out = map[string]string{}
for k, v := range in {
switch k {
case "user":
v += "-test"
case "error":
log.Fatal(v)
}
out[k] = v
}
if out["type"] == "" {
out["type"] = "local"
}
if out["_root"] == "" {
out["_root"] = ""
}
json.NewEncoder(os.Stdout).Encode(&out)
if err != nil {
log.Fatal(err)
}
}

View File

@@ -0,0 +1,145 @@
package proxy
import (
"strings"
"testing"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/crypto/bcrypt"
)
func TestRun(t *testing.T) {
opt := DefaultOpt
cmd := "go run proxy_code.go"
opt.AuthProxy = cmd
p := New(&opt)
t.Run("Normal", func(t *testing.T) {
config, err := p.run(map[string]string{
"type": "ftp",
"user": "me",
"pass": "pass",
"host": "127.0.0.1",
})
require.NoError(t, err)
assert.Equal(t, configmap.Simple{
"type": "ftp",
"user": "me-test",
"pass": "pass",
"host": "127.0.0.1",
"_root": "",
}, config)
})
t.Run("Error", func(t *testing.T) {
config, err := p.run(map[string]string{
"error": "potato",
})
assert.Nil(t, config)
require.Error(t, err)
require.Contains(t, err.Error(), "potato")
})
t.Run("Obscure", func(t *testing.T) {
config, err := p.run(map[string]string{
"type": "ftp",
"user": "me",
"pass": "pass",
"host": "127.0.0.1",
"_obscure": "pass,user",
})
require.NoError(t, err)
config["user"] = obscure.MustReveal(config["user"])
config["pass"] = obscure.MustReveal(config["pass"])
assert.Equal(t, configmap.Simple{
"type": "ftp",
"user": "me-test",
"pass": "pass",
"host": "127.0.0.1",
"_obscure": "pass,user",
"_root": "",
}, config)
})
const testUser = "testUser"
const testPass = "testPass"
t.Run("call", func(t *testing.T) {
// check cache empty
assert.Equal(t, 0, p.vfsCache.Entries())
defer p.vfsCache.Clear()
passwordBytes := []byte(testPass)
value, err := p.call(testUser, testPass, passwordBytes)
require.NoError(t, err)
entry, ok := value.(cacheEntry)
require.True(t, ok)
// check hash is correct in entry
err = bcrypt.CompareHashAndPassword(entry.pwHash, passwordBytes)
require.NoError(t, err)
require.NotNil(t, entry.vfs)
f := entry.vfs.Fs()
require.NotNil(t, f)
assert.Equal(t, "proxy-"+testUser, f.Name())
assert.True(t, strings.HasPrefix(f.String(), "Local file system"))
// check it is in the cache
assert.Equal(t, 1, p.vfsCache.Entries())
cacheValue, ok := p.vfsCache.GetMaybe(testUser)
assert.True(t, ok)
assert.Equal(t, value, cacheValue)
})
t.Run("Call", func(t *testing.T) {
// check cache empty
assert.Equal(t, 0, p.vfsCache.Entries())
defer p.vfsCache.Clear()
vfs, vfsKey, err := p.Call(testUser, testPass)
require.NoError(t, err)
require.NotNil(t, vfs)
assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
assert.Equal(t, testUser, vfsKey)
// check it is in the cache
assert.Equal(t, 1, p.vfsCache.Entries())
cacheValue, ok := p.vfsCache.GetMaybe(testUser)
assert.True(t, ok)
cacheEntry, ok := cacheValue.(cacheEntry)
assert.True(t, ok)
assert.Equal(t, vfs, cacheEntry.vfs)
// Test Get works while we have something in the cache
t.Run("Get", func(t *testing.T) {
assert.Equal(t, vfs, p.Get(testUser))
assert.Nil(t, p.Get("unknown"))
})
// now try again from the cache
vfs, vfsKey, err = p.Call(testUser, testPass)
require.NoError(t, err)
require.NotNil(t, vfs)
assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
assert.Equal(t, testUser, vfsKey)
// check cache is at the same level
assert.Equal(t, 1, p.vfsCache.Entries())
// now try again from the cache but with wrong password
vfs, vfsKey, err = p.Call(testUser, testPass+"wrong")
require.Error(t, err)
require.Contains(t, err.Error(), "incorrect password")
require.Nil(t, vfs)
require.Equal(t, "", vfsKey)
// check cache is at the same level
assert.Equal(t, 1, p.vfsCache.Entries())
})
}

View File

@@ -0,0 +1,18 @@
// Package proxyflags implements command line flags to set up a proxy
package proxyflags
import (
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/fs/config/flags"
"github.com/spf13/pflag"
)
// Options set by command line flags
var (
Opt = proxy.DefaultOpt
)
// AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) {
flags.StringVarP(flagSet, &Opt.AuthProxy, "auth-proxy", "", Opt.AuthProxy, "A program to use to create the backend from the auth.")
}

View File

@@ -171,7 +171,7 @@ func newServer(f fs.Fs, opt *httplib.Options) *server {
Server: httplib.NewServer(mux, opt), Server: httplib.NewServer(mux, opt),
f: f, f: f,
} }
mux.HandleFunc("/", s.handler) mux.HandleFunc(s.Opt.Prefix+"/", s.handler)
return s return s
} }
@@ -211,7 +211,10 @@ func (s *server) handler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Accept-Ranges", "bytes") w.Header().Set("Accept-Ranges", "bytes")
w.Header().Set("Server", "rclone/"+fs.Version) w.Header().Set("Server", "rclone/"+fs.Version)
path := r.URL.Path path, ok := s.Path(w, r)
if !ok {
return
}
remote := makeRemote(path) remote := makeRemote(path)
fs.Debugf(s.f, "%s %s", r.Method, path) fs.Debugf(s.f, "%s %s", r.Method, path)

View File

@@ -0,0 +1,35 @@
// +build ignore
// A simple auth proxy for testing purposes
package main
import (
"encoding/json"
"log"
"os"
)
func main() {
if len(os.Args) < 2 {
log.Fatalf("Syntax: %s <root>", os.Args[0])
}
root := os.Args[1]
// Read the input
var in map[string]string
err := json.NewDecoder(os.Stdin).Decode(&in)
if err != nil {
log.Fatal(err)
}
// Write the output
var out = map[string]string{
"type": "local",
"_root": root,
"_obscure": "pass",
}
json.NewEncoder(os.Stdout).Encode(&out)
if err != nil {
log.Fatal(err)
}
}

View File

@@ -0,0 +1,107 @@
// Package servetest provides infrastructure for running loopback
// tests of "rclone serve backend:" against the backend integration
// tests.
package servetest
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// StartFn describes the callback which should start the server with
// the Fs passed in.
// It should return a config for the backend used to connect to the
// server and a clean up function
type StartFn func(f fs.Fs) (configmap.Simple, func())
// run runs the server then runs the unit tests for the remote against
// it.
//
// If useProxy is true the server is started with a nil backend and
// proxyflags.Opt.AuthProxy is pointed at proxy_code.go, so the
// backend is created on the fly by the auth proxy. The backend
// integration tests are then run as an external "go test" process
// from the backend's own directory, configured entirely through
// RCLONE_CONFIG_* environment variables built from the config the
// start callback returned.
func run(t *testing.T, name string, start StartFn, useProxy bool) {
	fstest.Initialise()

	// Make a random remote to serve, and make sure its root exists
	fremote, _, clean, err := fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir)
	assert.NoError(t, err)
	defer clean()

	err = fremote.Mkdir(context.Background(), "")
	assert.NoError(t, err)

	f := fremote
	if useProxy {
		// If using a proxy don't pass in the backend
		f = nil

		// the backend config will be made by the proxy
		prog, err := filepath.Abs("../servetest/proxy_code.go")
		require.NoError(t, err)
		cmd := "go run " + prog + " " + fremote.Root()

		// FIXME this is untidy setting a global variable!
		proxyflags.Opt.AuthProxy = cmd
		defer func() {
			// reset the global so later tests run un-proxied
			proxyflags.Opt.AuthProxy = ""
		}()
	}
	config, cleanup := start(f)
	defer cleanup()

	// Change directory to run the tests
	cwd, err := os.Getwd()
	require.NoError(t, err)
	err = os.Chdir("../../../backend/" + name)
	require.NoError(t, err, "failed to cd to "+name+" backend")
	defer func() {
		// Change back to the old directory
		require.NoError(t, os.Chdir(cwd))
	}()

	// Run the backend tests with an on the fly remote
	args := []string{"test"}
	if testing.Verbose() {
		args = append(args, "-v")
	}
	if *fstest.Verbose {
		args = append(args, "-verbose")
	}
	remoteName := name + "test:"
	args = append(args, "-remote", remoteName)
	args = append(args, "-list-retries", fmt.Sprint(*fstest.ListRetries))
	cmd := exec.Command("go", args...)

	// Configure the backend with environment variables, e.g.
	// RCLONE_CONFIG_SFTPTEST_TYPE=sftp for remote "sftptest:"
	cmd.Env = os.Environ()
	prefix := "RCLONE_CONFIG_" + strings.ToUpper(remoteName[:len(remoteName)-1]) + "_"
	for k, v := range config {
		cmd.Env = append(cmd.Env, prefix+strings.ToUpper(k)+"="+v)
	}

	// Run the test
	out, err := cmd.CombinedOutput()
	if len(out) != 0 {
		t.Logf("\n----------\n%s----------\n", string(out))
	}
	assert.NoError(t, err, "Running "+name+" integration tests")
}
// Run runs the server then runs the unit tests for the remote against
// it, once with normal authentication and once via the auth proxy.
func Run(t *testing.T, name string, start StartFn) {
	for _, variant := range []struct {
		subName  string
		useProxy bool
	}{
		{subName: "Normal", useProxy: false},
		{subName: "AuthProxy", useProxy: true},
	} {
		variant := variant
		t.Run(variant.subName, func(t *testing.T) {
			run(t, name, start, variant.useProxy)
		})
	}
}

View File

@@ -47,7 +47,6 @@ func shellUnEscape(str string) string {
// Info about the current connection // Info about the current connection
type conn struct { type conn struct {
vfs *vfs.VFS vfs *vfs.VFS
f fs.Fs
handlers sftp.Handlers handlers sftp.Handlers
what string what string
} }
@@ -65,7 +64,7 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (
fs.Debugf(c.what, "exec command: binary = %q, args = %q", binary, args) fs.Debugf(c.what, "exec command: binary = %q, args = %q", binary, args)
switch binary { switch binary {
case "df": case "df":
about := c.f.Features().About about := c.vfs.Fs().Features().About
if about == nil { if about == nil {
return errors.New("df not supported") return errors.New("df not supported")
} }
@@ -121,7 +120,7 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (
// special cases for rclone command detection // special cases for rclone command detection
switch args { switch args {
case "'abc' | md5sum": case "'abc' | md5sum":
if c.f.Hashes().Contains(hash.MD5) { if c.vfs.Fs().Hashes().Contains(hash.MD5) {
_, err = fmt.Fprintf(out, "0bee89b07a248e27c83fc3d5951213c1 -\n") _, err = fmt.Fprintf(out, "0bee89b07a248e27c83fc3d5951213c1 -\n")
if err != nil { if err != nil {
return errors.Wrap(err, "send output failed") return errors.Wrap(err, "send output failed")
@@ -130,7 +129,7 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (
return errors.New("md5 hash not supported") return errors.New("md5 hash not supported")
} }
case "'abc' | sha1sum": case "'abc' | sha1sum":
if c.f.Hashes().Contains(hash.SHA1) { if c.vfs.Fs().Hashes().Contains(hash.SHA1) {
_, err = fmt.Fprintf(out, "03cfd743661f07975fa2f1220c5194cbaff48451 -\n") _, err = fmt.Fprintf(out, "03cfd743661f07975fa2f1220c5194cbaff48451 -\n")
if err != nil { if err != nil {
return errors.Wrap(err, "send output failed") return errors.Wrap(err, "send output failed")

View File

@@ -19,14 +19,14 @@ type vfsHandler struct {
} }
// vfsHandler returns a Handlers object with the test handlers. // vfsHandler returns a Handlers object with the test handlers.
func newVFSHandler(vfs *vfs.VFS) (sftp.Handlers, error) { func newVFSHandler(vfs *vfs.VFS) sftp.Handlers {
v := vfsHandler{VFS: vfs} v := vfsHandler{VFS: vfs}
return sftp.Handlers{ return sftp.Handlers{
FileGet: v, FileGet: v,
FilePut: v, FilePut: v,
FileCmd: v, FileCmd: v,
FileList: v, FileList: v,
}, nil }
} }
func (v vfsHandler) Fileread(r *sftp.Request) (io.ReaderAt, error) { func (v vfsHandler) Fileread(r *sftp.Request) (io.ReaderAt, error) {

View File

@@ -18,7 +18,8 @@ import (
"strings" "strings"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/pkg/sftp" "github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/lib/env" "github.com/rclone/rclone/lib/env"
@@ -33,21 +34,47 @@ type server struct {
opt Options opt Options
vfs *vfs.VFS vfs *vfs.VFS
config *ssh.ServerConfig config *ssh.ServerConfig
handlers sftp.Handlers
listener net.Listener listener net.Listener
waitChan chan struct{} // for waiting on the listener to close waitChan chan struct{} // for waiting on the listener to close
proxy *proxy.Proxy
} }
func newServer(f fs.Fs, opt *Options) *server { func newServer(f fs.Fs, opt *Options) *server {
s := &server{ s := &server{
f: f, f: f,
vfs: vfs.New(f, &vfsflags.Opt),
opt: *opt, opt: *opt,
waitChan: make(chan struct{}), waitChan: make(chan struct{}),
} }
if proxyflags.Opt.AuthProxy != "" {
s.proxy = proxy.New(&proxyflags.Opt)
} else {
s.vfs = vfs.New(f, &vfsflags.Opt)
}
return s return s
} }
// getVFS gets the vfs from s or the proxy.
//
// Without an auth proxy it returns the single shared VFS. With a
// proxy the VFS is looked up in the proxy's cache using the key that
// the password callback stored in the SSH permissions extensions.
// It returns nil if no usable VFS can be found, in which case the
// caller should close the connection.
func (s *server) getVFS(what string, sshConn *ssh.ServerConn) (VFS *vfs.VFS) {
	if s.proxy == nil {
		return s.vfs
	}
	// BUG FIX: this condition used &&, which dereferenced a nil
	// Permissions pointer (panic) and failed to catch a nil
	// Extensions map on its own.
	if sshConn.Permissions == nil || sshConn.Permissions.Extensions == nil {
		fs.Infof(what, "SSH Permissions Extensions not found")
		return nil
	}
	key := sshConn.Permissions.Extensions["_vfsKey"]
	if key == "" {
		fs.Infof(what, "VFS key not found")
		return nil
	}
	VFS = s.proxy.Get(key)
	if VFS == nil {
		fs.Infof(what, "failed to read VFS from cache")
		return nil
	}
	return VFS
}
func (s *server) acceptConnections() { func (s *server) acceptConnections() {
for { for {
nConn, err := s.listener.Accept() nConn, err := s.listener.Accept()
@@ -73,11 +100,15 @@ func (s *server) acceptConnections() {
go ssh.DiscardRequests(reqs) go ssh.DiscardRequests(reqs)
c := &conn{ c := &conn{
vfs: s.vfs, what: what,
f: s.f, vfs: s.getVFS(what, sshConn),
handlers: s.handlers,
what: what,
} }
if c.vfs == nil {
fs.Infof(what, "Closing unauthenticated connection (couldn't find VFS)")
_ = nConn.Close()
continue
}
c.handlers = newVFSHandler(c.vfs)
// Accept all channels // Accept all channels
go c.handleChannels(chans) go c.handleChannels(chans)
@@ -109,7 +140,19 @@ func (s *server) serve() (err error) {
ServerVersion: "SSH-2.0-" + fs.Config.UserAgent, ServerVersion: "SSH-2.0-" + fs.Config.UserAgent,
PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) { PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
fs.Debugf(describeConn(c), "Password login attempt for %s", c.User()) fs.Debugf(describeConn(c), "Password login attempt for %s", c.User())
if s.opt.User != "" && s.opt.Pass != "" { if s.proxy != nil {
// query the proxy for the config
_, vfsKey, err := s.proxy.Call(c.User(), string(pass))
if err != nil {
return nil, err
}
// just return the Key so we can get it back from the cache
return &ssh.Permissions{
Extensions: map[string]string{
"_vfsKey": vfsKey,
},
}, nil
} else if s.opt.User != "" && s.opt.Pass != "" {
userOK := subtle.ConstantTimeCompare([]byte(c.User()), []byte(s.opt.User)) userOK := subtle.ConstantTimeCompare([]byte(c.User()), []byte(s.opt.User))
passOK := subtle.ConstantTimeCompare(pass, []byte(s.opt.Pass)) passOK := subtle.ConstantTimeCompare(pass, []byte(s.opt.Pass))
if (userOK & passOK) == 1 { if (userOK & passOK) == 1 {
@@ -120,6 +163,9 @@ func (s *server) serve() (err error) {
}, },
PublicKeyCallback: func(c ssh.ConnMetadata, pubKey ssh.PublicKey) (*ssh.Permissions, error) { PublicKeyCallback: func(c ssh.ConnMetadata, pubKey ssh.PublicKey) (*ssh.Permissions, error) {
fs.Debugf(describeConn(c), "Public key login attempt for %s", c.User()) fs.Debugf(describeConn(c), "Public key login attempt for %s", c.User())
if s.proxy != nil {
return nil, errors.New("public key login not allowed when using auth proxy")
}
if _, ok := authorizedKeysMap[string(pubKey.Marshal())]; ok { if _, ok := authorizedKeysMap[string(pubKey.Marshal())]; ok {
return &ssh.Permissions{ return &ssh.Permissions{
// Record the public key used for authentication. // Record the public key used for authentication.
@@ -178,11 +224,6 @@ func (s *server) serve() (err error) {
} }
fs.Logf(nil, "SFTP server listening on %v\n", s.listener.Addr()) fs.Logf(nil, "SFTP server listening on %v\n", s.listener.Addr())
s.handlers, err = newVFSHandler(s.vfs)
if err != nil {
return errors.Wrap(err, "serve sftp: failed to create fs")
}
go s.acceptConnections() go s.acceptConnections()
return nil return nil

View File

@@ -6,6 +6,9 @@ package sftp
import ( import (
"github.com/rclone/rclone/cmd" "github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs" "github.com/rclone/rclone/vfs"
@@ -46,6 +49,7 @@ func AddFlags(flagSet *pflag.FlagSet, Opt *Options) {
func init() { func init() {
vfsflags.AddFlags(Command.Flags()) vfsflags.AddFlags(Command.Flags())
proxyflags.AddFlags(Command.Flags())
AddFlags(Command.Flags(), &Opt) AddFlags(Command.Flags(), &Opt)
} }
@@ -84,10 +88,15 @@ reachable externally then supply "--addr :2022" for example.
Note that the default of "--vfs-cache-mode off" is fine for the rclone Note that the default of "--vfs-cache-mode off" is fine for the rclone
sftp backend, but it may not be with other SFTP clients. sftp backend, but it may not be with other SFTP clients.
` + vfs.Help, ` + vfs.Help + proxy.Help,
Run: func(command *cobra.Command, args []string) { Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args) var f fs.Fs
f := cmd.NewFsSrc(args) if proxyflags.Opt.AuthProxy == "" {
cmd.CheckArgs(1, 1, command, args)
f = cmd.NewFsSrc(args)
} else {
cmd.CheckArgs(0, 0, command, args)
}
cmd.Run(false, true, command, func() error { cmd.Run(false, true, command, func() error {
s := newServer(f, &Opt) s := newServer(f, &Opt)
err := s.Serve() err := s.Serve()

View File

@@ -8,16 +8,15 @@
package sftp package sftp
import ( import (
"context"
"os"
"os/exec"
"strings" "strings"
"testing" "testing"
"github.com/pkg/sftp" "github.com/pkg/sftp"
_ "github.com/rclone/rclone/backend/local" _ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure" "github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fstest"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@@ -38,58 +37,35 @@ var (
// TestSftp runs the sftp server then runs the unit tests for the // TestSftp runs the sftp server then runs the unit tests for the
// sftp remote against it. // sftp remote against it.
func TestSftp(t *testing.T) { func TestSftp(t *testing.T) {
fstest.Initialise() // Configure and start the server
start := func(f fs.Fs) (configmap.Simple, func()) {
opt := DefaultOpt
opt.ListenAddr = testBindAddress
opt.User = testUser
opt.Pass = testPass
fremote, _, clean, err := fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir) w := newServer(f, &opt)
assert.NoError(t, err) assert.NoError(t, w.serve())
defer clean()
err = fremote.Mkdir(context.Background(), "") // Read the host and port we started on
assert.NoError(t, err) addr := w.Addr()
colon := strings.LastIndex(addr, ":")
opt := DefaultOpt // Config for the backend we'll use to connect to the server
opt.ListenAddr = testBindAddress config := configmap.Simple{
opt.User = testUser "type": "sftp",
opt.Pass = testPass "user": testUser,
"pass": obscure.MustObscure(testPass),
"host": addr[:colon],
"port": addr[colon+1:],
}
// Start the server // return a stop function
w := newServer(fremote, &opt) return config, func() {
assert.NoError(t, w.serve()) w.Close()
defer func() { w.Wait()
w.Close() }
w.Wait()
}()
// Change directory to run the tests
err = os.Chdir("../../../backend/sftp")
assert.NoError(t, err, "failed to cd to sftp backend")
// Run the sftp tests with an on the fly remote
args := []string{"test"}
if testing.Verbose() {
args = append(args, "-v")
} }
if *fstest.Verbose {
args = append(args, "-verbose") servetest.Run(t, "sftp", start)
}
args = append(args, "-remote", "sftptest:")
cmd := exec.Command("go", args...)
addr := w.Addr()
colon := strings.LastIndex(addr, ":")
if colon < 0 {
panic("need a : in the address: " + addr)
}
host, port := addr[:colon], addr[colon+1:]
cmd.Env = append(os.Environ(),
"RCLONE_CONFIG_SFTPTEST_TYPE=sftp",
"RCLONE_CONFIG_SFTPTEST_HOST="+host,
"RCLONE_CONFIG_SFTPTEST_PORT="+port,
"RCLONE_CONFIG_SFTPTEST_USER="+testUser,
"RCLONE_CONFIG_SFTPTEST_PASS="+obscure.MustObscure(testPass),
)
out, err := cmd.CombinedOutput()
if len(out) != 0 {
t.Logf("\n----------\n%s----------\n", string(out))
}
assert.NoError(t, err, "Running sftp integration tests")
} }

View File

@@ -12,9 +12,11 @@ import (
"github.com/rclone/rclone/cmd/serve/httplib" "github.com/rclone/rclone/cmd/serve/httplib"
"github.com/rclone/rclone/cmd/serve/httplib/httpflags" "github.com/rclone/rclone/cmd/serve/httplib/httpflags"
"github.com/rclone/rclone/cmd/serve/httplib/serve" "github.com/rclone/rclone/cmd/serve/httplib/serve"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/lib/errors"
"github.com/rclone/rclone/vfs" "github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags" "github.com/rclone/rclone/vfs/vfsflags"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@@ -30,6 +32,7 @@ var (
func init() { func init() {
httpflags.AddFlags(Command.Flags()) httpflags.AddFlags(Command.Flags())
vfsflags.AddFlags(Command.Flags()) vfsflags.AddFlags(Command.Flags())
proxyflags.AddFlags(Command.Flags())
Command.Flags().StringVar(&hashName, "etag-hash", "", "Which hash to use for the ETag, or auto or blank for off") Command.Flags().StringVar(&hashName, "etag-hash", "", "Which hash to use for the ETag, or auto or blank for off")
Command.Flags().BoolVar(&disableGETDir, "disable-dir-list", false, "Disable HTML directory list on GET request for a directory") Command.Flags().BoolVar(&disableGETDir, "disable-dir-list", false, "Disable HTML directory list on GET request for a directory")
} }
@@ -57,10 +60,15 @@ supported hash on the backend or you can use a named hash such as
Use "rclone hashsum" to see the full list. Use "rclone hashsum" to see the full list.
` + httplib.Help + vfs.Help, ` + httplib.Help + vfs.Help + proxy.Help,
RunE: func(command *cobra.Command, args []string) error { RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 1, command, args) var f fs.Fs
f := cmd.NewFsSrc(args) if proxyflags.Opt.AuthProxy == "" {
cmd.CheckArgs(1, 1, command, args)
f = cmd.NewFsSrc(args)
} else {
cmd.CheckArgs(0, 0, command, args)
}
hashType = hash.None hashType = hash.None
if hashName == "auto" { if hashName == "auto" {
hashType = f.Hashes().GetOne() hashType = f.Hashes().GetOne()
@@ -101,8 +109,9 @@ Use "rclone hashsum" to see the full list.
type WebDAV struct { type WebDAV struct {
*httplib.Server *httplib.Server
f fs.Fs f fs.Fs
vfs *vfs.VFS _vfs *vfs.VFS // don't use directly, use getVFS
webdavhandler *webdav.Handler webdavhandler *webdav.Handler
proxy *proxy.Proxy
} }
// check interface // check interface
@@ -111,21 +120,58 @@ var _ webdav.FileSystem = (*WebDAV)(nil)
// Make a new WebDAV to serve the remote // Make a new WebDAV to serve the remote
func newWebDAV(f fs.Fs, opt *httplib.Options) *WebDAV { func newWebDAV(f fs.Fs, opt *httplib.Options) *WebDAV {
w := &WebDAV{ w := &WebDAV{
f: f, f: f,
vfs: vfs.New(f, &vfsflags.Opt),
} }
if proxyflags.Opt.AuthProxy != "" {
w.proxy = proxy.New(&proxyflags.Opt)
// override auth
copyOpt := *opt
copyOpt.Auth = w.auth
opt = &copyOpt
} else {
w._vfs = vfs.New(f, &vfsflags.Opt)
}
w.Server = httplib.NewServer(http.HandlerFunc(w.handler), opt)
webdavHandler := &webdav.Handler{ webdavHandler := &webdav.Handler{
Prefix: w.Server.Opt.Prefix,
FileSystem: w, FileSystem: w,
LockSystem: webdav.NewMemLS(), LockSystem: webdav.NewMemLS(),
Logger: w.logRequest, // FIXME Logger: w.logRequest, // FIXME
} }
w.webdavhandler = webdavHandler w.webdavhandler = webdavHandler
w.Server = httplib.NewServer(http.HandlerFunc(w.handler), opt)
return w return w
} }
// getVFS returns the VFS to use for this request.
//
// When no auth proxy is configured the single VFS created at startup
// is returned. Otherwise the VFS is expected to have been stored in
// the request context (under httplib.ContextAuthKey) by the
// authentication callback.
func (w *WebDAV) getVFS(ctx context.Context) (*vfs.VFS, error) {
	if w._vfs != nil {
		return w._vfs, nil
	}
	value := ctx.Value(httplib.ContextAuthKey)
	if value == nil {
		return nil, errors.New("no VFS found in context")
	}
	if VFS, ok := value.(*vfs.VFS); ok {
		return VFS, nil
	}
	return nil, errors.Errorf("context value is not VFS: %#v", value)
}
// auth does proxy authorization: it asks the proxy to log the user
// in and returns the resulting VFS as the opaque auth value, which
// ends up in the request context for getVFS to retrieve.
func (w *WebDAV) auth(user, pass string) (interface{}, error) {
	VFS, _, err := w.proxy.Call(user, pass)
	if err != nil {
		return nil, err
	}
	return VFS, nil
}
func (w *WebDAV) handler(rw http.ResponseWriter, r *http.Request) { func (w *WebDAV) handler(rw http.ResponseWriter, r *http.Request) {
urlPath := r.URL.Path urlPath, ok := w.Path(rw, r)
if !ok {
return
}
isDir := strings.HasSuffix(urlPath, "/") isDir := strings.HasSuffix(urlPath, "/")
remote := strings.Trim(urlPath, "/") remote := strings.Trim(urlPath, "/")
if !disableGETDir && (r.Method == "GET" || r.Method == "HEAD") && isDir { if !disableGETDir && (r.Method == "GET" || r.Method == "HEAD") && isDir {
@@ -138,8 +184,14 @@ func (w *WebDAV) handler(rw http.ResponseWriter, r *http.Request) {
// serveDir serves a directory index at dirRemote // serveDir serves a directory index at dirRemote
// This is similar to serveDir in serve http. // This is similar to serveDir in serve http.
func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote string) { func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote string) {
VFS, err := w.getVFS(r.Context())
if err != nil {
http.Error(rw, "Root directory not found", http.StatusNotFound)
fs.Errorf(nil, "Failed to serve directory: %v", err)
return
}
// List the directory // List the directory
node, err := w.vfs.Stat(dirRemote) node, err := VFS.Stat(dirRemote)
if err == vfs.ENOENT { if err == vfs.ENOENT {
http.Error(rw, "Directory not found", http.StatusNotFound) http.Error(rw, "Directory not found", http.StatusNotFound)
return return
@@ -186,8 +238,12 @@ func (w *WebDAV) logRequest(r *http.Request, err error) {
// Mkdir creates a directory // Mkdir creates a directory
func (w *WebDAV) Mkdir(ctx context.Context, name string, perm os.FileMode) (err error) { func (w *WebDAV) Mkdir(ctx context.Context, name string, perm os.FileMode) (err error) {
defer log.Trace(name, "perm=%v", perm)("err = %v", &err) // defer log.Trace(name, "perm=%v", perm)("err = %v", &err)
dir, leaf, err := w.vfs.StatParent(name) VFS, err := w.getVFS(ctx)
if err != nil {
return err
}
dir, leaf, err := VFS.StatParent(name)
if err != nil { if err != nil {
return err return err
} }
@@ -197,8 +253,12 @@ func (w *WebDAV) Mkdir(ctx context.Context, name string, perm os.FileMode) (err
// OpenFile opens a file or a directory // OpenFile opens a file or a directory
func (w *WebDAV) OpenFile(ctx context.Context, name string, flags int, perm os.FileMode) (file webdav.File, err error) { func (w *WebDAV) OpenFile(ctx context.Context, name string, flags int, perm os.FileMode) (file webdav.File, err error) {
defer log.Trace(name, "flags=%v, perm=%v", flags, perm)("err = %v", &err) // defer log.Trace(name, "flags=%v, perm=%v", flags, perm)("err = %v", &err)
f, err := w.vfs.OpenFile(name, flags, perm) VFS, err := w.getVFS(ctx)
if err != nil {
return nil, err
}
f, err := VFS.OpenFile(name, flags, perm)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -207,8 +267,12 @@ func (w *WebDAV) OpenFile(ctx context.Context, name string, flags int, perm os.F
// RemoveAll removes a file or a directory and its contents // RemoveAll removes a file or a directory and its contents
func (w *WebDAV) RemoveAll(ctx context.Context, name string) (err error) { func (w *WebDAV) RemoveAll(ctx context.Context, name string) (err error) {
defer log.Trace(name, "")("err = %v", &err) // defer log.Trace(name, "")("err = %v", &err)
node, err := w.vfs.Stat(name) VFS, err := w.getVFS(ctx)
if err != nil {
return err
}
node, err := VFS.Stat(name)
if err != nil { if err != nil {
return err return err
} }
@@ -221,14 +285,22 @@ func (w *WebDAV) RemoveAll(ctx context.Context, name string) (err error) {
// Rename a file or a directory // Rename a file or a directory
func (w *WebDAV) Rename(ctx context.Context, oldName, newName string) (err error) { func (w *WebDAV) Rename(ctx context.Context, oldName, newName string) (err error) {
defer log.Trace(oldName, "newName=%q", newName)("err = %v", &err) // defer log.Trace(oldName, "newName=%q", newName)("err = %v", &err)
return w.vfs.Rename(oldName, newName) VFS, err := w.getVFS(ctx)
if err != nil {
return err
}
return VFS.Rename(oldName, newName)
} }
// Stat returns info about the file or directory // Stat returns info about the file or directory
func (w *WebDAV) Stat(ctx context.Context, name string) (fi os.FileInfo, err error) { func (w *WebDAV) Stat(ctx context.Context, name string) (fi os.FileInfo, err error) {
defer log.Trace(name, "")("fi=%+v, err = %v", &fi, &err) // defer log.Trace(name, "")("fi=%+v, err = %v", &fi, &err)
fi, err = w.vfs.Stat(name) VFS, err := w.getVFS(ctx)
if err != nil {
return nil, err
}
fi, err = VFS.Stat(name)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -270,7 +342,7 @@ type FileInfo struct {
// ETag returns an ETag for the FileInfo // ETag returns an ETag for the FileInfo
func (fi FileInfo) ETag(ctx context.Context) (etag string, err error) { func (fi FileInfo) ETag(ctx context.Context) (etag string, err error) {
defer log.Trace(fi, "")("etag=%q, err=%v", &etag, &err) // defer log.Trace(fi, "")("etag=%q, err=%v", &etag, &err)
if hashType == hash.None { if hashType == hash.None {
return "", webdav.ErrNotImplemented return "", webdav.ErrNotImplemented
} }
@@ -293,7 +365,7 @@ func (fi FileInfo) ETag(ctx context.Context) (etag string, err error) {
// ContentType returns a content type for the FileInfo // ContentType returns a content type for the FileInfo
func (fi FileInfo) ContentType(ctx context.Context) (contentType string, err error) { func (fi FileInfo) ContentType(ctx context.Context) (contentType string, err error) {
defer log.Trace(fi, "")("etag=%q, err=%v", &contentType, &err) // defer log.Trace(fi, "")("etag=%q, err=%v", &contentType, &err)
node, ok := (fi.FileInfo).(vfs.Node) node, ok := (fi.FileInfo).(vfs.Node)
if !ok { if !ok {
fs.Errorf(fi, "Expecting vfs.Node, got %T", fi.FileInfo) fs.Errorf(fi, "Expecting vfs.Node, got %T", fi.FileInfo)

View File

@@ -8,21 +8,22 @@
package webdav package webdav
import ( import (
"context"
"flag" "flag"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"os" "os"
"os/exec"
"strings" "strings"
"testing" "testing"
"time" "time"
_ "github.com/rclone/rclone/backend/local" _ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/httplib" "github.com/rclone/rclone/cmd/serve/httplib"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fs/hash"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"golang.org/x/net/webdav" "golang.org/x/net/webdav"
@@ -30,6 +31,8 @@ import (
const ( const (
testBindAddress = "localhost:0" testBindAddress = "localhost:0"
testUser = "user"
testPass = "pass"
) )
// check interfaces // check interfaces
@@ -42,50 +45,34 @@ var (
// TestWebDav runs the webdav server then runs the unit tests for the // TestWebDav runs the webdav server then runs the unit tests for the
// webdav remote against it. // webdav remote against it.
func TestWebDav(t *testing.T) { func TestWebDav(t *testing.T) {
opt := httplib.DefaultOpt // Configure and start the server
opt.ListenAddr = testBindAddress start := func(f fs.Fs) (configmap.Simple, func()) {
opt := httplib.DefaultOpt
opt.ListenAddr = testBindAddress
opt.BasicUser = testUser
opt.BasicPass = testPass
hashType = hash.MD5
fstest.Initialise() // Start the server
w := newWebDAV(f, &opt)
assert.NoError(t, w.serve())
fremote, _, clean, err := fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir) // Config for the backend we'll use to connect to the server
assert.NoError(t, err) config := configmap.Simple{
defer clean() "type": "webdav",
"vendor": "other",
"url": w.Server.URL(),
"user": testUser,
"pass": obscure.MustObscure(testPass),
}
err = fremote.Mkdir(context.Background(), "") return config, func() {
assert.NoError(t, err) w.Close()
w.Wait()
// Start the server }
w := newWebDAV(fremote, &opt)
assert.NoError(t, w.serve())
defer func() {
w.Close()
w.Wait()
}()
// Change directory to run the tests
err = os.Chdir("../../../backend/webdav")
assert.NoError(t, err, "failed to cd to webdav remote")
// Run the webdav tests with an on the fly remote
args := []string{"test"}
if testing.Verbose() {
args = append(args, "-v")
} }
if *fstest.Verbose {
args = append(args, "-verbose") servetest.Run(t, "webdav", start)
}
args = append(args, "-remote", "webdavtest:")
cmd := exec.Command("go", args...)
cmd.Env = append(os.Environ(),
"RCLONE_CONFIG_WEBDAVTEST_TYPE=webdav",
"RCLONE_CONFIG_WEBDAVTEST_URL="+w.Server.URL(),
"RCLONE_CONFIG_WEBDAVTEST_VENDOR=other",
)
out, err := cmd.CombinedOutput()
if len(out) != 0 {
t.Logf("\n----------\n%s----------\n", string(out))
}
assert.NoError(t, err, "Running webdav integration tests")
} }
// Test serve http functionality in serve webdav // Test serve http functionality in serve webdav
@@ -97,10 +84,6 @@ var (
) )
func TestHTTPFunction(t *testing.T) { func TestHTTPFunction(t *testing.T) {
// cd to correct directory for testing
err := os.Chdir("../../cmd/serve/webdav")
assert.NoError(t, err, "failed to cd to webdav cmd directory")
// exclude files called hidden.txt and directories called hidden // exclude files called hidden.txt and directories called hidden
require.NoError(t, filter.Active.AddRule("- hidden.txt")) require.NoError(t, filter.Active.AddRule("- hidden.txt"))
require.NoError(t, filter.Active.AddRule("- hidden/**")) require.NoError(t, filter.Active.AddRule("- hidden/**"))

88
fs/cache/cache.go vendored
View File

@@ -2,93 +2,39 @@
package cache package cache
import ( import (
"sync"
"time"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/cache"
) )
var ( var (
fsCacheMu sync.Mutex c = cache.New()
fsCache = map[string]*cacheEntry{}
fsNewFs = fs.NewFs // for tests
expireRunning = false
cacheExpireDuration = 300 * time.Second // expire the cache entry when it is older than this
cacheExpireInterval = 60 * time.Second // interval to run the cache expire
) )
type cacheEntry struct { // GetFn gets a fs.Fs named fsString either from the cache or creates
f fs.Fs // cached f // it afresh with the create function
err error // nil or fs.ErrorIsFile func GetFn(fsString string, create func(fsString string) (fs.Fs, error)) (f fs.Fs, err error) {
fsString string // remote string value, err := c.Get(fsString, func(fsString string) (value interface{}, ok bool, error error) {
lastUsed time.Time // time used for expiry f, err := create(fsString)
ok = err == nil || err == fs.ErrorIsFile
return f, ok, err
})
if err != nil {
return nil, err
}
return value.(fs.Fs), nil
} }
// Get gets a fs.Fs named fsString either from the cache or creates it afresh // Get gets a fs.Fs named fsString either from the cache or creates it afresh
func Get(fsString string) (f fs.Fs, err error) { func Get(fsString string) (f fs.Fs, err error) {
fsCacheMu.Lock() return GetFn(fsString, fs.NewFs)
entry, ok := fsCache[fsString]
if !ok {
fsCacheMu.Unlock() // Unlock in case Get is called recursively
f, err = fsNewFs(fsString)
if err != nil && err != fs.ErrorIsFile {
return f, err
}
entry = &cacheEntry{
f: f,
fsString: fsString,
err: err,
}
fsCacheMu.Lock()
fsCache[fsString] = entry
}
defer fsCacheMu.Unlock()
entry.lastUsed = time.Now()
if !expireRunning {
time.AfterFunc(cacheExpireInterval, cacheExpire)
expireRunning = true
}
return entry.f, entry.err
} }
// Put puts an fs.Fs named fsString into the cache // Put puts an fs.Fs named fsString into the cache
func Put(fsString string, f fs.Fs) { func Put(fsString string, f fs.Fs) {
fsCacheMu.Lock() c.Put(fsString, f)
defer fsCacheMu.Unlock()
fsCache[fsString] = &cacheEntry{
f: f,
fsString: fsString,
lastUsed: time.Now(),
}
if !expireRunning {
time.AfterFunc(cacheExpireInterval, cacheExpire)
expireRunning = true
}
}
// cacheExpire expires any entries that haven't been used recently
func cacheExpire() {
fsCacheMu.Lock()
defer fsCacheMu.Unlock()
now := time.Now()
for fsString, entry := range fsCache {
if now.Sub(entry.lastUsed) > cacheExpireDuration {
delete(fsCache, fsString)
}
}
if len(fsCache) != 0 {
time.AfterFunc(cacheExpireInterval, cacheExpire)
expireRunning = true
} else {
expireRunning = false
}
} }
// Clear removes everything from the cache // Clear removes everything from the cache
func Clear() { func Clear() {
fsCacheMu.Lock() c.Clear()
for k := range fsCache {
delete(fsCache, k)
}
fsCacheMu.Unlock()
} }

View File

@@ -4,7 +4,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"testing" "testing"
"time"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/mockfs" "github.com/rclone/rclone/fstest/mockfs"
@@ -17,10 +16,9 @@ var (
errSentinel = errors.New("an error") errSentinel = errors.New("an error")
) )
func mockNewFs(t *testing.T) func() { func mockNewFs(t *testing.T) (func(), func(path string) (fs.Fs, error)) {
called = 0 called = 0
oldFsNewFs := fsNewFs create := func(path string) (fs.Fs, error) {
fsNewFs = func(path string) (fs.Fs, error) {
assert.Equal(t, 0, called) assert.Equal(t, 0, called)
called++ called++
switch path { switch path {
@@ -33,115 +31,74 @@ func mockNewFs(t *testing.T) func() {
} }
panic(fmt.Sprintf("Unknown path %q", path)) panic(fmt.Sprintf("Unknown path %q", path))
} }
return func() { cleanup := func() {
fsNewFs = oldFsNewFs c.Clear()
fsCacheMu.Lock()
fsCache = map[string]*cacheEntry{}
expireRunning = false
fsCacheMu.Unlock()
} }
return cleanup, create
} }
func TestGet(t *testing.T) { func TestGet(t *testing.T) {
defer mockNewFs(t)() cleanup, create := mockNewFs(t)
defer cleanup()
assert.Equal(t, 0, len(fsCache)) assert.Equal(t, 0, c.Entries())
f, err := Get("/") f, err := GetFn("/", create)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, 1, len(fsCache)) assert.Equal(t, 1, c.Entries())
f2, err := Get("/") f2, err := GetFn("/", create)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, f, f2) assert.Equal(t, f, f2)
} }
func TestGetFile(t *testing.T) { func TestGetFile(t *testing.T) {
defer mockNewFs(t)() cleanup, create := mockNewFs(t)
defer cleanup()
assert.Equal(t, 0, len(fsCache)) assert.Equal(t, 0, c.Entries())
f, err := Get("/file.txt") f, err := GetFn("/file.txt", create)
require.Equal(t, fs.ErrorIsFile, err) require.Equal(t, fs.ErrorIsFile, err)
assert.Equal(t, 1, len(fsCache)) assert.Equal(t, 1, c.Entries())
f2, err := Get("/file.txt") f2, err := GetFn("/file.txt", create)
require.Equal(t, fs.ErrorIsFile, err) require.Equal(t, fs.ErrorIsFile, err)
assert.Equal(t, f, f2) assert.Equal(t, f, f2)
} }
func TestGetError(t *testing.T) { func TestGetError(t *testing.T) {
defer mockNewFs(t)() cleanup, create := mockNewFs(t)
defer cleanup()
assert.Equal(t, 0, len(fsCache)) assert.Equal(t, 0, c.Entries())
f, err := Get("/error") f, err := GetFn("/error", create)
require.Equal(t, errSentinel, err) require.Equal(t, errSentinel, err)
require.Equal(t, nil, f) require.Equal(t, nil, f)
assert.Equal(t, 0, len(fsCache)) assert.Equal(t, 0, c.Entries())
} }
func TestPut(t *testing.T) { func TestPut(t *testing.T) {
defer mockNewFs(t)() cleanup, create := mockNewFs(t)
defer cleanup()
f := mockfs.NewFs("mock", "mock") f := mockfs.NewFs("mock", "mock")
assert.Equal(t, 0, len(fsCache)) assert.Equal(t, 0, c.Entries())
Put("/alien", f) Put("/alien", f)
assert.Equal(t, 1, len(fsCache)) assert.Equal(t, 1, c.Entries())
fNew, err := Get("/alien") fNew, err := GetFn("/alien", create)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, f, fNew) require.Equal(t, f, fNew)
assert.Equal(t, 1, len(fsCache)) assert.Equal(t, 1, c.Entries())
}
func TestCacheExpire(t *testing.T) {
defer mockNewFs(t)()
cacheExpireInterval = time.Millisecond
assert.Equal(t, false, expireRunning)
_, err := Get("/")
require.NoError(t, err)
fsCacheMu.Lock()
entry := fsCache["/"]
assert.Equal(t, 1, len(fsCache))
fsCacheMu.Unlock()
cacheExpire()
fsCacheMu.Lock()
assert.Equal(t, 1, len(fsCache))
entry.lastUsed = time.Now().Add(-cacheExpireDuration - 60*time.Second)
assert.Equal(t, true, expireRunning)
fsCacheMu.Unlock()
time.Sleep(10 * time.Millisecond)
fsCacheMu.Lock()
assert.Equal(t, false, expireRunning)
assert.Equal(t, 0, len(fsCache))
fsCacheMu.Unlock()
}
func TestClear(t *testing.T) {
defer mockNewFs(t)()
assert.Equal(t, 0, len(fsCache))
_, err := Get("/")
require.NoError(t, err)
assert.Equal(t, 1, len(fsCache))
Clear()
assert.Equal(t, 0, len(fsCache))
} }

View File

@@ -6,6 +6,7 @@ import (
"testing" "testing"
"github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest"
@@ -52,7 +53,7 @@ func TestMultithreadCopy(t *testing.T) {
} { } {
t.Run(fmt.Sprintf("%+v", test), func(t *testing.T) { t.Run(fmt.Sprintf("%+v", test), func(t *testing.T) {
var err error var err error
contents := fstest.RandomString(test.size) contents := random.String(test.size)
t1 := fstest.Time("2001-02-03T04:05:06.499999999Z") t1 := fstest.Time("2001-02-03T04:05:06.499999999Z")
file1 := r.WriteObject(context.Background(), "file1", contents, t1) file1 := r.WriteObject(context.Background(), "file1", contents, t1)
fstest.CheckItems(t, r.Fremote, file1) fstest.CheckItems(t, r.Fremote, file1)

View File

@@ -8,7 +8,6 @@ import (
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"math/rand"
"path" "path"
"path/filepath" "path/filepath"
"sort" "sort"
@@ -28,6 +27,7 @@ import (
"github.com/rclone/rclone/fs/march" "github.com/rclone/rclone/fs/march"
"github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/readers"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
@@ -1666,7 +1666,7 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
// to avoid issues with certain remotes and avoid file deletion. // to avoid issues with certain remotes and avoid file deletion.
if !cp && fdst.Name() == fsrc.Name() && fdst.Features().CaseInsensitive && dstFileName != srcFileName && strings.ToLower(dstFilePath) == strings.ToLower(srcFilePath) { if !cp && fdst.Name() == fsrc.Name() && fdst.Features().CaseInsensitive && dstFileName != srcFileName && strings.ToLower(dstFilePath) == strings.ToLower(srcFilePath) {
// Create random name to temporarily move file to // Create random name to temporarily move file to
tmpObjName := dstFileName + "-rclone-move-" + random(8) tmpObjName := dstFileName + "-rclone-move-" + random.String(8)
_, err := fdst.NewObject(ctx, tmpObjName) _, err := fdst.NewObject(ctx, tmpObjName)
if err != fs.ErrorObjectNotFound { if err != fs.ErrorObjectNotFound {
if err == nil { if err == nil {
@@ -1730,17 +1730,6 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
return err return err
} }
// random generates a pseudorandom alphanumeric string
func random(length int) string {
randomOutput := make([]byte, length)
possibleCharacters := "123567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
rand.Seed(time.Now().Unix())
for i := range randomOutput {
randomOutput[i] = possibleCharacters[rand.Intn(len(possibleCharacters))]
}
return string(randomOutput)
}
// MoveFile moves a single file possibly to a new name // MoveFile moves a single file possibly to a new name
func MoveFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) { func MoveFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) {
return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, false) return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, false)

View File

@@ -20,8 +20,8 @@ func AddFlags(flagSet *pflag.FlagSet) {
flags.StringVarP(flagSet, &Opt.Files, "rc-files", "", "", "Path to local files to serve on the HTTP server.") flags.StringVarP(flagSet, &Opt.Files, "rc-files", "", "", "Path to local files to serve on the HTTP server.")
flags.BoolVarP(flagSet, &Opt.Serve, "rc-serve", "", false, "Enable the serving of remote objects.") flags.BoolVarP(flagSet, &Opt.Serve, "rc-serve", "", false, "Enable the serving of remote objects.")
flags.BoolVarP(flagSet, &Opt.NoAuth, "rc-no-auth", "", false, "Don't require auth for certain methods.") flags.BoolVarP(flagSet, &Opt.NoAuth, "rc-no-auth", "", false, "Don't require auth for certain methods.")
flags.BoolVarP(flagSet, &Opt.WebUI, "rc-web-gui", "w", false, "Launch WebGUI on localhost") flags.BoolVarP(flagSet, &Opt.WebUI, "rc-web-gui", "", false, "Launch WebGUI on localhost")
flags.BoolVarP(flagSet, &Opt.WebGUIUpdate, "rc-web-gui-update", "", false, "Update / Force update to latest version of web gui") flags.BoolVarP(flagSet, &Opt.WebGUIUpdate, "rc-web-gui-update", "", false, "Update / Force update to latest version of web gui")
flags.StringVarP(flagSet, &Opt.WebGUIFetchURL, "rc-web-fetch-url", "", "https://api.github.com/repos/negative0/rclone-webui-react/releases/latest", "URL to fetch the releases from") flags.StringVarP(flagSet, &Opt.WebGUIFetchURL, "rc-web-fetch-url", "", "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest", "URL to fetch the releases for webgui.")
httpflags.AddFlagsPrefix(flagSet, "rc-", &Opt.HTTPOptions) httpflags.AddFlagsPrefix(flagSet, "rc-", &Opt.HTTPOptions)
} }

View File

@@ -95,6 +95,8 @@ func (s *Server) Serve() error {
// Don't open browser if serving in testing environment. // Don't open browser if serving in testing environment.
if flag.Lookup("test.v") == nil { if flag.Lookup("test.v") == nil {
_ = open.Start(openURL.String()) _ = open.Start(openURL.String())
} else {
fs.Errorf(nil, "Not opening browser in testing environment")
} }
} }
return nil return nil

View File

@@ -27,6 +27,7 @@ import (
"github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"golang.org/x/text/unicode/norm" "golang.org/x/text/unicode/norm"
@@ -357,24 +358,6 @@ func Time(timeString string) time.Time {
return t return t
} }
// RandomString create a random string for test purposes
func RandomString(n int) string {
const (
vowel = "aeiou"
consonant = "bcdfghjklmnpqrstvwxyz"
digit = "0123456789"
)
pattern := []string{consonant, vowel, consonant, vowel, consonant, vowel, consonant, digit}
out := make([]byte, n)
p := 0
for i := range out {
source := pattern[p]
p = (p + 1) % len(pattern)
out[i] = source[rand.Intn(len(source))]
}
return string(out)
}
// LocalRemote creates a temporary directory name for local remotes // LocalRemote creates a temporary directory name for local remotes
func LocalRemote() (path string, err error) { func LocalRemote() (path string, err error) {
path, err = ioutil.TempDir("", "rclone") path, err = ioutil.TempDir("", "rclone")
@@ -403,7 +386,7 @@ func RandomRemoteName(remoteName string) (string, string, error) {
if !strings.HasSuffix(remoteName, ":") { if !strings.HasSuffix(remoteName, ":") {
remoteName += "/" remoteName += "/"
} }
leafName = "rclone-test-" + RandomString(24) leafName = "rclone-test-" + random.String(24)
if !MatchTestRemote.MatchString(leafName) { if !MatchTestRemote.MatchString(leafName) {
log.Fatalf("%q didn't match the test remote name regexp", leafName) log.Fatalf("%q didn't match the test remote name regexp", leafName)
} }
@@ -432,7 +415,7 @@ func RandomRemote(remoteName string, subdir bool) (fs.Fs, string, func(), error)
if err != nil { if err != nil {
return nil, "", nil, err return nil, "", nil, err
} }
remoteName += "/rclone-test-subdir-" + RandomString(8) remoteName += "/rclone-test-subdir-" + random.String(8)
} }
remote, err := fs.NewFs(remoteName) remote, err := fs.NewFs(remoteName)

View File

@@ -31,6 +31,7 @@ import (
"github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@@ -157,7 +158,7 @@ func testPut(t *testing.T, f fs.Fs, file *fstest.Item) (string, fs.Object) {
contents string contents string
) )
retry(t, "Put", func() error { retry(t, "Put", func() error {
contents = fstest.RandomString(100) contents = random.String(100)
buf := bytes.NewBufferString(contents) buf := bytes.NewBufferString(contents)
uploadHash = hash.NewMultiHasher() uploadHash = hash.NewMultiHasher()
in := io.TeeReader(buf, uploadHash) in := io.TeeReader(buf, uploadHash)
@@ -557,7 +558,7 @@ func Run(t *testing.T, opt *Opt) {
const N = 5 * 1024 const N = 5 * 1024
// Read N bytes then produce an error // Read N bytes then produce an error
contents := fstest.RandomString(N) contents := random.String(N)
buf := bytes.NewBufferString(contents) buf := bytes.NewBufferString(contents)
er := &errorReader{errors.New("potato")} er := &errorReader{errors.New("potato")}
in := io.MultiReader(buf, er) in := io.MultiReader(buf, er)
@@ -1322,7 +1323,7 @@ func Run(t *testing.T, opt *Opt) {
// TestObjectUpdate tests that Update works // TestObjectUpdate tests that Update works
t.Run("ObjectUpdate", func(t *testing.T) { t.Run("ObjectUpdate", func(t *testing.T) {
skipIfNotOk(t) skipIfNotOk(t)
contents := fstest.RandomString(200) contents := random.String(200)
buf := bytes.NewBufferString(contents) buf := bytes.NewBufferString(contents)
hash := hash.NewMultiHasher() hash := hash.NewMultiHasher()
in := io.TeeReader(buf, hash) in := io.TeeReader(buf, hash)
@@ -1507,7 +1508,7 @@ func Run(t *testing.T, opt *Opt) {
contentSize = 100 contentSize = 100
) )
retry(t, "PutStream", func() error { retry(t, "PutStream", func() error {
contents := fstest.RandomString(contentSize) contents := random.String(contentSize)
buf := bytes.NewBufferString(contents) buf := bytes.NewBufferString(contents)
uploadHash = hash.NewMultiHasher() uploadHash = hash.NewMultiHasher()
in := io.TeeReader(buf, uploadHash) in := io.TeeReader(buf, uploadHash)
@@ -1564,7 +1565,7 @@ func Run(t *testing.T, opt *Opt) {
assert.Nil(t, recover(), "Fs.Put() should not panic when src.Size() == -1") assert.Nil(t, recover(), "Fs.Put() should not panic when src.Size() == -1")
}() }()
contents := fstest.RandomString(100) contents := random.String(100)
in := bytes.NewBufferString(contents) in := bytes.NewBufferString(contents)
obji := object.NewStaticObjectInfo("unknown-size-put.txt", fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil) obji := object.NewStaticObjectInfo("unknown-size-put.txt", fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
@@ -1587,7 +1588,7 @@ func Run(t *testing.T, opt *Opt) {
assert.Nil(t, recover(), "Object.Update() should not panic when src.Size() == -1") assert.Nil(t, recover(), "Object.Update() should not panic when src.Size() == -1")
}() }()
newContents := fstest.RandomString(200) newContents := random.String(200)
in := bytes.NewBufferString(newContents) in := bytes.NewBufferString(newContents)
obj := findObject(t, remote, unknownSizeUpdateFile.Path) obj := findObject(t, remote, unknownSizeUpdateFile.Path)

134
lib/cache/cache.go vendored Normal file
View File

@@ -0,0 +1,134 @@
// Package cache implements a simple cache where the entries are
// expired after a given time (5 minutes of disuse by default).
package cache
import (
"sync"
"time"
)
// Cache holds values indexed by string, but expired after a given
// time (5 minutes of disuse by default).
type Cache struct {
	mu             sync.Mutex             // protects the variables below
	cache          map[string]*cacheEntry // cache of key -> entry
	expireRunning  bool                   // true while an expiry timer is scheduled
	expireDuration time.Duration          // expire the cache entry when it is older than this
	expireInterval time.Duration          // interval to run the cache expire
}
// New creates a new cache with the default expire duration and interval
func New() *Cache {
	c := &Cache{
		cache:          make(map[string]*cacheEntry),
		expireRunning:  false,
		expireDuration: 5 * time.Minute,
		expireInterval: time.Minute,
	}
	return c
}
// cacheEntry is a single cached item together with its bookkeeping
// metadata, stored in Cache.cache indexed by key.
type cacheEntry struct {
	value    interface{} // cached item
	err      error       // creation error (stored alongside the value - see Get)
	key      string      // key this entry is stored under
	lastUsed time.Time   // time of last access, used for expiry
}
// CreateFunc is called to create new values. If the create function
// returns an error it will be cached if ok is true, otherwise the
// error will just be returned, allowing negative caching if required.
type CreateFunc func(key string) (value interface{}, ok bool, error error)
// used marks an entry as accessed now and starts the expiry timer if
// it isn't already running.
//
// Must be called with c.mu held.
func (c *Cache) used(entry *cacheEntry) {
	entry.lastUsed = time.Now()
	if c.expireRunning {
		return
	}
	c.expireRunning = true
	time.AfterFunc(c.expireInterval, c.cacheExpire)
}
// Get gets a value named key either from the cache or creates it
// afresh with the create function.
//
// If create returns an error with ok true, the entry (value and
// error) is cached and the error returned on every subsequent Get;
// with ok false nothing is cached and the error is returned directly
// (see CreateFunc).
//
// The lock is dropped while create runs, so Get may be called
// recursively from within create.  NOTE(review): this also means two
// concurrent Gets for the same key may both call create - the last
// one to finish wins the cache slot.
func (c *Cache) Get(key string, create CreateFunc) (value interface{}, err error) {
	c.mu.Lock()
	entry, ok := c.cache[key]
	if !ok {
		c.mu.Unlock() // Unlock in case Get is called recursively
		value, ok, err = create(key)
		if err != nil && !ok {
			// uncached error - return without storing an entry
			return value, err
		}
		entry = &cacheEntry{
			value: value,
			key:   key,
			err:   err,
		}
		c.mu.Lock()
		c.cache[key] = entry
	}
	// the lock is held here on both paths
	defer c.mu.Unlock()
	c.used(entry)
	return entry.value, entry.err
}
// Put inserts the value into the cache under key, marking it as
// freshly used.
func (c *Cache) Put(key string, value interface{}) {
	entry := &cacheEntry{
		key:   key,
		value: value,
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	c.used(entry)
	c.cache[key] = entry
}
// GetMaybe returns the cached value for key and true if it is
// present, or nil and false if it is not, without ever calling a
// create function.
func (c *Cache) GetMaybe(key string) (value interface{}, found bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if entry, ok := c.cache[key]; ok {
		c.used(entry)
		return entry.value, true
	}
	return nil, false
}
// cacheExpire expires any entries that haven't been used recently.
//
// It runs from the time.AfterFunc timer started in used, and
// reschedules itself while the cache is non-empty.  Once the cache
// drains it clears expireRunning so used can restart the timer on
// the next access.
func (c *Cache) cacheExpire() {
	c.mu.Lock()
	defer c.mu.Unlock()
	now := time.Now()
	for key, entry := range c.cache {
		if now.Sub(entry.lastUsed) > c.expireDuration {
			delete(c.cache, key)
		}
	}
	if len(c.cache) != 0 {
		// entries remain - run another expiry pass later
		time.AfterFunc(c.expireInterval, c.cacheExpire)
		c.expireRunning = true
	} else {
		c.expireRunning = false
	}
}
// Clear removes everything from the cache
func (c *Cache) Clear() {
	c.mu.Lock()
	for k := range c.cache {
		delete(c.cache, k)
	}
	c.mu.Unlock()
}
// Entries returns the number of entries in the cache
func (c *Cache) Entries() int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return len(c.cache)
}

174
lib/cache/cache_test.go vendored Normal file
View File

@@ -0,0 +1,174 @@
package cache
import (
"errors"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
	called      = 0                            // number of times the create function has run
	errSentinel = errors.New("an error")       // error returned with ok false (not cached)
	errCached   = errors.New("a cached error") // error returned with ok true (cached)
)
// setup returns a fresh Cache and a CreateFunc producing canned
// values for the test paths.  The create function asserts it is
// called at most once per test via the package-level called counter.
func setup(t *testing.T) (*Cache, CreateFunc) {
	called = 0
	create := func(path string) (interface{}, bool, error) {
		assert.Equal(t, 0, called) // each test expects exactly one create call
		called++
		switch path {
		case "/":
			return "/", true, nil
		case "/file.txt":
			return "/file.txt", true, errCached // value plus cached error
		case "/error":
			return nil, false, errSentinel // uncached error
		}
		panic(fmt.Sprintf("Unknown path %q", path))
	}
	c := New()
	return c, create
}
// TestGet checks that Get creates the value exactly once and returns
// the cached copy on subsequent calls.
func TestGet(t *testing.T) {
	c, create := setup(t)

	assert.Equal(t, 0, len(c.cache))

	first, err := c.Get("/", create)
	require.NoError(t, err)
	assert.Equal(t, 1, len(c.cache))

	second, err := c.Get("/", create)
	require.NoError(t, err)
	assert.Equal(t, first, second)
}
// TestGetFile checks that an error returned with ok true is cached
// and replayed together with the value.
func TestGetFile(t *testing.T) {
	c, create := setup(t)

	assert.Equal(t, 0, len(c.cache))

	first, err := c.Get("/file.txt", create)
	require.Equal(t, errCached, err)
	assert.Equal(t, 1, len(c.cache))

	second, err := c.Get("/file.txt", create)
	require.Equal(t, errCached, err)
	assert.Equal(t, first, second)
}
// TestGetError checks that an error returned with ok false is passed
// through and nothing is stored in the cache.
func TestGetError(t *testing.T) {
	c, create := setup(t)

	assert.Equal(t, 0, len(c.cache))

	value, err := c.Get("/error", create)
	require.Equal(t, errSentinel, err)
	require.Nil(t, value)
	assert.Equal(t, 0, len(c.cache))
}
// TestPut checks that a value inserted with Put is returned by Get
// without the create function being invoked.
func TestPut(t *testing.T) {
	c, create := setup(t)

	assert.Equal(t, 0, c.Entries())

	c.Put("/alien", "slime")
	assert.Equal(t, 1, c.Entries())

	got, err := c.Get("/alien", create)
	require.NoError(t, err)
	require.Equal(t, "slime", got)
	assert.Equal(t, 1, c.Entries())
}
// TestCacheExpire checks that the expiry pass removes stale entries
// and that the rescheduled timer stops itself once the cache is empty.
func TestCacheExpire(t *testing.T) {
	c, create := setup(t)
	c.expireInterval = time.Millisecond // speed the timer up for the test
	assert.Equal(t, false, c.expireRunning)
	_, err := c.Get("/", create)
	require.NoError(t, err)
	c.mu.Lock()
	entry := c.cache["/"]
	assert.Equal(t, 1, len(c.cache))
	c.mu.Unlock()
	c.cacheExpire()
	c.mu.Lock()
	// the entry was just used so it survives the first expiry pass
	assert.Equal(t, 1, len(c.cache))
	// back-date the entry so the next expiry pass removes it
	entry.lastUsed = time.Now().Add(-c.expireDuration - 60*time.Second)
	assert.Equal(t, true, c.expireRunning)
	c.mu.Unlock()
	time.Sleep(10 * time.Millisecond) // wait for the rescheduled timer to fire
	c.mu.Lock()
	assert.Equal(t, false, c.expireRunning)
	assert.Equal(t, 0, len(c.cache))
	c.mu.Unlock()
}
// TestClear checks that Clear empties a populated cache.
func TestClear(t *testing.T) {
	c, create := setup(t)

	assert.Equal(t, 0, len(c.cache))

	_, err := c.Get("/", create)
	require.NoError(t, err)
	assert.Equal(t, 1, len(c.cache))

	c.Clear()
	assert.Equal(t, 0, len(c.cache))
}
// TestEntries checks the Entries accessor tracks additions and Clear.
func TestEntries(t *testing.T) {
	c, create := setup(t)

	require.Equal(t, 0, c.Entries())

	_, err := c.Get("/", create)
	require.NoError(t, err)
	assert.Equal(t, 1, c.Entries())

	c.Clear()
	assert.Equal(t, 0, c.Entries())
}
// TestGetMaybe checks that GetMaybe reports presence correctly before
// creation, after creation and after Clear.
func TestGetMaybe(t *testing.T) {
	c, create := setup(t)

	value, found := c.GetMaybe("/")
	assert.False(t, found)
	assert.Nil(t, value)

	created, err := c.Get("/", create)
	require.NoError(t, err)

	value, found = c.GetMaybe("/")
	assert.True(t, found)
	assert.Equal(t, created, value)

	c.Clear()
	value, found = c.GetMaybe("/")
	assert.False(t, found)
	assert.Nil(t, value)
}

22
lib/random/random.go Normal file
View File

@@ -0,0 +1,22 @@
// Package random holds a few functions for working with random numbers
package random
import "math/rand"
// String creates a pseudorandom alphanumeric string of length n for
// test purposes, cycling consonant/vowel pairs ending in a digit so
// the result is roughly pronounceable.  Not cryptographically random.
func String(n int) string {
	sources := []string{
		"bcdfghjklmnpqrstvwxyz", // consonant
		"aeiou",                 // vowel
		"bcdfghjklmnpqrstvwxyz",
		"aeiou",
		"bcdfghjklmnpqrstvwxyz",
		"aeiou",
		"bcdfghjklmnpqrstvwxyz",
		"0123456789", // digit
	}
	out := make([]byte, n)
	for i := range out {
		source := sources[i%len(sources)]
		out[i] = source[rand.Intn(len(source))]
	}
	return string(out)
}

13
lib/random/random_test.go Normal file
View File

@@ -0,0 +1,13 @@
package random
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestString checks that String returns a string of exactly the
// requested length for a range of lengths including zero.
func TestString(t *testing.T) {
	for n := 0; n < 100; n++ {
		got := String(n)
		assert.Equal(t, n, len(got))
	}
}

View File

@@ -18,6 +18,7 @@ import (
"time" "time"
"github.com/rclone/rclone/lib/file" "github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/random"
) )
var ( var (
@@ -35,24 +36,6 @@ func init() {
} }
// RandomString create a random string for test purposes
func RandomString(n int) string {
const (
vowel = "aeiou"
consonant = "bcdfghjklmnpqrstvwxyz"
digit = "0123456789"
)
pattern := []string{consonant, vowel, consonant, vowel, consonant, vowel, consonant, digit}
out := make([]byte, n)
p := 0
for i := range out {
source := pattern[p]
p = (p + 1) % len(pattern)
out[i] = source[rand.Intn(len(source))]
}
return string(out)
}
// Test contains stats about the running test which work for files or // Test contains stats about the running test which work for files or
// directories // directories
type Test struct { type Test struct {
@@ -71,7 +54,7 @@ type Test struct {
func NewTest(Dir string) *Test { func NewTest(Dir string) *Test {
t := &Test{ t := &Test{
dir: Dir, dir: Dir,
name: RandomString(*nameLength), name: random.String(*nameLength),
isDir: rand.Intn(2) == 0, isDir: rand.Intn(2) == 0,
number: atomic.AddInt32(&testNumber, 1), number: atomic.AddInt32(&testNumber, 1),
timer: time.NewTimer(*timeout), timer: time.NewTimer(*timeout),
@@ -168,7 +151,7 @@ func (t *Test) rename() {
return return
} }
t.logf("rename") t.logf("rename")
NewName := RandomString(*nameLength) NewName := random.String(*nameLength)
newPath := path.Join(t.dir, NewName) newPath := path.Join(t.dir, NewName)
err := os.Rename(t.path(), newPath) err := os.Rename(t.path(), newPath)
if err != nil { if err != nil {

View File

@@ -242,6 +242,11 @@ func New(f fs.Fs, opt *Options) *VFS {
return vfs return vfs
} }
// Fs returns the Fs passed into the New call
func (vfs *VFS) Fs() fs.Fs {
return vfs.f
}
// SetCacheMode change the cache mode // SetCacheMode change the cache mode
func (vfs *VFS) SetCacheMode(cacheMode CacheMode) { func (vfs *VFS) SetCacheMode(cacheMode CacheMode) {
vfs.Shutdown() vfs.Shutdown()