Mirror of https://github.com/rclone/rclone.git (synced 2025-12-14 15:23:18 +00:00)
Compare commits: 18 commits, azure-pipe...azure-pipe
| Author | SHA1 | Date |
|---|---|---|
| | 85882fa2de | |
| | 27a075e9fc | |
| | 5065c422b4 | |
| | 72d5b11d1b | |
| | 526a3347ac | |
| | 23910ba53b | |
| | ee7101e6af | |
| | 36c1b37dd9 | |
| | 72782bdda6 | |
| | b94eef16c1 | |
| | d75fbe4852 | |
| | e6ab237fcd | |
| | a7eec91d69 | |
| | b3e94b018c | |
| | ca0e9ea55d | |
| | 53e3c2e263 | |
| | 02eb747d71 | |
| | d51a970932 | |
@@ -46,4 +46,4 @@ artifacts:
- path: build/*-v*.zip

deploy_script:
- IF "%APPVEYOR_REPO_NAME%" == "rclone/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
- IF "%APPVEYOR_REPO_NAME%" == "rclone/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make upload_beta
4 .gitattributes vendored
@@ -1,3 +1,7 @@
# Ignore generated files in GitHub language statistics and diffs
/MANUAL.* linguist-generated=true
/rclone.1 linguist-generated=true

# Don't fiddle with the line endings of test data
**/testdata/** -text
**/test/** -text
@@ -84,7 +84,6 @@ matrix:
- BUILD_FLAGS='-exclude "^(windows|darwin|linux)/"'
script:
- make
- make compile_all
- go: 1.12.x
name: macOS
os: osx
@@ -120,9 +119,11 @@ matrix:

deploy:
provider: script
script: make travis_beta
script:
- make beta
- [[ "$TRAVIS_PULL_REQUEST" == "false" ]] && make upload_beta
skip_cleanup: true
on:
repo: rclone/rclone
all_branches: true
condition: $TRAVIS_PULL_REQUEST == false && $DEPLOY == true
condition: $DEPLOY == true
22 Makefile
@@ -17,7 +17,10 @@ ifneq ($(TAG),$(LAST_TAG))
endif
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
BETA_PATH := $(BRANCH_PATH)$(TAG)
ifdef BETA_SUBDIR
BETA_SUBDIR := /$(BETA_SUBDIR)
endif
BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
@@ -142,7 +145,7 @@ upload_github:
cross: doc
go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)

beta:
test_beta:
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
@@ -153,13 +156,6 @@ log_since_last_release:
compile_all:
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)

appveyor_upload:
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
@echo Beta release ready at $(BETA_URL)

circleci_upload:
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifndef BRANCH_PATH
@@ -167,15 +163,17 @@ ifndef BRANCH_PATH
endif
@echo Beta release ready at $(BETA_URL)/testbuilds

travis_beta:
ifeq ($(TRAVIS_OS_NAME),linux)
beta:
ifeq (linux,$(filter linux,$(subst Linux,linux,$(TRAVIS_OS_NAME) $(AGENT_OS))))
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
endif
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)

upload_beta: rclone
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
endif
@echo Beta release ready at $(BETA_URL)
@@ -10,6 +10,7 @@

[](https://travis-ci.org/rclone/rclone)
[](https://ci.appveyor.com/project/rclone/rclone)
[](https://dev.azure.com/rclone/rclone/_build/latest?definitionId=2&branchName=master)
[](https://circleci.com/gh/rclone/rclone/tree/master)
[](https://goreportcard.com/report/github.com/rclone/rclone)
[](https://godoc.org/github.com/rclone/rclone)
@@ -1,31 +1,33 @@
|
||||
---
|
||||
# Azure pipelines build for rclone
|
||||
# Parts stolen shamelessly from all round the Internet, especially Caddy
|
||||
# -*- compile-command: "yamllint -f parsable azure-pipelines.yml" -*-
|
||||
|
||||
trigger:
|
||||
branches:
|
||||
include:
|
||||
- '*'
|
||||
tags:
|
||||
include:
|
||||
- '*'
|
||||
|
||||
variables:
|
||||
GOROOT: $(gorootDir)/go
|
||||
GOPATH: $(system.defaultWorkingDirectory)/gopath
|
||||
GOCACHE: $(system.defaultWorkingDirectory)/gocache
|
||||
GOBIN: $(GOPATH)/bin
|
||||
GOMAXPROCS: 8 # workaround for cmd/mount tests locking up - see #3154
|
||||
modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)'
|
||||
GO111MODULE: 'off'
|
||||
GOTAGS: cmount
|
||||
GO_LATEST: false
|
||||
CPATH: ''
|
||||
GO_INSTALL_ARCH: amd64
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
go1.9:
|
||||
imageName: ubuntu-16.04
|
||||
gorootDir: /usr/local
|
||||
GO_VERSION: go1.9.7
|
||||
MAKE_QUICKTEST: true
|
||||
go1.10:
|
||||
imageName: ubuntu-16.04
|
||||
gorootDir: /usr/local
|
||||
GO_VERSION: go1.10.8
|
||||
MAKE_QUICKTEST: true
|
||||
go1.11:
|
||||
imageName: ubuntu-16.04
|
||||
gorootDir: /usr/local
|
||||
GO_VERSION: go1.11.8
|
||||
MAKE_QUICKTEST: true
|
||||
linux:
|
||||
imageName: ubuntu-16.04
|
||||
imageName: ubuntu-latest
|
||||
gorootDir: /usr/local
|
||||
GO_VERSION: latest
|
||||
GOTAGS: cmount
|
||||
@@ -33,51 +35,64 @@ strategy:
|
||||
MAKE_CHECK: true
|
||||
MAKE_QUICKTEST: true
|
||||
DEPLOY: true
|
||||
other_os:
|
||||
imageName: ubuntu-16.04
|
||||
gorootDir: /usr/local
|
||||
GO_VERSION: latest
|
||||
BUILD_FLAGS: '-exclude "^(windows|darwin|linux)/"'
|
||||
MAKE_COMPILE_ALL: true
|
||||
DEPLOY: true
|
||||
modules_race:
|
||||
imageName: ubuntu-16.04
|
||||
gorootDir: /usr/local
|
||||
GO_VERSION: latest
|
||||
GO111MODULE: on
|
||||
GOPROXY: https://proxy.golang.org
|
||||
MAKE_QUICKTEST: true
|
||||
RACEMAKE_QUICKTEST: true
|
||||
mac:
|
||||
imageName: macos-10.13
|
||||
imageName: macos-latest
|
||||
gorootDir: /usr/local
|
||||
GO_VERSION: latest
|
||||
GOTAGS: "" # cmount doesn't work on osx travis for some reason
|
||||
BUILD_FLAGS: '-include "^darwin/" -cgo'
|
||||
MAKE_QUICKTEST: true
|
||||
RACEMAKE_QUICKTEST: true
|
||||
MAKE_RACEQUICKTEST: true
|
||||
DEPLOY: true
|
||||
windows:
|
||||
imageName: windows-2019
|
||||
windows_amd64:
|
||||
imageName: windows-latest
|
||||
gorootDir: C:\
|
||||
GO_VERSION: latest
|
||||
BUILD_FLAGS: '-include "^windows/amd64" -cgo' # 386 doesn't build yet
|
||||
BUILD_FLAGS: '-include "^windows/amd64" -cgo'
|
||||
MAKE_QUICKTEST: true
|
||||
DEPLOY: true
|
||||
windows_386:
|
||||
imageName: windows-latest
|
||||
gorootDir: C:\
|
||||
GO_VERSION: latest
|
||||
GO_INSTALL_ARCH: 386
|
||||
BUILD_FLAGS: '-include "^windows/386" -cgo'
|
||||
MAKE_QUICKTEST: true
|
||||
DEPLOY: true
|
||||
other_os:
|
||||
imageName: ubuntu-latest
|
||||
gorootDir: /usr/local
|
||||
GO_VERSION: latest
|
||||
BUILD_FLAGS: '-exclude "^(windows|darwin|linux)/"'
|
||||
DEPLOY: true
|
||||
modules_race:
|
||||
imageName: ubuntu-latest
|
||||
gorootDir: /usr/local
|
||||
GO_VERSION: latest
|
||||
GO111MODULE: on
|
||||
GOPROXY: https://proxy.golang.org
|
||||
MAKE_QUICKTEST: true
|
||||
MAKE_RACEQUICKTEST: true
|
||||
go1.9:
|
||||
imageName: ubuntu-latest
|
||||
gorootDir: /usr/local
|
||||
GOCACHE: '' # build caching only came in go1.10
|
||||
GO_VERSION: go1.9.7
|
||||
MAKE_QUICKTEST: true
|
||||
go1.10:
|
||||
imageName: ubuntu-latest
|
||||
gorootDir: /usr/local
|
||||
GO_VERSION: go1.10.8
|
||||
MAKE_QUICKTEST: true
|
||||
go1.11:
|
||||
imageName: ubuntu-latest
|
||||
gorootDir: /usr/local
|
||||
GO_VERSION: go1.11.12
|
||||
MAKE_QUICKTEST: true
|
||||
|
||||
pool:
|
||||
vmImage: $(imageName)
|
||||
|
||||
variables:
|
||||
GOROOT: $(gorootDir)/go
|
||||
GOPATH: $(system.defaultWorkingDirectory)/gopath
|
||||
GOBIN: $(GOPATH)/bin
|
||||
modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)'
|
||||
GO111MODULE: 'off'
|
||||
GOTAGS: cmount
|
||||
GO_LATEST: false
|
||||
CPATH: ''
|
||||
|
||||
steps:
|
||||
- bash: |
|
||||
latestGo=$(curl "https://golang.org/VERSION?m=text")
|
||||
@@ -97,6 +112,14 @@ steps:
|
||||
mv !(gopath) '$(modulePath)'
|
||||
displayName: Remove old Go, set GOBIN/GOROOT, and move project into GOPATH
|
||||
|
||||
- task: CacheBeta@0
|
||||
continueOnError: true
|
||||
inputs:
|
||||
key: go-build-cache | "$(Agent.JobName)"
|
||||
path: $(GOCACHE)
|
||||
displayName: Cache go build
|
||||
condition: ne( variables['GOCACHE'], '' )
|
||||
|
||||
# Install Libraries (varies by platform)
|
||||
|
||||
- bash: |
|
||||
@@ -115,49 +138,63 @@ steps:
|
||||
displayName: Install Libraries on macOS
|
||||
|
||||
- powershell: |
|
||||
choco install -y winfsp zip make
|
||||
$ProgressPreference = 'SilentlyContinue'
|
||||
choco install -y winfsp zip
|
||||
Write-Host "##vso[task.setvariable variable=CPATH]C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
|
||||
if ($env:GO_INSTALL_ARCH -eq "386") {
|
||||
choco install -y mingw --forcex86 --force
|
||||
Write-Host "##vso[task.prependpath]C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
|
||||
}
|
||||
# Copy mingw32-make.exe to make.exe so the same command line
|
||||
# can be used on Windows as on macOS and Linux
|
||||
$path = (get-command mingw32-make.exe).Path
|
||||
Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
|
||||
condition: eq( variables['Agent.OS'], 'Windows_NT' )
|
||||
displayName: Install Libraries on Windows
|
||||
|
||||
|
||||
# Install Go (this varies by platform)
|
||||
|
||||
- bash: |
|
||||
wget "https://dl.google.com/go/$(GO_VERSION).linux-amd64.tar.gz"
|
||||
wget "https://dl.google.com/go/$(GO_VERSION).linux-$(GO_INSTALL_ARCH).tar.gz"
|
||||
sudo mkdir $(gorootDir)
|
||||
sudo chown ${USER}:${USER} $(gorootDir)
|
||||
tar -C $(gorootDir) -xzf "$(GO_VERSION).linux-amd64.tar.gz"
|
||||
tar -C $(gorootDir) -xzf "$(GO_VERSION).linux-$(GO_INSTALL_ARCH).tar.gz"
|
||||
condition: eq( variables['Agent.OS'], 'Linux' )
|
||||
displayName: Install Go on Linux
|
||||
|
||||
- bash: |
|
||||
wget "https://dl.google.com/go/$(GO_VERSION).darwin-amd64.tar.gz"
|
||||
sudo tar -C $(gorootDir) -xzf "$(GO_VERSION).darwin-amd64.tar.gz"
|
||||
wget "https://dl.google.com/go/$(GO_VERSION).darwin-$(GO_INSTALL_ARCH).tar.gz"
|
||||
sudo tar -C $(gorootDir) -xzf "$(GO_VERSION).darwin-$(GO_INSTALL_ARCH).tar.gz"
|
||||
condition: eq( variables['Agent.OS'], 'Darwin' )
|
||||
displayName: Install Go on macOS
|
||||
|
||||
- powershell: |
|
||||
Write-Host "Downloading Go... (please be patient, I am very slow)"
|
||||
(New-Object System.Net.WebClient).DownloadFile("https://dl.google.com/go/$(GO_VERSION).windows-amd64.zip", "$(GO_VERSION).windows-amd64.zip")
|
||||
Write-Host "Extracting Go... (I'm slow too)"
|
||||
Expand-Archive "$(GO_VERSION).windows-amd64.zip" -DestinationPath "$(gorootDir)"
|
||||
$ProgressPreference = 'SilentlyContinue'
|
||||
Write-Host "Downloading Go $(GO_VERSION) for $(GO_INSTALL_ARCH)"
|
||||
(New-Object System.Net.WebClient).DownloadFile("https://dl.google.com/go/$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip", "$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip")
|
||||
Write-Host "Extracting Go"
|
||||
Expand-Archive "$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip" -DestinationPath "$(gorootDir)"
|
||||
condition: eq( variables['Agent.OS'], 'Windows_NT' )
|
||||
displayName: Install Go on Windows
|
||||
|
||||
# Display environment for debugging
|
||||
|
||||
- bash: |
|
||||
printf "Using go at: $(which go)\n"
|
||||
printf "Go version: $(go version)\n"
|
||||
printf "\n\nGo environment:\n\n"
|
||||
go env
|
||||
printf "\n\nSystem environment:\n\n"
|
||||
env
|
||||
printf "\n\nRclone environment:\n\n"
|
||||
make vars
|
||||
printf "\n\nSystem environment:\n\n"
|
||||
env
|
||||
workingDirectory: '$(modulePath)'
|
||||
displayName: Print Go version and environment
|
||||
|
||||
- script: |
|
||||
make
|
||||
# Run Tests
|
||||
|
||||
- bash: |
|
||||
make quicktest
|
||||
workingDirectory: '$(modulePath)'
|
||||
displayName: Run tests
|
||||
@@ -167,7 +204,7 @@ steps:
|
||||
make racequicktest
|
||||
workingDirectory: '$(modulePath)'
|
||||
displayName: Race test
|
||||
condition: eq( variables['RACEMAKE_QUICKTEST'], 'true' )
|
||||
condition: eq( variables['MAKE_RACEQUICKTEST'], 'true' )
|
||||
|
||||
- bash: |
|
||||
make build_dep
|
||||
@@ -177,13 +214,21 @@ steps:
|
||||
condition: eq( variables['MAKE_CHECK'], 'true' )
|
||||
|
||||
- bash: |
|
||||
make compile_all
|
||||
make beta
|
||||
workingDirectory: '$(modulePath)'
|
||||
displayName: Compile all architectures test
|
||||
condition: eq( variables['MAKE_COMPILE_ALL'], 'true' )
|
||||
displayName: Do release build
|
||||
condition: eq( variables['DEPLOY'], 'true' )
|
||||
|
||||
- bash: |
|
||||
make vars # FIXME travis_beta
|
||||
make upload_beta
|
||||
env:
|
||||
RCLONE_CONFIG_PASS: $(RCLONE_CONFIG_PASS)
|
||||
BETA_SUBDIR: 'azure_pipelines' # FIXME remove when removing travis/appveyor
|
||||
workingDirectory: '$(modulePath)'
|
||||
displayName: Deploy built binaries
|
||||
displayName: Upload built binaries
|
||||
condition: and( eq( variables['DEPLOY'], 'true' ), ne( variables['Build.Reason'], 'PullRequest' ) )
|
||||
|
||||
- publish: $(modulePath)/build
|
||||
artifact: "rclone-build-$(Agent.JobName)"
|
||||
displayName: Publish built binaries
|
||||
condition: eq( variables['DEPLOY'], 'true' )
|
||||
|
||||
5 backend/cache/cache_internal_test.go vendored
@@ -33,6 +33,7 @@ import (
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/stretchr/testify/assert"
@@ -355,8 +356,8 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
testData2, err = base64.StdEncoding.DecodeString(cryptedText2Base64)
require.NoError(t, err)
} else {
testData1 = []byte(fstest.RandomString(100))
testData2 = []byte(fstest.RandomString(200))
testData1 = []byte(random.String(100))
testData2 = []byte(random.String(200))
}

// write the object
@@ -13,6 +13,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -55,7 +56,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)

t.Run("CreateAlbum", func(t *testing.T) {
albumName := "album/rclone-test-" + fstest.RandomString(24)
albumName := "album/rclone-test-" + random.String(24)
err = f.Mkdir(ctx, albumName)
require.NoError(t, err)
remote := albumName + "/" + fileNameAlbum
25 bin/test_proxy.py Executable file
@@ -0,0 +1,25 @@
#!/usr/bin/env python3
"""
A demo proxy for rclone serve sftp/webdav/ftp etc

This takes the incoming user/pass and converts it into an sftp backend
running on localhost.
"""

import sys
import json

def main():
    i = json.load(sys.stdin)
    o = {
        "type": "sftp",         # type of backend
        "_root": "",            # root of the fs
        "_obscure": "pass",     # comma sep list of fields to obscure
        "user": i["user"],
        "pass": i["pass"],
        "host": "127.0.0.1",
    }
    json.dump(o, sys.stdout, indent="\t")

if __name__ == "__main__":
    main()
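The script above is one half of the auth proxy protocol; the caller feeds it the login as JSON on stdin and reads the generated backend config back from stdout, which is what the new Proxy.run method later in this change does. Below is a minimal Go sketch of that caller side, standalone and for illustration only; the program path and the printed fields are assumptions, not part of the change.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"os/exec"
)

func main() {
	// Credentials that would normally come from the incoming connection.
	in := map[string]string{"user": "me", "pass": "mypassword"}
	inBytes, err := json.Marshal(in)
	if err != nil {
		log.Fatal(err)
	}

	// Run the proxy program (path is an assumption for this sketch)
	// with the JSON on stdin and collect its stdout.
	cmd := exec.Command("./bin/test_proxy.py")
	cmd.Stdin = bytes.NewReader(inBytes)
	var stdout bytes.Buffer
	cmd.Stdout = &stdout
	if err := cmd.Run(); err != nil {
		log.Fatal(err)
	}

	// The proxy replies with the backend config as a JSON object.
	var config map[string]string
	if err := json.Unmarshal(stdout.Bytes(), &config); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("backend type %q for host %q\n", config["type"], config["host"])
}
```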
@@ -9,6 +9,7 @@ package cmd
import (
"fmt"
"log"
"math/rand"
"os"
"os/exec"
"path"
@@ -492,6 +493,7 @@ func AddBackendFlags() {

// Main runs rclone interpreting flags and commands out of os.Args
func Main() {
rand.Seed(time.Now().Unix())
setupRootCommand(Root)
AddBackendFlags()
if err := Root.Execute(); err != nil {
@@ -18,7 +18,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/spf13/cobra"
)

@@ -118,7 +118,7 @@ func (r *results) Print() {

// writeFile writes a file with some random contents
func (r *results) writeFile(path string) (fs.Object, error) {
contents := fstest.RandomString(50)
contents := random.String(50)
src := object.NewStaticObjectInfo(path, time.Now(), int64(len(contents)), true, nil, r.f)
return r.f.Put(r.ctx, bytes.NewBufferString(contents), src)
}
@@ -82,7 +82,7 @@ func checkRelease(shouldUpdate bool) (err error) {
extractPath := filepath.Join(cachePath, "current")

if !exists(cachePath) {
if err := os.MkdirAll(cachePath, 755); err != nil {
if err := os.MkdirAll(cachePath, 0755); err != nil {
fs.Logf(nil, "Error creating cache directory: %s", cachePath)
}
}
@@ -177,14 +177,14 @@ func unzip(src, dest string) (err error) {
path := filepath.Join(dest, f.Name)

if f.FileInfo().IsDir() {
if err := os.MkdirAll(path, f.Mode()); err != nil {
if err := os.MkdirAll(path, 0755); err != nil {
return err
}
} else {
if err := os.MkdirAll(filepath.Dir(path), f.Mode()); err != nil {
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
return err
}
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
return err
}
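The hunks above replace 755 with 0755. In Go an integer literal without the leading zero is decimal, so os.MkdirAll(path, 755) requests the low permission bits 0o363 rather than the intended rwxr-xr-x. A tiny standalone sketch showing the difference:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// 755 is a decimal literal. Its low permission bits are 0o363,
	// i.e. --wxrw--wx, not the rwxr-xr-x the author intended.
	fmt.Println(os.FileMode(755))
	// 0755 is an octal literal and gives the usual rwxr-xr-x.
	fmt.Println(os.FileMode(0755))
}
```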
@@ -5,30 +5,68 @@
|
||||
package ftp
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"os/user"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
ftp "github.com/goftp/server"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/cmd/serve/ftp/ftpflags"
|
||||
"github.com/rclone/rclone/cmd/serve/ftp/ftpopt"
|
||||
"github.com/rclone/rclone/cmd/serve/proxy"
|
||||
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
"github.com/rclone/rclone/fs/log"
|
||||
"github.com/rclone/rclone/fs/rc"
|
||||
"github.com/rclone/rclone/vfs"
|
||||
"github.com/rclone/rclone/vfs/vfsflags"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
// Options contains options for the ftp Server
|
||||
type Options struct {
|
||||
//TODO add more options
|
||||
ListenAddr string // Port to listen on
|
||||
PublicIP string // Public IP address to advertise for passive connections
|
||||
PassivePorts string // Passive ports range
|
||||
BasicUser string // single username for basic auth if not using Htpasswd
|
||||
BasicPass string // password for BasicUser
|
||||
}
|
||||
|
||||
// DefaultOpt is the default values used for Options
|
||||
var DefaultOpt = Options{
|
||||
ListenAddr: "localhost:2121",
|
||||
PublicIP: "",
|
||||
PassivePorts: "30000-32000",
|
||||
BasicUser: "anonymous",
|
||||
BasicPass: "",
|
||||
}
|
||||
|
||||
// Opt is options set by command line flags
|
||||
var Opt = DefaultOpt
|
||||
|
||||
// AddFlags adds flags for ftp
|
||||
func AddFlags(flagSet *pflag.FlagSet) {
|
||||
rc.AddOption("ftp", &Opt)
|
||||
flags.StringVarP(flagSet, &Opt.ListenAddr, "addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to.")
|
||||
flags.StringVarP(flagSet, &Opt.PublicIP, "public-ip", "", Opt.PublicIP, "Public IP address to advertise for passive connections.")
|
||||
flags.StringVarP(flagSet, &Opt.PassivePorts, "passive-port", "", Opt.PassivePorts, "Passive port range to use.")
|
||||
flags.StringVarP(flagSet, &Opt.BasicUser, "user", "", Opt.BasicUser, "User name for authentication.")
|
||||
flags.StringVarP(flagSet, &Opt.BasicPass, "pass", "", Opt.BasicPass, "Password for authentication. (empty value allow every password)")
|
||||
}
|
||||
|
||||
func init() {
|
||||
ftpflags.AddFlags(Command.Flags())
|
||||
vfsflags.AddFlags(Command.Flags())
|
||||
proxyflags.AddFlags(Command.Flags())
|
||||
AddFlags(Command.Flags())
|
||||
}
|
||||
|
||||
// Command definition for cobra
|
||||
@@ -39,12 +77,33 @@ var Command = &cobra.Command{
|
||||
rclone serve ftp implements a basic ftp server to serve the
|
||||
remote over FTP protocol. This can be viewed with a ftp client
|
||||
or you can make a remote of type ftp to read and write it.
|
||||
` + ftpopt.Help + vfs.Help,
|
||||
|
||||
### Server options
|
||||
|
||||
Use --addr to specify which IP address and port the server should
|
||||
listen on, eg --addr 1.2.3.4:8000 or --addr :8080 to listen to all
|
||||
IPs. By default it only listens on localhost. You can use port
|
||||
:0 to let the OS choose an available port.
|
||||
|
||||
If you set --addr to listen on a public or LAN accessible IP address
|
||||
then using Authentication is advised - see the next section for info.
|
||||
|
||||
#### Authentication
|
||||
|
||||
By default this will serve files without needing a login.
|
||||
|
||||
You can set a single username and password with the --user and --pass flags.
|
||||
` + vfs.Help + proxy.Help,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
f := cmd.NewFsSrc(args)
|
||||
var f fs.Fs
|
||||
if proxyflags.Opt.AuthProxy == "" {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
f = cmd.NewFsSrc(args)
|
||||
} else {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
}
|
||||
cmd.Run(false, false, command, func() error {
|
||||
s, err := newServer(f, &ftpflags.Opt)
|
||||
s, err := newServer(f, &Opt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -55,12 +114,17 @@ or you can make a remote of type ftp to read and write it.
|
||||
|
||||
// server contains everything to run the server
|
||||
type server struct {
|
||||
f fs.Fs
|
||||
srv *ftp.Server
|
||||
f fs.Fs
|
||||
srv *ftp.Server
|
||||
opt Options
|
||||
vfs *vfs.VFS
|
||||
proxy *proxy.Proxy
|
||||
pendingMu sync.Mutex
|
||||
pending map[string]*Driver // pending Driver~s that haven't got their VFS
|
||||
}
|
||||
|
||||
// Make a new FTP to serve the remote
|
||||
func newServer(f fs.Fs, opt *ftpopt.Options) (*server, error) {
|
||||
func newServer(f fs.Fs, opt *Options) (*server, error) {
|
||||
host, port, err := net.SplitHostPort(opt.ListenAddr)
|
||||
if err != nil {
|
||||
return nil, errors.New("Failed to parse host:port")
|
||||
@@ -70,27 +134,31 @@ func newServer(f fs.Fs, opt *ftpopt.Options) (*server, error) {
|
||||
return nil, errors.New("Failed to parse host:port")
|
||||
}
|
||||
|
||||
s := &server{
|
||||
f: f,
|
||||
opt: *opt,
|
||||
pending: make(map[string]*Driver),
|
||||
}
|
||||
if proxyflags.Opt.AuthProxy != "" {
|
||||
s.proxy = proxy.New(&proxyflags.Opt)
|
||||
} else {
|
||||
s.vfs = vfs.New(f, &vfsflags.Opt)
|
||||
}
|
||||
|
||||
ftpopt := &ftp.ServerOpts{
|
||||
Name: "Rclone FTP Server",
|
||||
WelcomeMessage: "Welcome on Rclone FTP Server",
|
||||
Factory: &DriverFactory{
|
||||
vfs: vfs.New(f, &vfsflags.Opt),
|
||||
},
|
||||
Hostname: host,
|
||||
Port: portNum,
|
||||
PublicIp: opt.PublicIP,
|
||||
PassivePorts: opt.PassivePorts,
|
||||
Auth: &Auth{
|
||||
BasicUser: opt.BasicUser,
|
||||
BasicPass: opt.BasicPass,
|
||||
},
|
||||
Logger: &Logger{},
|
||||
WelcomeMessage: "Welcome to Rclone " + fs.Version + " FTP Server",
|
||||
Factory: s, // implemented by NewDriver method
|
||||
Hostname: host,
|
||||
Port: portNum,
|
||||
PublicIp: opt.PublicIP,
|
||||
PassivePorts: opt.PassivePorts,
|
||||
Auth: s, // implemented by CheckPasswd method
|
||||
Logger: &Logger{},
|
||||
//TODO implement a maximum of https://godoc.org/github.com/goftp/server#ServerOpts
|
||||
}
|
||||
return &server{
|
||||
f: f,
|
||||
srv: ftp.NewServer(ftpopt),
|
||||
}, nil
|
||||
s.srv = ftp.NewServer(ftpopt)
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// serve runs the ftp server
|
||||
@@ -132,39 +200,106 @@ func (l *Logger) PrintResponse(sessionID string, code int, message string) {
|
||||
fs.Infof(sessionID, "< %d %s", code, message)
|
||||
}
|
||||
|
||||
//Auth struct to handle ftp auth (temporary simple for POC)
|
||||
type Auth struct {
|
||||
BasicUser string
|
||||
BasicPass string
|
||||
// findID finds the connection ID of the calling program. It does
|
||||
// this in an incredibly hacky way by looking in the stack trace.
|
||||
//
|
||||
// callerName should be the name of the function that we are looking
|
||||
// for with a trailing '('
|
||||
//
|
||||
// What is really needed is a change of calling protocol so
|
||||
// CheckPassword is called with the connection.
|
||||
func findID(callerName []byte) (string, error) {
|
||||
// Dump the stack in this format
|
||||
// github.com/rclone/rclone/vendor/github.com/goftp/server.(*Conn).Serve(0xc0000b2680)
|
||||
// /home/ncw/go/src/github.com/rclone/rclone/vendor/github.com/goftp/server/conn.go:116 +0x11d
|
||||
buf := make([]byte, 4096)
|
||||
n := runtime.Stack(buf, false)
|
||||
buf = buf[:n]
|
||||
|
||||
// look for callerName first
|
||||
i := bytes.Index(buf, callerName)
|
||||
if i < 0 {
|
||||
return "", errors.Errorf("findID: caller name not found in:\n%s", buf)
|
||||
}
|
||||
buf = buf[i+len(callerName):]
|
||||
|
||||
// find next ')'
|
||||
i = bytes.IndexByte(buf, ')')
|
||||
if i < 0 {
|
||||
return "", errors.Errorf("findID: end of args not found in:\n%s", buf)
|
||||
}
|
||||
buf = buf[:i]
|
||||
|
||||
// trim off first argument
|
||||
// find next ','
|
||||
i = bytes.IndexByte(buf, ',')
|
||||
if i >= 0 {
|
||||
buf = buf[:i]
|
||||
}
|
||||
|
||||
return string(buf), nil
|
||||
}
|
||||
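findID works because the goroutine stack trace prints the receiver pointer of each frame, so the argument shown after (*Conn).Serve( is the same value that fmt.Sprintf("%p", conn) yields in Init. Below is a self-contained sketch of that trick; it assumes the traceback still prints pointer arguments this way, which can vary between Go releases.

```go
package main

import (
	"bytes"
	"fmt"
	"runtime"
)

type Conn struct{ name string }

// ID extracts this receiver's pointer from the current stack trace,
// the same way findID recovers the goftp connection.
func (c *Conn) ID() string {
	buf := make([]byte, 4096)
	buf = buf[:runtime.Stack(buf, false)]
	marker := []byte("(*Conn).ID(")
	i := bytes.Index(buf, marker)
	if i < 0 {
		return ""
	}
	buf = buf[i+len(marker):]
	if j := bytes.IndexAny(buf, ",)"); j >= 0 {
		buf = buf[:j]
	}
	return string(buf)
}

func main() {
	c := &Conn{name: "demo"}
	// Both lines should print the same 0x... value on typical Go versions.
	fmt.Println(c.ID())
	fmt.Printf("%p\n", c)
}
```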
|
||||
//CheckPasswd handle auth based on configuration
|
||||
func (a *Auth) CheckPasswd(user, pass string) (bool, error) {
|
||||
return a.BasicUser == user && (a.BasicPass == "" || a.BasicPass == pass), nil
|
||||
var connServeFunction = []byte("(*Conn).Serve(")
|
||||
|
||||
// CheckPasswd handle auth based on configuration
|
||||
func (s *server) CheckPasswd(user, pass string) (ok bool, err error) {
|
||||
var VFS *vfs.VFS
|
||||
if s.proxy != nil {
|
||||
VFS, _, err = s.proxy.Call(user, pass)
|
||||
if err != nil {
|
||||
fs.Infof(nil, "proxy login failed: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
id, err := findID(connServeFunction)
|
||||
if err != nil {
|
||||
fs.Infof(nil, "proxy login failed: failed to read ID from stack: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
s.pendingMu.Lock()
|
||||
d := s.pending[id]
|
||||
delete(s.pending, id)
|
||||
s.pendingMu.Unlock()
|
||||
if d == nil {
|
||||
return false, errors.Errorf("proxy login failed: failed to find pending Driver under ID %q", id)
|
||||
}
|
||||
d.vfs = VFS
|
||||
} else {
|
||||
ok = s.opt.BasicUser == user && (s.opt.BasicPass == "" || s.opt.BasicPass == pass)
|
||||
if !ok {
|
||||
fs.Infof(nil, "login failed: bad credentials")
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
//DriverFactory factory of ftp driver for each session
|
||||
type DriverFactory struct {
|
||||
vfs *vfs.VFS
|
||||
}
|
||||
|
||||
//NewDriver start a new session
|
||||
func (f *DriverFactory) NewDriver() (ftp.Driver, error) {
|
||||
// NewDriver starts a new session for each client connection
|
||||
func (s *server) NewDriver() (ftp.Driver, error) {
|
||||
log.Trace("", "Init driver")("")
|
||||
return &Driver{
|
||||
vfs: f.vfs,
|
||||
}, nil
|
||||
d := &Driver{
|
||||
s: s,
|
||||
vfs: s.vfs, // this can be nil if proxy set
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
//Driver implementation of ftp server
|
||||
type Driver struct {
|
||||
s *server
|
||||
vfs *vfs.VFS
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
//Init a connection
|
||||
func (d *Driver) Init(*ftp.Conn) {
|
||||
func (d *Driver) Init(c *ftp.Conn) {
|
||||
defer log.Trace("", "Init session")("")
|
||||
if d.s.proxy != nil {
|
||||
id := fmt.Sprintf("%p", c)
|
||||
d.s.pendingMu.Lock()
|
||||
d.s.pending[id] = d
|
||||
d.s.pendingMu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
//Stat get information on file or folder
|
||||
|
||||
@@ -8,83 +8,72 @@
|
||||
package ftp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"testing"
|
||||
|
||||
ftp "github.com/goftp/server"
|
||||
_ "github.com/rclone/rclone/backend/local"
|
||||
"github.com/rclone/rclone/cmd/serve/ftp/ftpopt"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/cmd/serve/servetest"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const (
|
||||
testHOST = "localhost"
|
||||
testPORT = "51780"
|
||||
testPASSIVEPORTRANGE = "30000-32000"
|
||||
testUSER = "rclone"
|
||||
testPASS = "password"
|
||||
)
|
||||
|
||||
// TestFTP runs the ftp server then runs the unit tests for the
|
||||
// ftp remote against it.
|
||||
func TestFTP(t *testing.T) {
|
||||
opt := ftpopt.DefaultOpt
|
||||
opt.ListenAddr = testHOST + ":" + testPORT
|
||||
opt.PassivePorts = testPASSIVEPORTRANGE
|
||||
opt.BasicUser = "rclone"
|
||||
opt.BasicPass = "password"
|
||||
// Configure and start the server
|
||||
start := func(f fs.Fs) (configmap.Simple, func()) {
|
||||
opt := DefaultOpt
|
||||
opt.ListenAddr = testHOST + ":" + testPORT
|
||||
opt.PassivePorts = testPASSIVEPORTRANGE
|
||||
opt.BasicUser = testUSER
|
||||
opt.BasicPass = testPASS
|
||||
|
||||
fstest.Initialise()
|
||||
|
||||
fremote, _, clean, err := fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir)
|
||||
assert.NoError(t, err)
|
||||
defer clean()
|
||||
|
||||
err = fremote.Mkdir(context.Background(), "")
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Start the server
|
||||
w, err := newServer(fremote, &opt)
|
||||
assert.NoError(t, err)
|
||||
|
||||
go func() {
|
||||
err := w.serve()
|
||||
if err != ftp.ErrServerClosed {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}()
|
||||
defer func() {
|
||||
err := w.close()
|
||||
w, err := newServer(f, &opt)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
// Change directory to run the tests
|
||||
err = os.Chdir("../../../backend/ftp")
|
||||
assert.NoError(t, err, "failed to cd to ftp remote")
|
||||
quit := make(chan struct{})
|
||||
go func() {
|
||||
err := w.serve()
|
||||
close(quit)
|
||||
if err != ftp.ErrServerClosed {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Run the ftp tests with an on the fly remote
|
||||
args := []string{"test"}
|
||||
if testing.Verbose() {
|
||||
args = append(args, "-v")
|
||||
// Config for the backend we'll use to connect to the server
|
||||
config := configmap.Simple{
|
||||
"type": "ftp",
|
||||
"host": testHOST,
|
||||
"port": testPORT,
|
||||
"user": testUSER,
|
||||
"pass": obscure.MustObscure(testPASS),
|
||||
}
|
||||
|
||||
return config, func() {
|
||||
err := w.close()
|
||||
assert.NoError(t, err)
|
||||
<-quit
|
||||
}
|
||||
}
|
||||
if *fstest.Verbose {
|
||||
args = append(args, "-verbose")
|
||||
}
|
||||
args = append(args, "-list-retries", fmt.Sprint(*fstest.ListRetries))
|
||||
args = append(args, "-remote", "ftptest:")
|
||||
cmd := exec.Command("go", args...)
|
||||
cmd.Env = append(os.Environ(),
|
||||
"RCLONE_CONFIG_FTPTEST_TYPE=ftp",
|
||||
"RCLONE_CONFIG_FTPTEST_HOST="+testHOST,
|
||||
"RCLONE_CONFIG_FTPTEST_PORT="+testPORT,
|
||||
"RCLONE_CONFIG_FTPTEST_USER=rclone",
|
||||
"RCLONE_CONFIG_FTPTEST_PASS=0HU5Hx42YiLoNGJxppOOP3QTbr-KB_MP", // ./rclone obscure password
|
||||
)
|
||||
out, err := cmd.CombinedOutput()
|
||||
if len(out) != 0 {
|
||||
t.Logf("\n----------\n%s----------\n", string(out))
|
||||
}
|
||||
assert.NoError(t, err, "Running ftp integration tests")
|
||||
|
||||
servetest.Run(t, "ftp", start)
|
||||
}
|
||||
|
||||
func TestFindID(t *testing.T) {
|
||||
id, err := findID([]byte("TestFindID("))
|
||||
require.NoError(t, err)
|
||||
// id should be the argument to this function
|
||||
assert.Equal(t, fmt.Sprintf("%p", t), id)
|
||||
}
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
package ftpflags
|
||||
|
||||
import (
|
||||
"github.com/rclone/rclone/cmd/serve/ftp/ftpopt"
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
"github.com/rclone/rclone/fs/rc"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
// Options set by command line flags
|
||||
var (
|
||||
Opt = ftpopt.DefaultOpt
|
||||
)
|
||||
|
||||
// AddFlagsPrefix adds flags for the ftpopt
|
||||
func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *ftpopt.Options) {
|
||||
rc.AddOption("ftp", &Opt)
|
||||
flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to.")
|
||||
flags.StringVarP(flagSet, &Opt.PublicIP, prefix+"public-ip", "", Opt.PublicIP, "Public IP address to advertise for passive connections.")
|
||||
flags.StringVarP(flagSet, &Opt.PassivePorts, prefix+"passive-port", "", Opt.PassivePorts, "Passive port range to use.")
|
||||
flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication.")
|
||||
flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication. (empty value allow every password)")
|
||||
}
|
||||
|
||||
// AddFlags adds flags for the httplib
|
||||
func AddFlags(flagSet *pflag.FlagSet) {
|
||||
AddFlagsPrefix(flagSet, "", &Opt)
|
||||
}
|
||||
@@ -1,40 +0,0 @@
|
||||
package ftpopt
|
||||
|
||||
// Help contains text describing the http server to add to the command
|
||||
// help.
|
||||
var Help = `
|
||||
### Server options
|
||||
|
||||
Use --addr to specify which IP address and port the server should
|
||||
listen on, eg --addr 1.2.3.4:8000 or --addr :8080 to listen to all
|
||||
IPs. By default it only listens on localhost. You can use port
|
||||
:0 to let the OS choose an available port.
|
||||
|
||||
If you set --addr to listen on a public or LAN accessible IP address
|
||||
then using Authentication is advised - see the next section for info.
|
||||
|
||||
#### Authentication
|
||||
|
||||
By default this will serve files without needing a login.
|
||||
|
||||
You can set a single username and password with the --user and --pass flags.
|
||||
`
|
||||
|
||||
// Options contains options for the http Server
|
||||
type Options struct {
|
||||
//TODO add more options
|
||||
ListenAddr string // Port to listen on
|
||||
PublicIP string // Passive ports range
|
||||
PassivePorts string // Passive ports range
|
||||
BasicUser string // single username for basic auth if not using Htpasswd
|
||||
BasicPass string // password for BasicUser
|
||||
}
|
||||
|
||||
// DefaultOpt is the default values used for Options
|
||||
var DefaultOpt = Options{
|
||||
ListenAddr: "localhost:2121",
|
||||
PublicIP: "",
|
||||
PassivePorts: "30000-32000",
|
||||
BasicUser: "anonymous",
|
||||
BasicPass: "",
|
||||
}
|
||||
@@ -68,7 +68,7 @@ func newServer(f fs.Fs, opt *httplib.Options) *server {
|
||||
f: f,
|
||||
vfs: vfs.New(f, &vfsflags.Opt),
|
||||
}
|
||||
mux.HandleFunc("/", s.handler)
|
||||
mux.HandleFunc(s.Opt.Prefix+"/", s.handler)
|
||||
return s
|
||||
}
|
||||
|
||||
@@ -93,7 +93,10 @@ func (s *server) handler(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Accept-Ranges", "bytes")
|
||||
w.Header().Set("Server", "rclone/"+fs.Version)
|
||||
|
||||
urlPath := r.URL.Path
|
||||
urlPath, ok := s.Path(w, r)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
isDir := strings.HasSuffix(urlPath, "/")
|
||||
remote := strings.Trim(urlPath, "/")
|
||||
if isDir {
|
||||
|
||||
@@ -26,6 +26,9 @@ func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *httplib.Options)
|
||||
flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "realm for authentication")
|
||||
flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication.")
|
||||
flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication.")
|
||||
if prefix == "" {
|
||||
flags.StringVarP(flagSet, &Opt.Prefix, prefix+"prefix", "", Opt.Prefix, "Prefix for URLs.")
|
||||
}
|
||||
}
|
||||
|
||||
// AddFlags adds flags for the httplib
|
||||
|
||||
@@ -44,6 +44,11 @@ for a transfer.
|
||||
--max-header-bytes controls the maximum number of bytes the server will
|
||||
accept in the HTTP header.
|
||||
|
||||
--prefix controls the URL prefix that rclone serves from. By default
|
||||
rclone will serve from the root. If you used --prefix "rclone" then
|
||||
rclone would serve from a URL starting with "/rclone/". This is
|
||||
useful if you wish to proxy rclone serve.
|
||||
|
||||
#### Authentication
|
||||
|
||||
By default this will serve files without needing a login.
|
||||
@@ -81,6 +86,7 @@ certificate authority certificate.
|
||||
// Options contains options for the http Server
|
||||
type Options struct {
|
||||
ListenAddr string // Port to listen on
|
||||
Prefix string // prefix to strip from URLs
|
||||
ServerReadTimeout time.Duration // Timeout for server reading data
|
||||
ServerWriteTimeout time.Duration // Timeout for server writing data
|
||||
MaxHeaderBytes int // Maximum size of request header
|
||||
@@ -91,8 +97,15 @@ type Options struct {
|
||||
Realm string // realm for authentication
|
||||
BasicUser string // single username for basic auth if not using Htpasswd
|
||||
BasicPass string // password for BasicUser
|
||||
Auth AuthFn // custom Auth (not set by command line flags)
|
||||
}
|
||||
|
||||
// AuthFn if used will be used to authenticate user, pass. If an error
|
||||
// is returned then the user is not authenticated.
|
||||
//
|
||||
// If a non nil value is returned then it is added to the context under the key
|
||||
type AuthFn func(user, pass string) (value interface{}, err error)
|
||||
|
||||
// DefaultOpt is the default values used for Options
|
||||
var DefaultOpt = Options{
|
||||
ListenAddr: "localhost:8080",
|
||||
@@ -117,9 +130,14 @@ type Server struct {
|
||||
|
||||
type contextUserType struct{}
|
||||
|
||||
// ContextUserKey is a simple context key
|
||||
// ContextUserKey is a simple context key for storing the username of the request
|
||||
var ContextUserKey = &contextUserType{}
|
||||
|
||||
type contextAuthType struct{}
|
||||
|
||||
// ContextAuthKey is a simple context key for storing info returned by AuthFn
|
||||
var ContextAuthKey = &contextAuthType{}
|
||||
|
||||
// singleUserProvider provides the encrypted password for a single user
|
||||
func (s *Server) singleUserProvider(user, realm string) string {
|
||||
if user == s.Opt.BasicUser {
|
||||
@@ -128,6 +146,27 @@ func (s *Server) singleUserProvider(user, realm string) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// parseAuthorization parses the Authorization header into user, pass
|
||||
// it returns a boolean as to whether the parse was successful
|
||||
func parseAuthorization(r *http.Request) (user, pass string, ok bool) {
|
||||
authHeader := r.Header.Get("Authorization")
|
||||
if authHeader != "" {
|
||||
s := strings.SplitN(authHeader, " ", 2)
|
||||
if len(s) == 2 && s[0] == "Basic" {
|
||||
b, err := base64.StdEncoding.DecodeString(s[1])
|
||||
if err == nil {
|
||||
parts := strings.SplitN(string(b), ":", 2)
|
||||
user = parts[0]
|
||||
if len(parts) > 1 {
|
||||
pass = parts[1]
|
||||
ok = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
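parseAuthorization above simply reverses the standard Authorization: Basic base64(user:pass) encoding. A short standalone sketch of the round trip using only the standard library, with made-up credentials:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// Client side: build the header value for user "me", password "pw".
	header := "Basic " + base64.StdEncoding.EncodeToString([]byte("me:pw"))

	// Server side: the same steps parseAuthorization performs.
	parts := strings.SplitN(header, " ", 2)
	if len(parts) == 2 && parts[0] == "Basic" {
		if decoded, err := base64.StdEncoding.DecodeString(parts[1]); err == nil {
			creds := strings.SplitN(string(decoded), ":", 2)
			fmt.Println("user:", creds[0], "pass:", creds[1]) // user: me pass: pw
		}
	}
}
```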
|
||||
// NewServer creates an http server. The opt can be nil in which case
|
||||
// the default options will be used.
|
||||
func NewServer(handler http.Handler, opt *Options) *Server {
|
||||
@@ -143,17 +182,20 @@ func NewServer(handler http.Handler, opt *Options) *Server {
|
||||
}
|
||||
|
||||
// Use htpasswd if required on everything
|
||||
if s.Opt.HtPasswd != "" || s.Opt.BasicUser != "" {
|
||||
var secretProvider auth.SecretProvider
|
||||
if s.Opt.HtPasswd != "" {
|
||||
fs.Infof(nil, "Using %q as htpasswd storage", s.Opt.HtPasswd)
|
||||
secretProvider = auth.HtpasswdFileProvider(s.Opt.HtPasswd)
|
||||
} else {
|
||||
fs.Infof(nil, "Using --user %s --pass XXXX as authenticated user", s.Opt.BasicUser)
|
||||
s.basicPassHashed = string(auth.MD5Crypt([]byte(s.Opt.BasicPass), []byte("dlPL2MqE"), []byte("$1$")))
|
||||
secretProvider = s.singleUserProvider
|
||||
if s.Opt.HtPasswd != "" || s.Opt.BasicUser != "" || s.Opt.Auth != nil {
|
||||
var authenticator *auth.BasicAuth
|
||||
if s.Opt.Auth == nil {
|
||||
var secretProvider auth.SecretProvider
|
||||
if s.Opt.HtPasswd != "" {
|
||||
fs.Infof(nil, "Using %q as htpasswd storage", s.Opt.HtPasswd)
|
||||
secretProvider = auth.HtpasswdFileProvider(s.Opt.HtPasswd)
|
||||
} else {
|
||||
fs.Infof(nil, "Using --user %s --pass XXXX as authenticated user", s.Opt.BasicUser)
|
||||
s.basicPassHashed = string(auth.MD5Crypt([]byte(s.Opt.BasicPass), []byte("dlPL2MqE"), []byte("$1$")))
|
||||
secretProvider = s.singleUserProvider
|
||||
}
|
||||
authenticator = auth.NewBasicAuthenticator(s.Opt.Realm, secretProvider)
|
||||
}
|
||||
authenticator := auth.NewBasicAuthenticator(s.Opt.Realm, secretProvider)
|
||||
oldHandler := handler
|
||||
handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// No auth wanted for OPTIONS method
|
||||
@@ -161,26 +203,36 @@ func NewServer(handler http.Handler, opt *Options) *Server {
|
||||
oldHandler.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
if username := authenticator.CheckAuth(r); username == "" {
|
||||
authHeader := r.Header.Get(authenticator.Headers.V().Authorization)
|
||||
if authHeader != "" {
|
||||
s := strings.SplitN(authHeader, " ", 2)
|
||||
var userName = "UNKNOWN"
|
||||
if len(s) == 2 && s[0] == "Basic" {
|
||||
b, err := base64.StdEncoding.DecodeString(s[1])
|
||||
if err == nil {
|
||||
userName = strings.SplitN(string(b), ":", 2)[0]
|
||||
}
|
||||
}
|
||||
fs.Infof(r.URL.Path, "%s: Unauthorized request from %s", r.RemoteAddr, userName)
|
||||
} else {
|
||||
fs.Infof(r.URL.Path, "%s: Basic auth challenge sent", r.RemoteAddr)
|
||||
}
|
||||
authenticator.RequireAuth(w, r)
|
||||
} else {
|
||||
r = r.WithContext(context.WithValue(r.Context(), ContextUserKey, username))
|
||||
oldHandler.ServeHTTP(w, r)
|
||||
unauthorized := func() {
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.Header().Set("WWW-Authenticate", `Basic realm="`+s.Opt.Realm+`"`)
|
||||
http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
|
||||
}
|
||||
user, pass, authValid := parseAuthorization(r)
|
||||
if !authValid {
|
||||
unauthorized()
|
||||
return
|
||||
}
|
||||
if s.Opt.Auth == nil {
|
||||
if username := authenticator.CheckAuth(r); username == "" {
|
||||
fs.Infof(r.URL.Path, "%s: Unauthorized request from %s", r.RemoteAddr, user)
|
||||
unauthorized()
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// Custom Auth
|
||||
value, err := s.Opt.Auth(user, pass)
|
||||
if err != nil {
|
||||
fs.Infof(r.URL.Path, "%s: Auth failed from %s: %v", r.RemoteAddr, user, err)
|
||||
unauthorized()
|
||||
return
|
||||
}
|
||||
if value != nil {
|
||||
r = r.WithContext(context.WithValue(r.Context(), ContextAuthKey, value))
|
||||
}
|
||||
}
|
||||
r = r.WithContext(context.WithValue(r.Context(), ContextUserKey, user))
|
||||
oldHandler.ServeHTTP(w, r)
|
||||
})
|
||||
s.usingAuth = true
|
||||
}
|
||||
@@ -190,6 +242,14 @@ func NewServer(handler http.Handler, opt *Options) *Server {
|
||||
log.Fatalf("Need both -cert and -key to use SSL")
|
||||
}
|
||||
|
||||
// If a Path is set then serve from there
|
||||
if strings.HasSuffix(s.Opt.Prefix, "/") {
|
||||
s.Opt.Prefix = s.Opt.Prefix[:len(s.Opt.Prefix)-1]
|
||||
}
|
||||
if s.Opt.Prefix != "" && !strings.HasPrefix(s.Opt.Prefix, "/") {
|
||||
s.Opt.Prefix = "/" + s.Opt.Prefix
|
||||
}
|
||||
|
||||
// FIXME make a transport?
|
||||
s.httpServer = &http.Server{
|
||||
Addr: s.Opt.ListenAddr,
|
||||
@@ -299,10 +359,27 @@ func (s *Server) URL() string {
|
||||
// (i.e. port assigned by operating system)
|
||||
addr = s.listener.Addr().String()
|
||||
}
|
||||
return fmt.Sprintf("%s://%s/", proto, addr)
|
||||
return fmt.Sprintf("%s://%s%s/", proto, addr, s.Opt.Prefix)
|
||||
}
|
||||
|
||||
// UsingAuth returns true if authentication is required
|
||||
func (s *Server) UsingAuth() bool {
|
||||
return s.usingAuth
|
||||
}
|
||||
|
||||
// Path returns the current path with the Prefix stripped
|
||||
//
|
||||
// If it returns false, then the path was invalid and the handler
|
||||
// should exit as the error response has already been sent
|
||||
func (s *Server) Path(w http.ResponseWriter, r *http.Request) (Path string, ok bool) {
|
||||
Path = r.URL.Path
|
||||
if s.Opt.Prefix == "" {
|
||||
return Path, true
|
||||
}
|
||||
if !strings.HasPrefix(Path, s.Opt.Prefix+"/") {
|
||||
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
|
||||
return Path, false
|
||||
}
|
||||
Path = Path[len(s.Opt.Prefix):]
|
||||
return Path, true
|
||||
}
|
||||
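The Path method above is what makes --prefix work: requests outside the prefix get a 404 and the prefix is stripped before the handler sees the path. A minimal standalone sketch of the same logic; the prefix and paths are made up for illustration:

```go
package main

import (
	"fmt"
	"strings"
)

// stripPrefix mirrors Server.Path: reject paths outside the prefix,
// otherwise return the path with the prefix removed.
func stripPrefix(prefix, path string) (string, bool) {
	if prefix == "" {
		return path, true
	}
	if !strings.HasPrefix(path, prefix+"/") {
		return path, false // the server would answer 404 here
	}
	return path[len(prefix):], true
}

func main() {
	fmt.Println(stripPrefix("/rclone", "/rclone/file.txt")) // /file.txt true
	fmt.Println(stripPrefix("/rclone", "/other/file.txt"))  // /other/file.txt false
}
```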
|
||||
270
cmd/serve/proxy/proxy.go
Normal file
270
cmd/serve/proxy/proxy.go
Normal file
@@ -0,0 +1,270 @@
|
||||
// Package proxy implements a programmable proxy for rclone serve
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
libcache "github.com/rclone/rclone/lib/cache"
|
||||
"github.com/rclone/rclone/vfs"
|
||||
"github.com/rclone/rclone/vfs/vfsflags"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
)
|
||||
|
||||
// Help contains text describing how to use the proxy
|
||||
var Help = strings.Replace(`
|
||||
### Auth Proxy
|
||||
|
||||
If you supply the parameter |--auth-proxy /path/to/program| then
|
||||
rclone will use that program to generate backends on the fly which
|
||||
then are used to authenticate incoming requests. This uses a simple
|
||||
JSON based protocol with input on STDIN and output on STDOUT.
|
||||
|
||||
There is an example program
|
||||
[bin/test_proxy.py](https://github.com/rclone/rclone/blob/master/bin/test_proxy.py)
|
||||
in the rclone source code.
|
||||
|
||||
The program's job is to take a |user| and |pass| on the input and turn
|
||||
those into the config for a backend on STDOUT in JSON format. This
|
||||
config will have any default parameters for the backend added, but it
|
||||
won't use configuration from environment variables or command line
|
||||
options - it is the job of the proxy program to make a complete
|
||||
config.
|
||||
|
||||
This config generated must have this extra parameter
|
||||
- |_root| - root to use for the backend
|
||||
|
||||
And it may have this parameter
|
||||
- |_obscure| - comma separated strings for parameters to obscure
|
||||
|
||||
For example the program might take this on STDIN
|
||||
|
||||
|||
|
||||
{
|
||||
"user": "me",
|
||||
"pass": "mypassword"
|
||||
}
|
||||
|||
|
||||
|
||||
And return this on STDOUT
|
||||
|
||||
|||
|
||||
{
|
||||
"type": "sftp",
|
||||
"_root": "",
|
||||
"_obscure": "pass",
|
||||
"user": "me",
|
||||
"pass": "mypassword",
|
||||
"host": "sftp.example.com"
|
||||
}
|
||||
|||
|
||||
|
||||
This would mean that an SFTP backend would be created on the fly for
|
||||
the |user| and |pass| returned in the output to the host given. Note
|
||||
that since |_obscure| is set to |pass|, rclone will obscure the |pass|
|
||||
parameter before creating the backend (which is required for sftp
|
||||
backends).
|
||||
|
||||
The program can manipulate the supplied |user| in any way, for example
|
||||
to proxy to many different sftp backends, you could make the
|
||||
|user| be |user@example.com| and then set the |host| to |example.com|
|
||||
in the output and the user to |user|. For security you'd probably want
|
||||
to restrict the |host| to a limited list.
|
||||
|
||||
Note that an internal cache is keyed on |user| so only use that for
|
||||
configuration, don't use |pass|. This also means that if a user's
|
||||
password is changed the cache will need to expire (which takes 5 mins)
|
||||
before it takes effect.
|
||||
|
||||
This can be used to build general purpose proxies to any kind of
|
||||
backend that rclone supports.
|
||||
`, "|", "`", -1)
|
||||
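As a concrete illustration of the protocol described above, here is a sketch of a proxy program that splits a user@host login into an sftp backend for that host, as the text suggests. It is not part of this change, the host names are placeholders, and a real deployment would validate the host against an allow-list.

```go
// +build ignore

// Example auth proxy: turns "user@host" logins into sftp backends.
package main

import (
	"encoding/json"
	"log"
	"os"
	"strings"
)

func main() {
	var in map[string]string
	if err := json.NewDecoder(os.Stdin).Decode(&in); err != nil {
		log.Fatal(err)
	}

	user, host := in["user"], "sftp.example.com" // placeholder default host
	if at := strings.LastIndex(user, "@"); at >= 0 {
		user, host = user[:at], user[at+1:]
	}

	out := map[string]string{
		"type":     "sftp",
		"_root":    "",
		"_obscure": "pass", // rclone will obscure the password for us
		"user":     user,
		"pass":     in["pass"],
		"host":     host,
	}
	if err := json.NewEncoder(os.Stdout).Encode(out); err != nil {
		log.Fatal(err)
	}
}
```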
|
||||
// Options is options for creating the proxy
|
||||
type Options struct {
|
||||
AuthProxy string
|
||||
}
|
||||
|
||||
// DefaultOpt is the default values used for Opt
|
||||
var DefaultOpt = Options{
|
||||
AuthProxy: "",
|
||||
}
|
||||
|
||||
// Proxy represents a proxy to turn auth requests into a VFS
|
||||
type Proxy struct {
|
||||
cmdLine []string // broken down command line
|
||||
vfsCache *libcache.Cache
|
||||
Opt Options
|
||||
}
|
||||
|
||||
// cacheEntry is what is stored in the vfsCache
|
||||
type cacheEntry struct {
|
||||
vfs *vfs.VFS // stored VFS
|
||||
pwHash []byte // bcrypt hash of the password
|
||||
}
|
||||
|
||||
// New creates a new proxy with the Options passed in
|
||||
func New(opt *Options) *Proxy {
|
||||
return &Proxy{
|
||||
Opt: *opt,
|
||||
cmdLine: strings.Fields(opt.AuthProxy),
|
||||
vfsCache: libcache.New(),
|
||||
}
|
||||
}
|
||||
|
||||
// run the proxy command returning a config map
|
||||
func (p *Proxy) run(in map[string]string) (config configmap.Simple, err error) {
|
||||
cmd := exec.Command(p.cmdLine[0], p.cmdLine[1:]...)
|
||||
inBytes, err := json.MarshalIndent(in, "", "\t")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Proxy.Call failed to marshal input: %v")
|
||||
}
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.Stdin = bytes.NewBuffer(inBytes)
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
start := time.Now()
|
||||
err = cmd.Run()
|
||||
fs.Debugf(nil, "Calling proxy %v", p.cmdLine)
|
||||
duration := time.Since(start)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "proxy: failed on %v: %q", p.cmdLine, strings.TrimSpace(string(stderr.Bytes())))
|
||||
}
|
||||
err = json.Unmarshal(stdout.Bytes(), &config)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "proxy: failed to read output: %q", string(stdout.Bytes()))
|
||||
}
|
||||
fs.Debugf(nil, "Proxy returned in %v", duration)
|
||||
|
||||
// Obscure any values in the config map that need it
|
||||
obscureFields, ok := config.Get("_obscure")
|
||||
if ok {
|
||||
for _, key := range strings.Split(obscureFields, ",") {
|
||||
value, ok := config.Get(key)
|
||||
if ok {
|
||||
obscuredValue, err := obscure.Obscure(value)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "proxy")
|
||||
}
|
||||
config.Set(key, obscuredValue)
|
||||
}
|
||||
}
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
|
||||
// call runs the auth proxy and returns a cacheEntry and an error
|
||||
func (p *Proxy) call(user, pass string, passwordBytes []byte) (value interface{}, err error) {
|
||||
// Contact the proxy
|
||||
config, err := p.run(map[string]string{
|
||||
"user": user,
|
||||
"pass": pass,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Look for required fields in the answer
|
||||
fsName, ok := config.Get("type")
|
||||
if !ok {
|
||||
return nil, errors.New("proxy: type not set in result")
|
||||
}
|
||||
root, ok := config.Get("_root")
|
||||
if !ok {
|
||||
return nil, errors.New("proxy: _root not set in result")
|
||||
}
|
||||
|
||||
// Find the backend
|
||||
fsInfo, err := fs.Find(fsName)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "proxy: couldn't find backend for %q", fsName)
|
||||
}
|
||||
|
||||
// base name of config on user name. This may appear in logs
|
||||
name := "proxy-" + user
|
||||
fsString := name + ":" + root
|
||||
|
||||
// Look for fs in the VFS cache
|
||||
value, err = p.vfsCache.Get(user, func(key string) (value interface{}, ok bool, err error) {
|
||||
// Create the Fs from the cache
|
||||
f, err := cache.GetFn(fsString, func(fsString string) (fs.Fs, error) {
|
||||
// Update the config with the default values
|
||||
for i := range fsInfo.Options {
|
||||
o := &fsInfo.Options[i]
|
||||
if _, found := config.Get(o.Name); !found && o.Default != nil && o.String() != "" {
|
||||
config.Set(o.Name, o.String())
|
||||
}
|
||||
}
|
||||
return fsInfo.NewFs(name, root, config)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
pwHash, err := bcrypt.GenerateFromPassword(passwordBytes, bcrypt.DefaultCost)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
entry := cacheEntry{
|
||||
vfs: vfs.New(f, &vfsflags.Opt),
|
||||
pwHash: pwHash,
|
||||
}
|
||||
return entry, true, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "proxy: failed to create backend")
|
||||
}
|
||||
return value, nil
|
||||
}
|
||||
|
||||
// Call runs the auth proxy with the given input, returning a *vfs.VFS
|
||||
// and the key used in the VFS cache.
|
||||
func (p *Proxy) Call(user, pass string) (VFS *vfs.VFS, vfsKey string, err error) {
|
||||
var passwordBytes = []byte(pass)
|
||||
|
||||
// Look in the cache first
|
||||
value, ok := p.vfsCache.GetMaybe(user)
|
||||
|
||||
// If not found then call the proxy for a fresh answer
|
||||
if !ok {
|
||||
value, err = p.call(user, pass, passwordBytes)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
}
|
||||
|
||||
// check we got what we were expecting
|
||||
entry, ok := value.(cacheEntry)
|
||||
if !ok {
|
||||
return nil, "", errors.Errorf("proxy: value is not cache entry: %#v", value)
|
||||
}
|
||||
|
||||
// Check the password is correct in the cached entry. This
|
||||
// prevents an attack where subsequent requests for the same
|
||||
// user don't have their auth checked. It does mean that if
|
||||
// the password is changed, the user will have to wait for
|
||||
// cache expiry (5m) before trying again.
|
||||
err = bcrypt.CompareHashAndPassword(entry.pwHash, passwordBytes)
|
||||
if err != nil {
|
||||
return nil, "", errors.Wrap(err, "proxy: incorrect password")
|
||||
}
|
||||
|
||||
return entry.vfs, user, nil
|
||||
}
|
||||
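Call only trusts a cached VFS after re-checking the supplied password against the bcrypt hash stored with the entry, which is why a changed password only takes effect once the cache entry expires. A short standalone sketch of that hash-and-verify step with golang.org/x/crypto/bcrypt, using made-up values:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	password := []byte("mypassword")

	// Stored alongside the cached VFS when the entry is created.
	hash, err := bcrypt.GenerateFromPassword(password, bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}

	// On a later request for the same user: nil means the password matches.
	fmt.Println(bcrypt.CompareHashAndPassword(hash, password))          // <nil>
	fmt.Println(bcrypt.CompareHashAndPassword(hash, []byte("wrongpw"))) // error
}
```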
|
||||
// Get VFS from the cache using key - returns nil if not found
|
||||
func (p *Proxy) Get(key string) *vfs.VFS {
|
||||
value, ok := p.vfsCache.GetMaybe(key)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
entry := value.(cacheEntry)
|
||||
return entry.vfs
|
||||
}
|
||||
41
cmd/serve/proxy/proxy_code.go
Normal file
41
cmd/serve/proxy/proxy_code.go
Normal file
@@ -0,0 +1,41 @@
// +build ignore

// A simple auth proxy for testing purposes
package main

import (
	"encoding/json"
	"log"
	"os"
)

func main() {
	// Read the input
	var in map[string]string
	err := json.NewDecoder(os.Stdin).Decode(&in)
	if err != nil {
		log.Fatal(err)
	}

	// Write the output
	var out = map[string]string{}
	for k, v := range in {
		switch k {
		case "user":
			v += "-test"
		case "error":
			log.Fatal(v)
		}
		out[k] = v
	}
	if out["type"] == "" {
		out["type"] = "local"
	}
	if out["_root"] == "" {
		out["_root"] = ""
	}
	err = json.NewEncoder(os.Stdout).Encode(&out)
	if err != nil {
		log.Fatal(err)
	}
}
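The test program above also illustrates the auth proxy protocol: one JSON object is read from stdin and one is written back on stdout. A hypothetical exchange is sketched below in Go; the field values are illustrative only, and the real server may pass further fields to the proxy.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// What "rclone serve" writes to the proxy's stdin.
	in := map[string]string{"user": "alice", "pass": "secret"}
	req, _ := json.Marshal(in)
	fmt.Println("request: ", string(req))

	// What the test proxy above would write back: "user" gains a "-test"
	// suffix and defaults for "type" and "_root" are filled in.
	out := map[string]string{"user": "alice-test", "pass": "secret", "type": "local", "_root": ""}
	resp, _ := json.Marshal(out)
	fmt.Println("response:", string(resp))
}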
145
cmd/serve/proxy/proxy_test.go
Normal file
@@ -0,0 +1,145 @@
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
_ "github.com/rclone/rclone/backend/local"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
)
|
||||
|
||||
func TestRun(t *testing.T) {
|
||||
opt := DefaultOpt
|
||||
cmd := "go run proxy_code.go"
|
||||
opt.AuthProxy = cmd
|
||||
p := New(&opt)
|
||||
|
||||
t.Run("Normal", func(t *testing.T) {
|
||||
config, err := p.run(map[string]string{
|
||||
"type": "ftp",
|
||||
"user": "me",
|
||||
"pass": "pass",
|
||||
"host": "127.0.0.1",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, configmap.Simple{
|
||||
"type": "ftp",
|
||||
"user": "me-test",
|
||||
"pass": "pass",
|
||||
"host": "127.0.0.1",
|
||||
"_root": "",
|
||||
}, config)
|
||||
})
|
||||
|
||||
t.Run("Error", func(t *testing.T) {
|
||||
config, err := p.run(map[string]string{
|
||||
"error": "potato",
|
||||
})
|
||||
assert.Nil(t, config)
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "potato")
|
||||
})
|
||||
|
||||
t.Run("Obscure", func(t *testing.T) {
|
||||
config, err := p.run(map[string]string{
|
||||
"type": "ftp",
|
||||
"user": "me",
|
||||
"pass": "pass",
|
||||
"host": "127.0.0.1",
|
||||
"_obscure": "pass,user",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
config["user"] = obscure.MustReveal(config["user"])
|
||||
config["pass"] = obscure.MustReveal(config["pass"])
|
||||
assert.Equal(t, configmap.Simple{
|
||||
"type": "ftp",
|
||||
"user": "me-test",
|
||||
"pass": "pass",
|
||||
"host": "127.0.0.1",
|
||||
"_obscure": "pass,user",
|
||||
"_root": "",
|
||||
}, config)
|
||||
})
|
||||
|
||||
const testUser = "testUser"
|
||||
const testPass = "testPass"
|
||||
|
||||
t.Run("call", func(t *testing.T) {
|
||||
// check cache empty
|
||||
assert.Equal(t, 0, p.vfsCache.Entries())
|
||||
defer p.vfsCache.Clear()
|
||||
|
||||
passwordBytes := []byte(testPass)
|
||||
value, err := p.call(testUser, testPass, passwordBytes)
|
||||
require.NoError(t, err)
|
||||
entry, ok := value.(cacheEntry)
|
||||
require.True(t, ok)
|
||||
|
||||
// check hash is correct in entry
|
||||
err = bcrypt.CompareHashAndPassword(entry.pwHash, passwordBytes)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, entry.vfs)
|
||||
f := entry.vfs.Fs()
|
||||
require.NotNil(t, f)
|
||||
assert.Equal(t, "proxy-"+testUser, f.Name())
|
||||
assert.True(t, strings.HasPrefix(f.String(), "Local file system"))
|
||||
|
||||
// check it is in the cache
|
||||
assert.Equal(t, 1, p.vfsCache.Entries())
|
||||
cacheValue, ok := p.vfsCache.GetMaybe(testUser)
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, value, cacheValue)
|
||||
})
|
||||
|
||||
t.Run("Call", func(t *testing.T) {
|
||||
// check cache empty
|
||||
assert.Equal(t, 0, p.vfsCache.Entries())
|
||||
defer p.vfsCache.Clear()
|
||||
|
||||
vfs, vfsKey, err := p.Call(testUser, testPass)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, vfs)
|
||||
assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
|
||||
assert.Equal(t, testUser, vfsKey)
|
||||
|
||||
// check it is in the cache
|
||||
assert.Equal(t, 1, p.vfsCache.Entries())
|
||||
cacheValue, ok := p.vfsCache.GetMaybe(testUser)
|
||||
assert.True(t, ok)
|
||||
cacheEntry, ok := cacheValue.(cacheEntry)
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, vfs, cacheEntry.vfs)
|
||||
|
||||
// Test Get works while we have something in the cache
|
||||
t.Run("Get", func(t *testing.T) {
|
||||
assert.Equal(t, vfs, p.Get(testUser))
|
||||
assert.Nil(t, p.Get("unknown"))
|
||||
})
|
||||
|
||||
// now try again from the cache
|
||||
vfs, vfsKey, err = p.Call(testUser, testPass)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, vfs)
|
||||
assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
|
||||
assert.Equal(t, testUser, vfsKey)
|
||||
|
||||
// check cache is at the same level
|
||||
assert.Equal(t, 1, p.vfsCache.Entries())
|
||||
|
||||
// now try again from the cache but with wrong password
|
||||
vfs, vfsKey, err = p.Call(testUser, testPass+"wrong")
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "incorrect password")
|
||||
require.Nil(t, vfs)
|
||||
require.Equal(t, "", vfsKey)
|
||||
|
||||
// check cache is at the same level
|
||||
assert.Equal(t, 1, p.vfsCache.Entries())
|
||||
|
||||
})
|
||||
|
||||
}
|
||||
18
cmd/serve/proxy/proxyflags/proxyflags.go
Normal file
@@ -0,0 +1,18 @@
// Package proxyflags implements command line flags to set up a proxy
package proxyflags

import (
	"github.com/rclone/rclone/cmd/serve/proxy"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/spf13/pflag"
)

// Options set by command line flags
var (
	Opt = proxy.DefaultOpt
)

// AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) {
	flags.StringVarP(flagSet, &Opt.AuthProxy, "auth-proxy", "", Opt.AuthProxy, "A program to use to create the backend from the auth.")
}
@@ -171,7 +171,7 @@ func newServer(f fs.Fs, opt *httplib.Options) *server {
|
||||
Server: httplib.NewServer(mux, opt),
|
||||
f: f,
|
||||
}
|
||||
mux.HandleFunc("/", s.handler)
|
||||
mux.HandleFunc(s.Opt.Prefix+"/", s.handler)
|
||||
return s
|
||||
}
|
||||
|
||||
@@ -211,7 +211,10 @@ func (s *server) handler(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Accept-Ranges", "bytes")
|
||||
w.Header().Set("Server", "rclone/"+fs.Version)
|
||||
|
||||
path := r.URL.Path
|
||||
path, ok := s.Path(w, r)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
remote := makeRemote(path)
|
||||
fs.Debugf(s.f, "%s %s", r.Method, path)
|
||||
|
||||
|
||||
35
cmd/serve/servetest/proxy_code.go
Normal file
@@ -0,0 +1,35 @@
// +build ignore

// A simple auth proxy for testing purposes
package main

import (
	"encoding/json"
	"log"
	"os"
)

func main() {
	if len(os.Args) < 2 {
		log.Fatalf("Syntax: %s <root>", os.Args[0])
	}
	root := os.Args[1]

	// Read the input
	var in map[string]string
	err := json.NewDecoder(os.Stdin).Decode(&in)
	if err != nil {
		log.Fatal(err)
	}

	// Write the output
	var out = map[string]string{
		"type":     "local",
		"_root":    root,
		"_obscure": "pass",
	}
	err = json.NewEncoder(os.Stdout).Encode(&out)
	if err != nil {
		log.Fatal(err)
	}
}
107
cmd/serve/servetest/servetest.go
Normal file
@@ -0,0 +1,107 @@
|
||||
// Package servetest provides infrastructure for running loopback
|
||||
// tests of "rclone serve backend:" against the backend integration
|
||||
// tests.
|
||||
package servetest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// StartFn describes the callback which should start the server with
|
||||
// the Fs passed in.
|
||||
// It should return a config for the backend used to connect to the
|
||||
// server and a clean up function
|
||||
type StartFn func(f fs.Fs) (configmap.Simple, func())
|
||||
|
||||
// run runs the server then runs the unit tests for the remote against
|
||||
// it.
|
||||
func run(t *testing.T, name string, start StartFn, useProxy bool) {
|
||||
fstest.Initialise()
|
||||
|
||||
fremote, _, clean, err := fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir)
|
||||
assert.NoError(t, err)
|
||||
defer clean()
|
||||
|
||||
err = fremote.Mkdir(context.Background(), "")
|
||||
assert.NoError(t, err)
|
||||
|
||||
f := fremote
|
||||
if useProxy {
|
||||
// If using a proxy don't pass in the backend
|
||||
f = nil
|
||||
|
||||
// the backend config will be made by the proxy
|
||||
prog, err := filepath.Abs("../servetest/proxy_code.go")
|
||||
require.NoError(t, err)
|
||||
cmd := "go run " + prog + " " + fremote.Root()
|
||||
|
||||
// FIXME this is untidy setting a global variable!
|
||||
proxyflags.Opt.AuthProxy = cmd
|
||||
defer func() {
|
||||
proxyflags.Opt.AuthProxy = ""
|
||||
}()
|
||||
}
|
||||
config, cleanup := start(f)
|
||||
defer cleanup()
|
||||
|
||||
// Change directory to run the tests
|
||||
cwd, err := os.Getwd()
|
||||
require.NoError(t, err)
|
||||
err = os.Chdir("../../../backend/" + name)
|
||||
require.NoError(t, err, "failed to cd to "+name+" backend")
|
||||
defer func() {
|
||||
// Change back to the old directory
|
||||
require.NoError(t, os.Chdir(cwd))
|
||||
}()
|
||||
|
||||
// Run the backend tests with an on the fly remote
|
||||
args := []string{"test"}
|
||||
if testing.Verbose() {
|
||||
args = append(args, "-v")
|
||||
}
|
||||
if *fstest.Verbose {
|
||||
args = append(args, "-verbose")
|
||||
}
|
||||
remoteName := name + "test:"
|
||||
args = append(args, "-remote", remoteName)
|
||||
args = append(args, "-list-retries", fmt.Sprint(*fstest.ListRetries))
|
||||
cmd := exec.Command("go", args...)
|
||||
|
||||
// Configure the backend with environment variables
|
||||
cmd.Env = os.Environ()
|
||||
prefix := "RCLONE_CONFIG_" + strings.ToUpper(remoteName[:len(remoteName)-1]) + "_"
|
||||
for k, v := range config {
|
||||
cmd.Env = append(cmd.Env, prefix+strings.ToUpper(k)+"="+v)
|
||||
}
|
||||
|
||||
// Run the test
|
||||
out, err := cmd.CombinedOutput()
|
||||
if len(out) != 0 {
|
||||
t.Logf("\n----------\n%s----------\n", string(out))
|
||||
}
|
||||
assert.NoError(t, err, "Running "+name+" integration tests")
|
||||
}
|
||||
|
||||
// Run runs the server then runs the unit tests for the remote against
|
||||
// it.
|
||||
func Run(t *testing.T, name string, start StartFn) {
|
||||
t.Run("Normal", func(t *testing.T) {
|
||||
run(t, name, start, false)
|
||||
})
|
||||
t.Run("AuthProxy", func(t *testing.T) {
|
||||
run(t, name, start, true)
|
||||
})
|
||||
}
|
||||
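The servetest harness above configures the backend under test purely through RCLONE_CONFIG_<REMOTE>_<KEY> environment variables built from the config returned by the server. A standalone sketch of that naming scheme follows; the remote name and values are illustrative.

package main

import (
	"fmt"
	"strings"
)

func main() {
	remoteName := "sftptest:" // trailing colon as passed on the test command line
	config := map[string]string{
		"type": "sftp",
		"host": "127.0.0.1",
		"port": "2022",
	}

	// Strip the colon, upper-case the remote name and each key.
	prefix := "RCLONE_CONFIG_" + strings.ToUpper(strings.TrimSuffix(remoteName, ":")) + "_"
	for k, v := range config {
		fmt.Println(prefix + strings.ToUpper(k) + "=" + v)
	}
	// Prints e.g. RCLONE_CONFIG_SFTPTEST_TYPE=sftp and so on.
}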
@@ -47,7 +47,6 @@ func shellUnEscape(str string) string {
|
||||
// Info about the current connection
|
||||
type conn struct {
|
||||
vfs *vfs.VFS
|
||||
f fs.Fs
|
||||
handlers sftp.Handlers
|
||||
what string
|
||||
}
|
||||
@@ -65,7 +64,7 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (
|
||||
fs.Debugf(c.what, "exec command: binary = %q, args = %q", binary, args)
|
||||
switch binary {
|
||||
case "df":
|
||||
about := c.f.Features().About
|
||||
about := c.vfs.Fs().Features().About
|
||||
if about == nil {
|
||||
return errors.New("df not supported")
|
||||
}
|
||||
@@ -121,7 +120,7 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (
|
||||
// special cases for rclone command detection
|
||||
switch args {
|
||||
case "'abc' | md5sum":
|
||||
if c.f.Hashes().Contains(hash.MD5) {
|
||||
if c.vfs.Fs().Hashes().Contains(hash.MD5) {
|
||||
_, err = fmt.Fprintf(out, "0bee89b07a248e27c83fc3d5951213c1 -\n")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "send output failed")
|
||||
@@ -130,7 +129,7 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (
|
||||
return errors.New("md5 hash not supported")
|
||||
}
|
||||
case "'abc' | sha1sum":
|
||||
if c.f.Hashes().Contains(hash.SHA1) {
|
||||
if c.vfs.Fs().Hashes().Contains(hash.SHA1) {
|
||||
_, err = fmt.Fprintf(out, "03cfd743661f07975fa2f1220c5194cbaff48451 -\n")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "send output failed")
|
||||
|
||||
@@ -19,14 +19,14 @@ type vfsHandler struct {
|
||||
}
|
||||
|
||||
// vfsHandler returns a Handlers object with the test handlers.
|
||||
func newVFSHandler(vfs *vfs.VFS) (sftp.Handlers, error) {
|
||||
func newVFSHandler(vfs *vfs.VFS) sftp.Handlers {
|
||||
v := vfsHandler{VFS: vfs}
|
||||
return sftp.Handlers{
|
||||
FileGet: v,
|
||||
FilePut: v,
|
||||
FileCmd: v,
|
||||
FileList: v,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (v vfsHandler) Fileread(r *sftp.Request) (io.ReaderAt, error) {
|
||||
|
||||
@@ -18,7 +18,8 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pkg/sftp"
|
||||
"github.com/rclone/rclone/cmd/serve/proxy"
|
||||
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/lib/env"
|
||||
@@ -33,21 +34,47 @@ type server struct {
|
||||
opt Options
|
||||
vfs *vfs.VFS
|
||||
config *ssh.ServerConfig
|
||||
handlers sftp.Handlers
|
||||
listener net.Listener
|
||||
waitChan chan struct{} // for waiting on the listener to close
|
||||
proxy *proxy.Proxy
|
||||
}
|
||||
|
||||
func newServer(f fs.Fs, opt *Options) *server {
|
||||
s := &server{
|
||||
f: f,
|
||||
vfs: vfs.New(f, &vfsflags.Opt),
|
||||
opt: *opt,
|
||||
waitChan: make(chan struct{}),
|
||||
}
|
||||
if proxyflags.Opt.AuthProxy != "" {
|
||||
s.proxy = proxy.New(&proxyflags.Opt)
|
||||
} else {
|
||||
s.vfs = vfs.New(f, &vfsflags.Opt)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// getVFS gets the vfs from s or the proxy
|
||||
func (s *server) getVFS(what string, sshConn *ssh.ServerConn) (VFS *vfs.VFS) {
|
||||
if s.proxy == nil {
|
||||
return s.vfs
|
||||
}
|
||||
if sshConn.Permissions == nil || sshConn.Permissions.Extensions == nil {
|
||||
fs.Infof(what, "SSH Permissions Extensions not found")
|
||||
return nil
|
||||
}
|
||||
key := sshConn.Permissions.Extensions["_vfsKey"]
|
||||
if key == "" {
|
||||
fs.Infof(what, "VFS key not found")
|
||||
return nil
|
||||
}
|
||||
VFS = s.proxy.Get(key)
|
||||
if VFS == nil {
|
||||
fs.Infof(what, "failed to read VFS from cache")
|
||||
return nil
|
||||
}
|
||||
return VFS
|
||||
}
|
||||
|
||||
func (s *server) acceptConnections() {
|
||||
for {
|
||||
nConn, err := s.listener.Accept()
|
||||
@@ -73,11 +100,15 @@ func (s *server) acceptConnections() {
|
||||
go ssh.DiscardRequests(reqs)
|
||||
|
||||
c := &conn{
|
||||
vfs: s.vfs,
|
||||
f: s.f,
|
||||
handlers: s.handlers,
|
||||
what: what,
|
||||
what: what,
|
||||
vfs: s.getVFS(what, sshConn),
|
||||
}
|
||||
if c.vfs == nil {
|
||||
fs.Infof(what, "Closing unauthenticated connection (couldn't find VFS)")
|
||||
_ = nConn.Close()
|
||||
continue
|
||||
}
|
||||
c.handlers = newVFSHandler(c.vfs)
|
||||
|
||||
// Accept all channels
|
||||
go c.handleChannels(chans)
|
||||
@@ -109,7 +140,19 @@ func (s *server) serve() (err error) {
|
||||
ServerVersion: "SSH-2.0-" + fs.Config.UserAgent,
|
||||
PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
|
||||
fs.Debugf(describeConn(c), "Password login attempt for %s", c.User())
|
||||
if s.opt.User != "" && s.opt.Pass != "" {
|
||||
if s.proxy != nil {
|
||||
// query the proxy for the config
|
||||
_, vfsKey, err := s.proxy.Call(c.User(), string(pass))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// just return the Key so we can get it back from the cache
|
||||
return &ssh.Permissions{
|
||||
Extensions: map[string]string{
|
||||
"_vfsKey": vfsKey,
|
||||
},
|
||||
}, nil
|
||||
} else if s.opt.User != "" && s.opt.Pass != "" {
|
||||
userOK := subtle.ConstantTimeCompare([]byte(c.User()), []byte(s.opt.User))
|
||||
passOK := subtle.ConstantTimeCompare(pass, []byte(s.opt.Pass))
|
||||
if (userOK & passOK) == 1 {
|
||||
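The PasswordCallback above avoids building the backend twice by returning only the VFS cache key inside ssh.Permissions.Extensions; getVFS later reads it back from the established connection. A minimal sketch of that hand-off (the key value is illustrative), assuming golang.org/x/crypto/ssh:

package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	// What the PasswordCallback returns on successful proxy auth.
	perms := &ssh.Permissions{
		Extensions: map[string]string{"_vfsKey": "alice"},
	}

	// After the handshake the server finds this same struct on
	// sshConn.Permissions and recovers the cache key from it.
	if perms != nil && perms.Extensions != nil {
		fmt.Println("vfs key:", perms.Extensions["_vfsKey"])
	}
}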
@@ -120,6 +163,9 @@ func (s *server) serve() (err error) {
|
||||
},
|
||||
PublicKeyCallback: func(c ssh.ConnMetadata, pubKey ssh.PublicKey) (*ssh.Permissions, error) {
|
||||
fs.Debugf(describeConn(c), "Public key login attempt for %s", c.User())
|
||||
if s.proxy != nil {
|
||||
return nil, errors.New("public key login not allowed when using auth proxy")
|
||||
}
|
||||
if _, ok := authorizedKeysMap[string(pubKey.Marshal())]; ok {
|
||||
return &ssh.Permissions{
|
||||
// Record the public key used for authentication.
|
||||
@@ -178,11 +224,6 @@ func (s *server) serve() (err error) {
|
||||
}
|
||||
fs.Logf(nil, "SFTP server listening on %v\n", s.listener.Addr())
|
||||
|
||||
s.handlers, err = newVFSHandler(s.vfs)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "serve sftp: failed to create fs")
|
||||
}
|
||||
|
||||
go s.acceptConnections()
|
||||
|
||||
return nil
|
||||
|
||||
@@ -6,6 +6,9 @@ package sftp
|
||||
|
||||
import (
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/cmd/serve/proxy"
|
||||
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
"github.com/rclone/rclone/fs/rc"
|
||||
"github.com/rclone/rclone/vfs"
|
||||
@@ -46,6 +49,7 @@ func AddFlags(flagSet *pflag.FlagSet, Opt *Options) {
|
||||
|
||||
func init() {
|
||||
vfsflags.AddFlags(Command.Flags())
|
||||
proxyflags.AddFlags(Command.Flags())
|
||||
AddFlags(Command.Flags(), &Opt)
|
||||
}
|
||||
|
||||
@@ -84,10 +88,15 @@ reachable externally then supply "--addr :2022" for example.
|
||||
Note that the default of "--vfs-cache-mode off" is fine for the rclone
|
||||
sftp backend, but it may not be with other SFTP clients.
|
||||
|
||||
` + vfs.Help,
|
||||
` + vfs.Help + proxy.Help,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
f := cmd.NewFsSrc(args)
|
||||
var f fs.Fs
|
||||
if proxyflags.Opt.AuthProxy == "" {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
f = cmd.NewFsSrc(args)
|
||||
} else {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
}
|
||||
cmd.Run(false, true, command, func() error {
|
||||
s := newServer(f, &Opt)
|
||||
err := s.Serve()
|
||||
|
||||
@@ -8,16 +8,15 @@
|
||||
package sftp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/sftp"
|
||||
_ "github.com/rclone/rclone/backend/local"
|
||||
"github.com/rclone/rclone/cmd/serve/servetest"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
@@ -38,58 +37,35 @@ var (
|
||||
// TestSftp runs the sftp server then runs the unit tests for the
|
||||
// sftp remote against it.
|
||||
func TestSftp(t *testing.T) {
|
||||
fstest.Initialise()
|
||||
// Configure and start the server
|
||||
start := func(f fs.Fs) (configmap.Simple, func()) {
|
||||
opt := DefaultOpt
|
||||
opt.ListenAddr = testBindAddress
|
||||
opt.User = testUser
|
||||
opt.Pass = testPass
|
||||
|
||||
fremote, _, clean, err := fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir)
|
||||
assert.NoError(t, err)
|
||||
defer clean()
|
||||
w := newServer(f, &opt)
|
||||
assert.NoError(t, w.serve())
|
||||
|
||||
err = fremote.Mkdir(context.Background(), "")
|
||||
assert.NoError(t, err)
|
||||
// Read the host and port we started on
|
||||
addr := w.Addr()
|
||||
colon := strings.LastIndex(addr, ":")
|
||||
|
||||
opt := DefaultOpt
|
||||
opt.ListenAddr = testBindAddress
|
||||
opt.User = testUser
|
||||
opt.Pass = testPass
|
||||
// Config for the backend we'll use to connect to the server
|
||||
config := configmap.Simple{
|
||||
"type": "sftp",
|
||||
"user": testUser,
|
||||
"pass": obscure.MustObscure(testPass),
|
||||
"host": addr[:colon],
|
||||
"port": addr[colon+1:],
|
||||
}
|
||||
|
||||
// Start the server
|
||||
w := newServer(fremote, &opt)
|
||||
assert.NoError(t, w.serve())
|
||||
defer func() {
|
||||
w.Close()
|
||||
w.Wait()
|
||||
}()
|
||||
|
||||
// Change directory to run the tests
|
||||
err = os.Chdir("../../../backend/sftp")
|
||||
assert.NoError(t, err, "failed to cd to sftp backend")
|
||||
|
||||
// Run the sftp tests with an on the fly remote
|
||||
args := []string{"test"}
|
||||
if testing.Verbose() {
|
||||
args = append(args, "-v")
|
||||
// return a stop function
|
||||
return config, func() {
|
||||
w.Close()
|
||||
w.Wait()
|
||||
}
|
||||
}
|
||||
if *fstest.Verbose {
|
||||
args = append(args, "-verbose")
|
||||
}
|
||||
args = append(args, "-remote", "sftptest:")
|
||||
cmd := exec.Command("go", args...)
|
||||
addr := w.Addr()
|
||||
colon := strings.LastIndex(addr, ":")
|
||||
if colon < 0 {
|
||||
panic("need a : in the address: " + addr)
|
||||
}
|
||||
host, port := addr[:colon], addr[colon+1:]
|
||||
cmd.Env = append(os.Environ(),
|
||||
"RCLONE_CONFIG_SFTPTEST_TYPE=sftp",
|
||||
"RCLONE_CONFIG_SFTPTEST_HOST="+host,
|
||||
"RCLONE_CONFIG_SFTPTEST_PORT="+port,
|
||||
"RCLONE_CONFIG_SFTPTEST_USER="+testUser,
|
||||
"RCLONE_CONFIG_SFTPTEST_PASS="+obscure.MustObscure(testPass),
|
||||
)
|
||||
out, err := cmd.CombinedOutput()
|
||||
if len(out) != 0 {
|
||||
t.Logf("\n----------\n%s----------\n", string(out))
|
||||
}
|
||||
assert.NoError(t, err, "Running sftp integration tests")
|
||||
|
||||
servetest.Run(t, "sftp", start)
|
||||
}
|
||||
|
||||
@@ -12,9 +12,11 @@ import (
|
||||
"github.com/rclone/rclone/cmd/serve/httplib"
|
||||
"github.com/rclone/rclone/cmd/serve/httplib/httpflags"
|
||||
"github.com/rclone/rclone/cmd/serve/httplib/serve"
|
||||
"github.com/rclone/rclone/cmd/serve/proxy"
|
||||
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/log"
|
||||
"github.com/rclone/rclone/lib/errors"
|
||||
"github.com/rclone/rclone/vfs"
|
||||
"github.com/rclone/rclone/vfs/vfsflags"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -30,6 +32,7 @@ var (
|
||||
func init() {
|
||||
httpflags.AddFlags(Command.Flags())
|
||||
vfsflags.AddFlags(Command.Flags())
|
||||
proxyflags.AddFlags(Command.Flags())
|
||||
Command.Flags().StringVar(&hashName, "etag-hash", "", "Which hash to use for the ETag, or auto or blank for off")
|
||||
Command.Flags().BoolVar(&disableGETDir, "disable-dir-list", false, "Disable HTML directory list on GET request for a directory")
|
||||
}
|
||||
@@ -57,10 +60,15 @@ supported hash on the backend or you can use a named hash such as
|
||||
|
||||
Use "rclone hashsum" to see the full list.
|
||||
|
||||
` + httplib.Help + vfs.Help,
|
||||
` + httplib.Help + vfs.Help + proxy.Help,
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
f := cmd.NewFsSrc(args)
|
||||
var f fs.Fs
|
||||
if proxyflags.Opt.AuthProxy == "" {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
f = cmd.NewFsSrc(args)
|
||||
} else {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
}
|
||||
hashType = hash.None
|
||||
if hashName == "auto" {
|
||||
hashType = f.Hashes().GetOne()
|
||||
@@ -101,8 +109,9 @@ Use "rclone hashsum" to see the full list.
|
||||
type WebDAV struct {
|
||||
*httplib.Server
|
||||
f fs.Fs
|
||||
vfs *vfs.VFS
|
||||
_vfs *vfs.VFS // don't use directly, use getVFS
|
||||
webdavhandler *webdav.Handler
|
||||
proxy *proxy.Proxy
|
||||
}
|
||||
|
||||
// check interface
|
||||
@@ -111,21 +120,58 @@ var _ webdav.FileSystem = (*WebDAV)(nil)
|
||||
// Make a new WebDAV to serve the remote
|
||||
func newWebDAV(f fs.Fs, opt *httplib.Options) *WebDAV {
|
||||
w := &WebDAV{
|
||||
f: f,
|
||||
vfs: vfs.New(f, &vfsflags.Opt),
|
||||
f: f,
|
||||
}
|
||||
if proxyflags.Opt.AuthProxy != "" {
|
||||
w.proxy = proxy.New(&proxyflags.Opt)
|
||||
// override auth
|
||||
copyOpt := *opt
|
||||
copyOpt.Auth = w.auth
|
||||
opt = &copyOpt
|
||||
} else {
|
||||
w._vfs = vfs.New(f, &vfsflags.Opt)
|
||||
}
|
||||
w.Server = httplib.NewServer(http.HandlerFunc(w.handler), opt)
|
||||
webdavHandler := &webdav.Handler{
|
||||
Prefix: w.Server.Opt.Prefix,
|
||||
FileSystem: w,
|
||||
LockSystem: webdav.NewMemLS(),
|
||||
Logger: w.logRequest, // FIXME
|
||||
}
|
||||
w.webdavhandler = webdavHandler
|
||||
w.Server = httplib.NewServer(http.HandlerFunc(w.handler), opt)
|
||||
return w
|
||||
}
|
||||
|
||||
// Gets the VFS in use for this request
|
||||
func (w *WebDAV) getVFS(ctx context.Context) (VFS *vfs.VFS, err error) {
|
||||
if w._vfs != nil {
|
||||
return w._vfs, nil
|
||||
}
|
||||
value := ctx.Value(httplib.ContextAuthKey)
|
||||
if value == nil {
|
||||
return nil, errors.New("no VFS found in context")
|
||||
}
|
||||
VFS, ok := value.(*vfs.VFS)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("context value is not VFS: %#v", value)
|
||||
}
|
||||
return VFS, nil
|
||||
}
|
||||
|
||||
// auth does proxy authorization
|
||||
func (w *WebDAV) auth(user, pass string) (value interface{}, err error) {
|
||||
VFS, _, err := w.proxy.Call(user, pass)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return VFS, err
|
||||
}
|
||||
|
||||
func (w *WebDAV) handler(rw http.ResponseWriter, r *http.Request) {
|
||||
urlPath := r.URL.Path
|
||||
urlPath, ok := w.Path(rw, r)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
isDir := strings.HasSuffix(urlPath, "/")
|
||||
remote := strings.Trim(urlPath, "/")
|
||||
if !disableGETDir && (r.Method == "GET" || r.Method == "HEAD") && isDir {
|
||||
@@ -138,8 +184,14 @@ func (w *WebDAV) handler(rw http.ResponseWriter, r *http.Request) {
|
||||
// serveDir serves a directory index at dirRemote
|
||||
// This is similar to serveDir in serve http.
|
||||
func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote string) {
|
||||
VFS, err := w.getVFS(r.Context())
|
||||
if err != nil {
|
||||
http.Error(rw, "Root directory not found", http.StatusNotFound)
|
||||
fs.Errorf(nil, "Failed to serve directory: %v", err)
|
||||
return
|
||||
}
|
||||
// List the directory
|
||||
node, err := w.vfs.Stat(dirRemote)
|
||||
node, err := VFS.Stat(dirRemote)
|
||||
if err == vfs.ENOENT {
|
||||
http.Error(rw, "Directory not found", http.StatusNotFound)
|
||||
return
|
||||
@@ -186,8 +238,12 @@ func (w *WebDAV) logRequest(r *http.Request, err error) {
|
||||
|
||||
// Mkdir creates a directory
|
||||
func (w *WebDAV) Mkdir(ctx context.Context, name string, perm os.FileMode) (err error) {
|
||||
defer log.Trace(name, "perm=%v", perm)("err = %v", &err)
|
||||
dir, leaf, err := w.vfs.StatParent(name)
|
||||
// defer log.Trace(name, "perm=%v", perm)("err = %v", &err)
|
||||
VFS, err := w.getVFS(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dir, leaf, err := VFS.StatParent(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -197,8 +253,12 @@ func (w *WebDAV) Mkdir(ctx context.Context, name string, perm os.FileMode) (err
|
||||
|
||||
// OpenFile opens a file or a directory
|
||||
func (w *WebDAV) OpenFile(ctx context.Context, name string, flags int, perm os.FileMode) (file webdav.File, err error) {
|
||||
defer log.Trace(name, "flags=%v, perm=%v", flags, perm)("err = %v", &err)
|
||||
f, err := w.vfs.OpenFile(name, flags, perm)
|
||||
// defer log.Trace(name, "flags=%v, perm=%v", flags, perm)("err = %v", &err)
|
||||
VFS, err := w.getVFS(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f, err := VFS.OpenFile(name, flags, perm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -207,8 +267,12 @@ func (w *WebDAV) OpenFile(ctx context.Context, name string, flags int, perm os.F
|
||||
|
||||
// RemoveAll removes a file or a directory and its contents
|
||||
func (w *WebDAV) RemoveAll(ctx context.Context, name string) (err error) {
|
||||
defer log.Trace(name, "")("err = %v", &err)
|
||||
node, err := w.vfs.Stat(name)
|
||||
// defer log.Trace(name, "")("err = %v", &err)
|
||||
VFS, err := w.getVFS(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
node, err := VFS.Stat(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -221,14 +285,22 @@ func (w *WebDAV) RemoveAll(ctx context.Context, name string) (err error) {
|
||||
|
||||
// Rename a file or a directory
|
||||
func (w *WebDAV) Rename(ctx context.Context, oldName, newName string) (err error) {
|
||||
defer log.Trace(oldName, "newName=%q", newName)("err = %v", &err)
|
||||
return w.vfs.Rename(oldName, newName)
|
||||
// defer log.Trace(oldName, "newName=%q", newName)("err = %v", &err)
|
||||
VFS, err := w.getVFS(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return VFS.Rename(oldName, newName)
|
||||
}
|
||||
|
||||
// Stat returns info about the file or directory
|
||||
func (w *WebDAV) Stat(ctx context.Context, name string) (fi os.FileInfo, err error) {
|
||||
defer log.Trace(name, "")("fi=%+v, err = %v", &fi, &err)
|
||||
fi, err = w.vfs.Stat(name)
|
||||
// defer log.Trace(name, "")("fi=%+v, err = %v", &fi, &err)
|
||||
VFS, err := w.getVFS(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fi, err = VFS.Stat(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -270,7 +342,7 @@ type FileInfo struct {
|
||||
|
||||
// ETag returns an ETag for the FileInfo
|
||||
func (fi FileInfo) ETag(ctx context.Context) (etag string, err error) {
|
||||
defer log.Trace(fi, "")("etag=%q, err=%v", &etag, &err)
|
||||
// defer log.Trace(fi, "")("etag=%q, err=%v", &etag, &err)
|
||||
if hashType == hash.None {
|
||||
return "", webdav.ErrNotImplemented
|
||||
}
|
||||
@@ -293,7 +365,7 @@ func (fi FileInfo) ETag(ctx context.Context) (etag string, err error) {
|
||||
|
||||
// ContentType returns a content type for the FileInfo
|
||||
func (fi FileInfo) ContentType(ctx context.Context) (contentType string, err error) {
|
||||
defer log.Trace(fi, "")("etag=%q, err=%v", &contentType, &err)
|
||||
// defer log.Trace(fi, "")("etag=%q, err=%v", &contentType, &err)
|
||||
node, ok := (fi.FileInfo).(vfs.Node)
|
||||
if !ok {
|
||||
fs.Errorf(fi, "Expecting vfs.Node, got %T", fi.FileInfo)
|
||||
|
||||
@@ -8,21 +8,22 @@
|
||||
package webdav
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
_ "github.com/rclone/rclone/backend/local"
|
||||
"github.com/rclone/rclone/cmd/serve/httplib"
|
||||
"github.com/rclone/rclone/cmd/serve/servetest"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/net/webdav"
|
||||
@@ -30,6 +31,8 @@ import (
|
||||
|
||||
const (
|
||||
testBindAddress = "localhost:0"
|
||||
testUser = "user"
|
||||
testPass = "pass"
|
||||
)
|
||||
|
||||
// check interfaces
|
||||
@@ -42,50 +45,34 @@ var (
|
||||
// TestWebDav runs the webdav server then runs the unit tests for the
|
||||
// webdav remote against it.
|
||||
func TestWebDav(t *testing.T) {
|
||||
opt := httplib.DefaultOpt
|
||||
opt.ListenAddr = testBindAddress
|
||||
// Configure and start the server
|
||||
start := func(f fs.Fs) (configmap.Simple, func()) {
|
||||
opt := httplib.DefaultOpt
|
||||
opt.ListenAddr = testBindAddress
|
||||
opt.BasicUser = testUser
|
||||
opt.BasicPass = testPass
|
||||
hashType = hash.MD5
|
||||
|
||||
fstest.Initialise()
|
||||
// Start the server
|
||||
w := newWebDAV(f, &opt)
|
||||
assert.NoError(t, w.serve())
|
||||
|
||||
fremote, _, clean, err := fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir)
|
||||
assert.NoError(t, err)
|
||||
defer clean()
|
||||
// Config for the backend we'll use to connect to the server
|
||||
config := configmap.Simple{
|
||||
"type": "webdav",
|
||||
"vendor": "other",
|
||||
"url": w.Server.URL(),
|
||||
"user": testUser,
|
||||
"pass": obscure.MustObscure(testPass),
|
||||
}
|
||||
|
||||
err = fremote.Mkdir(context.Background(), "")
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Start the server
|
||||
w := newWebDAV(fremote, &opt)
|
||||
assert.NoError(t, w.serve())
|
||||
defer func() {
|
||||
w.Close()
|
||||
w.Wait()
|
||||
}()
|
||||
|
||||
// Change directory to run the tests
|
||||
err = os.Chdir("../../../backend/webdav")
|
||||
assert.NoError(t, err, "failed to cd to webdav remote")
|
||||
|
||||
// Run the webdav tests with an on the fly remote
|
||||
args := []string{"test"}
|
||||
if testing.Verbose() {
|
||||
args = append(args, "-v")
|
||||
return config, func() {
|
||||
w.Close()
|
||||
w.Wait()
|
||||
}
|
||||
}
|
||||
if *fstest.Verbose {
|
||||
args = append(args, "-verbose")
|
||||
}
|
||||
args = append(args, "-remote", "webdavtest:")
|
||||
cmd := exec.Command("go", args...)
|
||||
cmd.Env = append(os.Environ(),
|
||||
"RCLONE_CONFIG_WEBDAVTEST_TYPE=webdav",
|
||||
"RCLONE_CONFIG_WEBDAVTEST_URL="+w.Server.URL(),
|
||||
"RCLONE_CONFIG_WEBDAVTEST_VENDOR=other",
|
||||
)
|
||||
out, err := cmd.CombinedOutput()
|
||||
if len(out) != 0 {
|
||||
t.Logf("\n----------\n%s----------\n", string(out))
|
||||
}
|
||||
assert.NoError(t, err, "Running webdav integration tests")
|
||||
|
||||
servetest.Run(t, "webdav", start)
|
||||
}
|
||||
|
||||
// Test serve http functionality in serve webdav
|
||||
@@ -97,10 +84,6 @@ var (
|
||||
)
|
||||
|
||||
func TestHTTPFunction(t *testing.T) {
|
||||
// cd to correct directory for testing
|
||||
err := os.Chdir("../../cmd/serve/webdav")
|
||||
assert.NoError(t, err, "failed to cd to webdav cmd directory")
|
||||
|
||||
// exclude files called hidden.txt and directories called hidden
|
||||
require.NoError(t, filter.Active.AddRule("- hidden.txt"))
|
||||
require.NoError(t, filter.Active.AddRule("- hidden/**"))
|
||||
|
||||
88
fs/cache/cache.go
vendored
@@ -2,93 +2,39 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/lib/cache"
|
||||
)
|
||||
|
||||
var (
|
||||
fsCacheMu sync.Mutex
|
||||
fsCache = map[string]*cacheEntry{}
|
||||
fsNewFs = fs.NewFs // for tests
|
||||
expireRunning = false
|
||||
cacheExpireDuration = 300 * time.Second // expire the cache entry when it is older than this
|
||||
cacheExpireInterval = 60 * time.Second // interval to run the cache expire
|
||||
c = cache.New()
|
||||
)
|
||||
|
||||
type cacheEntry struct {
|
||||
f fs.Fs // cached f
|
||||
err error // nil or fs.ErrorIsFile
|
||||
fsString string // remote string
|
||||
lastUsed time.Time // time used for expiry
|
||||
// GetFn gets a fs.Fs named fsString either from the cache or creates
|
||||
// it afresh with the create function
|
||||
func GetFn(fsString string, create func(fsString string) (fs.Fs, error)) (f fs.Fs, err error) {
|
||||
value, err := c.Get(fsString, func(fsString string) (value interface{}, ok bool, error error) {
|
||||
f, err := create(fsString)
|
||||
ok = err == nil || err == fs.ErrorIsFile
|
||||
return f, ok, err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return value.(fs.Fs), nil
|
||||
}
|
||||
|
||||
// Get gets a fs.Fs named fsString either from the cache or creates it afresh
|
||||
func Get(fsString string) (f fs.Fs, err error) {
|
||||
fsCacheMu.Lock()
|
||||
entry, ok := fsCache[fsString]
|
||||
if !ok {
|
||||
fsCacheMu.Unlock() // Unlock in case Get is called recursively
|
||||
f, err = fsNewFs(fsString)
|
||||
if err != nil && err != fs.ErrorIsFile {
|
||||
return f, err
|
||||
}
|
||||
entry = &cacheEntry{
|
||||
f: f,
|
||||
fsString: fsString,
|
||||
err: err,
|
||||
}
|
||||
fsCacheMu.Lock()
|
||||
fsCache[fsString] = entry
|
||||
}
|
||||
defer fsCacheMu.Unlock()
|
||||
entry.lastUsed = time.Now()
|
||||
if !expireRunning {
|
||||
time.AfterFunc(cacheExpireInterval, cacheExpire)
|
||||
expireRunning = true
|
||||
}
|
||||
return entry.f, entry.err
|
||||
return GetFn(fsString, fs.NewFs)
|
||||
}
|
||||
|
||||
// Put puts an fs.Fs named fsString into the cache
|
||||
func Put(fsString string, f fs.Fs) {
|
||||
fsCacheMu.Lock()
|
||||
defer fsCacheMu.Unlock()
|
||||
fsCache[fsString] = &cacheEntry{
|
||||
f: f,
|
||||
fsString: fsString,
|
||||
lastUsed: time.Now(),
|
||||
}
|
||||
if !expireRunning {
|
||||
time.AfterFunc(cacheExpireInterval, cacheExpire)
|
||||
expireRunning = true
|
||||
}
|
||||
}
|
||||
|
||||
// cacheExpire expires any entries that haven't been used recently
|
||||
func cacheExpire() {
|
||||
fsCacheMu.Lock()
|
||||
defer fsCacheMu.Unlock()
|
||||
now := time.Now()
|
||||
for fsString, entry := range fsCache {
|
||||
if now.Sub(entry.lastUsed) > cacheExpireDuration {
|
||||
delete(fsCache, fsString)
|
||||
}
|
||||
}
|
||||
if len(fsCache) != 0 {
|
||||
time.AfterFunc(cacheExpireInterval, cacheExpire)
|
||||
expireRunning = true
|
||||
} else {
|
||||
expireRunning = false
|
||||
}
|
||||
c.Put(fsString, f)
|
||||
}
|
||||
|
||||
// Clear removes everything from the cache
|
||||
func Clear() {
|
||||
fsCacheMu.Lock()
|
||||
for k := range fsCache {
|
||||
delete(fsCache, k)
|
||||
}
|
||||
fsCacheMu.Unlock()
|
||||
c.Clear()
|
||||
}
|
||||
|
||||
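With this change fs/cache becomes a thin wrapper over lib/cache: GetFn lets a caller such as the auth proxy supply its own constructor while sharing the expiry logic. A short usage sketch follows; the "/tmp" remote and error handling are illustrative, not part of this change.

package main

import (
	"fmt"
	"log"

	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
)

func main() {
	f, err := cache.GetFn("/tmp", func(fsString string) (fs.Fs, error) {
		fmt.Println("creating Fs for", fsString) // only runs on a cache miss
		return fs.NewFs(fsString)
	})
	if err != nil && err != fs.ErrorIsFile {
		log.Fatal(err)
	}
	fmt.Println("got:", f.Name(), f.Root())

	// A second call with the same string is served from the cache,
	// so the constructor passed here is never invoked.
	_, _ = cache.GetFn("/tmp", func(fsString string) (fs.Fs, error) {
		return fs.NewFs(fsString)
	})
}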
99
fs/cache/cache_test.go
vendored
@@ -4,7 +4,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fstest/mockfs"
|
||||
@@ -17,10 +16,9 @@ var (
|
||||
errSentinel = errors.New("an error")
|
||||
)
|
||||
|
||||
func mockNewFs(t *testing.T) func() {
|
||||
func mockNewFs(t *testing.T) (func(), func(path string) (fs.Fs, error)) {
|
||||
called = 0
|
||||
oldFsNewFs := fsNewFs
|
||||
fsNewFs = func(path string) (fs.Fs, error) {
|
||||
create := func(path string) (fs.Fs, error) {
|
||||
assert.Equal(t, 0, called)
|
||||
called++
|
||||
switch path {
|
||||
@@ -33,115 +31,74 @@ func mockNewFs(t *testing.T) func() {
|
||||
}
|
||||
panic(fmt.Sprintf("Unknown path %q", path))
|
||||
}
|
||||
return func() {
|
||||
fsNewFs = oldFsNewFs
|
||||
fsCacheMu.Lock()
|
||||
fsCache = map[string]*cacheEntry{}
|
||||
expireRunning = false
|
||||
fsCacheMu.Unlock()
|
||||
cleanup := func() {
|
||||
c.Clear()
|
||||
}
|
||||
return cleanup, create
|
||||
}
|
||||
|
||||
func TestGet(t *testing.T) {
|
||||
defer mockNewFs(t)()
|
||||
cleanup, create := mockNewFs(t)
|
||||
defer cleanup()
|
||||
|
||||
assert.Equal(t, 0, len(fsCache))
|
||||
assert.Equal(t, 0, c.Entries())
|
||||
|
||||
f, err := Get("/")
|
||||
f, err := GetFn("/", create)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 1, len(fsCache))
|
||||
assert.Equal(t, 1, c.Entries())
|
||||
|
||||
f2, err := Get("/")
|
||||
f2, err := GetFn("/", create)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, f, f2)
|
||||
}
|
||||
|
||||
func TestGetFile(t *testing.T) {
|
||||
defer mockNewFs(t)()
|
||||
cleanup, create := mockNewFs(t)
|
||||
defer cleanup()
|
||||
|
||||
assert.Equal(t, 0, len(fsCache))
|
||||
assert.Equal(t, 0, c.Entries())
|
||||
|
||||
f, err := Get("/file.txt")
|
||||
f, err := GetFn("/file.txt", create)
|
||||
require.Equal(t, fs.ErrorIsFile, err)
|
||||
|
||||
assert.Equal(t, 1, len(fsCache))
|
||||
assert.Equal(t, 1, c.Entries())
|
||||
|
||||
f2, err := Get("/file.txt")
|
||||
f2, err := GetFn("/file.txt", create)
|
||||
require.Equal(t, fs.ErrorIsFile, err)
|
||||
|
||||
assert.Equal(t, f, f2)
|
||||
}
|
||||
|
||||
func TestGetError(t *testing.T) {
|
||||
defer mockNewFs(t)()
|
||||
cleanup, create := mockNewFs(t)
|
||||
defer cleanup()
|
||||
|
||||
assert.Equal(t, 0, len(fsCache))
|
||||
assert.Equal(t, 0, c.Entries())
|
||||
|
||||
f, err := Get("/error")
|
||||
f, err := GetFn("/error", create)
|
||||
require.Equal(t, errSentinel, err)
|
||||
require.Equal(t, nil, f)
|
||||
|
||||
assert.Equal(t, 0, len(fsCache))
|
||||
assert.Equal(t, 0, c.Entries())
|
||||
}
|
||||
|
||||
func TestPut(t *testing.T) {
|
||||
defer mockNewFs(t)()
|
||||
cleanup, create := mockNewFs(t)
|
||||
defer cleanup()
|
||||
|
||||
f := mockfs.NewFs("mock", "mock")
|
||||
|
||||
assert.Equal(t, 0, len(fsCache))
|
||||
assert.Equal(t, 0, c.Entries())
|
||||
|
||||
Put("/alien", f)
|
||||
|
||||
assert.Equal(t, 1, len(fsCache))
|
||||
assert.Equal(t, 1, c.Entries())
|
||||
|
||||
fNew, err := Get("/alien")
|
||||
fNew, err := GetFn("/alien", create)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, f, fNew)
|
||||
|
||||
assert.Equal(t, 1, len(fsCache))
|
||||
}
|
||||
|
||||
func TestCacheExpire(t *testing.T) {
|
||||
defer mockNewFs(t)()
|
||||
|
||||
cacheExpireInterval = time.Millisecond
|
||||
assert.Equal(t, false, expireRunning)
|
||||
|
||||
_, err := Get("/")
|
||||
require.NoError(t, err)
|
||||
|
||||
fsCacheMu.Lock()
|
||||
entry := fsCache["/"]
|
||||
|
||||
assert.Equal(t, 1, len(fsCache))
|
||||
fsCacheMu.Unlock()
|
||||
cacheExpire()
|
||||
fsCacheMu.Lock()
|
||||
assert.Equal(t, 1, len(fsCache))
|
||||
entry.lastUsed = time.Now().Add(-cacheExpireDuration - 60*time.Second)
|
||||
assert.Equal(t, true, expireRunning)
|
||||
fsCacheMu.Unlock()
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
fsCacheMu.Lock()
|
||||
assert.Equal(t, false, expireRunning)
|
||||
assert.Equal(t, 0, len(fsCache))
|
||||
fsCacheMu.Unlock()
|
||||
}
|
||||
|
||||
func TestClear(t *testing.T) {
|
||||
defer mockNewFs(t)()
|
||||
|
||||
assert.Equal(t, 0, len(fsCache))
|
||||
|
||||
_, err := Get("/")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 1, len(fsCache))
|
||||
|
||||
Clear()
|
||||
|
||||
assert.Equal(t, 0, len(fsCache))
|
||||
assert.Equal(t, 1, c.Entries())
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
@@ -52,7 +53,7 @@ func TestMultithreadCopy(t *testing.T) {
|
||||
} {
|
||||
t.Run(fmt.Sprintf("%+v", test), func(t *testing.T) {
|
||||
var err error
|
||||
contents := fstest.RandomString(test.size)
|
||||
contents := random.String(test.size)
|
||||
t1 := fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||
file1 := r.WriteObject(context.Background(), "file1", contents, t1)
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
@@ -28,6 +27,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/march"
|
||||
"github.com/rclone/rclone/fs/object"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
@@ -1666,7 +1666,7 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
|
||||
// to avoid issues with certain remotes and avoid file deletion.
|
||||
if !cp && fdst.Name() == fsrc.Name() && fdst.Features().CaseInsensitive && dstFileName != srcFileName && strings.ToLower(dstFilePath) == strings.ToLower(srcFilePath) {
|
||||
// Create random name to temporarily move file to
|
||||
tmpObjName := dstFileName + "-rclone-move-" + random(8)
|
||||
tmpObjName := dstFileName + "-rclone-move-" + random.String(8)
|
||||
_, err := fdst.NewObject(ctx, tmpObjName)
|
||||
if err != fs.ErrorObjectNotFound {
|
||||
if err == nil {
|
||||
@@ -1730,17 +1730,6 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
|
||||
return err
|
||||
}
|
||||
|
||||
// random generates a pseudorandom alphanumeric string
|
||||
func random(length int) string {
|
||||
randomOutput := make([]byte, length)
|
||||
possibleCharacters := "123567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
rand.Seed(time.Now().Unix())
|
||||
for i := range randomOutput {
|
||||
randomOutput[i] = possibleCharacters[rand.Intn(len(possibleCharacters))]
|
||||
}
|
||||
return string(randomOutput)
|
||||
}
|
||||
|
||||
// MoveFile moves a single file possibly to a new name
|
||||
func MoveFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) {
|
||||
return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, false)
|
||||
|
||||
@@ -20,8 +20,8 @@ func AddFlags(flagSet *pflag.FlagSet) {
|
||||
flags.StringVarP(flagSet, &Opt.Files, "rc-files", "", "", "Path to local files to serve on the HTTP server.")
|
||||
flags.BoolVarP(flagSet, &Opt.Serve, "rc-serve", "", false, "Enable the serving of remote objects.")
|
||||
flags.BoolVarP(flagSet, &Opt.NoAuth, "rc-no-auth", "", false, "Don't require auth for certain methods.")
|
||||
flags.BoolVarP(flagSet, &Opt.WebUI, "rc-web-gui", "w", false, "Launch WebGUI on localhost")
|
||||
flags.BoolVarP(flagSet, &Opt.WebUI, "rc-web-gui", "", false, "Launch WebGUI on localhost")
|
||||
flags.BoolVarP(flagSet, &Opt.WebGUIUpdate, "rc-web-gui-update", "", false, "Update / Force update to latest version of web gui")
|
||||
flags.StringVarP(flagSet, &Opt.WebGUIFetchURL, "rc-web-fetch-url", "", "https://api.github.com/repos/negative0/rclone-webui-react/releases/latest", "URL to fetch the releases from")
|
||||
flags.StringVarP(flagSet, &Opt.WebGUIFetchURL, "rc-web-fetch-url", "", "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest", "URL to fetch the releases for webgui.")
|
||||
httpflags.AddFlagsPrefix(flagSet, "rc-", &Opt.HTTPOptions)
|
||||
}
|
||||
|
||||
@@ -95,6 +95,8 @@ func (s *Server) Serve() error {
|
||||
// Don't open browser if serving in testing environment.
|
||||
if flag.Lookup("test.v") == nil {
|
||||
_ = open.Start(openURL.String())
|
||||
} else {
|
||||
fs.Errorf(nil, "Not opening browser in testing environment")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -27,6 +27,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
@@ -357,24 +358,6 @@ func Time(timeString string) time.Time {
|
||||
return t
|
||||
}
|
||||
|
||||
// RandomString create a random string for test purposes
|
||||
func RandomString(n int) string {
|
||||
const (
|
||||
vowel = "aeiou"
|
||||
consonant = "bcdfghjklmnpqrstvwxyz"
|
||||
digit = "0123456789"
|
||||
)
|
||||
pattern := []string{consonant, vowel, consonant, vowel, consonant, vowel, consonant, digit}
|
||||
out := make([]byte, n)
|
||||
p := 0
|
||||
for i := range out {
|
||||
source := pattern[p]
|
||||
p = (p + 1) % len(pattern)
|
||||
out[i] = source[rand.Intn(len(source))]
|
||||
}
|
||||
return string(out)
|
||||
}
|
||||
|
||||
// LocalRemote creates a temporary directory name for local remotes
|
||||
func LocalRemote() (path string, err error) {
|
||||
path, err = ioutil.TempDir("", "rclone")
|
||||
@@ -403,7 +386,7 @@ func RandomRemoteName(remoteName string) (string, string, error) {
|
||||
if !strings.HasSuffix(remoteName, ":") {
|
||||
remoteName += "/"
|
||||
}
|
||||
leafName = "rclone-test-" + RandomString(24)
|
||||
leafName = "rclone-test-" + random.String(24)
|
||||
if !MatchTestRemote.MatchString(leafName) {
|
||||
log.Fatalf("%q didn't match the test remote name regexp", leafName)
|
||||
}
|
||||
@@ -432,7 +415,7 @@ func RandomRemote(remoteName string, subdir bool) (fs.Fs, string, func(), error)
|
||||
if err != nil {
|
||||
return nil, "", nil, err
|
||||
}
|
||||
remoteName += "/rclone-test-subdir-" + RandomString(8)
|
||||
remoteName += "/rclone-test-subdir-" + random.String(8)
|
||||
}
|
||||
|
||||
remote, err := fs.NewFs(remoteName)
|
||||
|
||||
@@ -31,6 +31,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -157,7 +158,7 @@ func testPut(t *testing.T, f fs.Fs, file *fstest.Item) (string, fs.Object) {
|
||||
contents string
|
||||
)
|
||||
retry(t, "Put", func() error {
|
||||
contents = fstest.RandomString(100)
|
||||
contents = random.String(100)
|
||||
buf := bytes.NewBufferString(contents)
|
||||
uploadHash = hash.NewMultiHasher()
|
||||
in := io.TeeReader(buf, uploadHash)
|
||||
@@ -557,7 +558,7 @@ func Run(t *testing.T, opt *Opt) {
|
||||
|
||||
const N = 5 * 1024
|
||||
// Read N bytes then produce an error
|
||||
contents := fstest.RandomString(N)
|
||||
contents := random.String(N)
|
||||
buf := bytes.NewBufferString(contents)
|
||||
er := &errorReader{errors.New("potato")}
|
||||
in := io.MultiReader(buf, er)
|
||||
@@ -1322,7 +1323,7 @@ func Run(t *testing.T, opt *Opt) {
|
||||
// TestObjectUpdate tests that Update works
|
||||
t.Run("ObjectUpdate", func(t *testing.T) {
|
||||
skipIfNotOk(t)
|
||||
contents := fstest.RandomString(200)
|
||||
contents := random.String(200)
|
||||
buf := bytes.NewBufferString(contents)
|
||||
hash := hash.NewMultiHasher()
|
||||
in := io.TeeReader(buf, hash)
|
||||
@@ -1507,7 +1508,7 @@ func Run(t *testing.T, opt *Opt) {
|
||||
contentSize = 100
|
||||
)
|
||||
retry(t, "PutStream", func() error {
|
||||
contents := fstest.RandomString(contentSize)
|
||||
contents := random.String(contentSize)
|
||||
buf := bytes.NewBufferString(contents)
|
||||
uploadHash = hash.NewMultiHasher()
|
||||
in := io.TeeReader(buf, uploadHash)
|
||||
@@ -1564,7 +1565,7 @@ func Run(t *testing.T, opt *Opt) {
|
||||
assert.Nil(t, recover(), "Fs.Put() should not panic when src.Size() == -1")
|
||||
}()
|
||||
|
||||
contents := fstest.RandomString(100)
|
||||
contents := random.String(100)
|
||||
in := bytes.NewBufferString(contents)
|
||||
|
||||
obji := object.NewStaticObjectInfo("unknown-size-put.txt", fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
|
||||
@@ -1587,7 +1588,7 @@ func Run(t *testing.T, opt *Opt) {
|
||||
assert.Nil(t, recover(), "Object.Update() should not panic when src.Size() == -1")
|
||||
}()
|
||||
|
||||
newContents := fstest.RandomString(200)
|
||||
newContents := random.String(200)
|
||||
in := bytes.NewBufferString(newContents)
|
||||
|
||||
obj := findObject(t, remote, unknownSizeUpdateFile.Path)
|
||||
|
||||
134
lib/cache/cache.go
vendored
Normal file
@@ -0,0 +1,134 @@
// Package cache implements a simple cache where the entries are
// expired after a given time (5 minutes of disuse by default).
package cache

import (
	"sync"
	"time"
)

// Cache holds values indexed by string, but expired after a given
// time (5 minutes by default).
type Cache struct {
	mu             sync.Mutex
	cache          map[string]*cacheEntry
	expireRunning  bool
	expireDuration time.Duration // expire the cache entry when it is older than this
	expireInterval time.Duration // interval to run the cache expire
}

// New creates a new cache with the default expire duration and interval
func New() *Cache {
	return &Cache{
		cache:          map[string]*cacheEntry{},
		expireRunning:  false,
		expireDuration: 300 * time.Second,
		expireInterval: 60 * time.Second,
	}
}

// cacheEntry is stored in the cache
type cacheEntry struct {
	value    interface{} // cached item
	err      error       // creation error
	key      string      // key
	lastUsed time.Time   // time used for expiry
}

// CreateFunc is called to create new values. If the create function
// returns an error it will be cached if ok is true, otherwise the
// error will just be returned, allowing negative caching if required.
type CreateFunc func(key string) (value interface{}, ok bool, error error)

// used marks an entry as accessed now and kicks the expire timer off
// should be called with the lock held
func (c *Cache) used(entry *cacheEntry) {
	entry.lastUsed = time.Now()
	if !c.expireRunning {
		time.AfterFunc(c.expireInterval, c.cacheExpire)
		c.expireRunning = true
	}
}

// Get gets a value named key either from the cache or creates it
// afresh with the create function.
func (c *Cache) Get(key string, create CreateFunc) (value interface{}, err error) {
	c.mu.Lock()
	entry, ok := c.cache[key]
	if !ok {
		c.mu.Unlock() // Unlock in case Get is called recursively
		value, ok, err = create(key)
		if err != nil && !ok {
			return value, err
		}
		entry = &cacheEntry{
			value: value,
			key:   key,
			err:   err,
		}
		c.mu.Lock()
		c.cache[key] = entry
	}
	defer c.mu.Unlock()
	c.used(entry)
	return entry.value, entry.err
}

// Put puts a value named key into the cache
func (c *Cache) Put(key string, value interface{}) {
	c.mu.Lock()
	defer c.mu.Unlock()
	entry := &cacheEntry{
		value: value,
		key:   key,
	}
	c.used(entry)
	c.cache[key] = entry
}

// GetMaybe returns the value for key and true if found, nil and false if not
func (c *Cache) GetMaybe(key string) (value interface{}, found bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	entry, found := c.cache[key]
	if !found {
		return nil, found
	}
	c.used(entry)
	return entry.value, found
}

// cacheExpire expires any entries that haven't been used recently
func (c *Cache) cacheExpire() {
	c.mu.Lock()
	defer c.mu.Unlock()
	now := time.Now()
	for key, entry := range c.cache {
		if now.Sub(entry.lastUsed) > c.expireDuration {
			delete(c.cache, key)
		}
	}
	if len(c.cache) != 0 {
		time.AfterFunc(c.expireInterval, c.cacheExpire)
		c.expireRunning = true
	} else {
		c.expireRunning = false
	}
}

// Clear removes everything from the cache
func (c *Cache) Clear() {
	c.mu.Lock()
	for k := range c.cache {
		delete(c.cache, k)
	}
	c.mu.Unlock()
}

// Entries returns the number of entries in the cache
func (c *Cache) Entries() int {
	c.mu.Lock()
	entries := len(c.cache)
	c.mu.Unlock()
	return entries
}
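A brief usage sketch for the new lib/cache package (keys and values are illustrative): Get builds the value on a miss, GetMaybe only looks up, Put stores directly, and unused entries expire after about five minutes.

package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/cache"
)

func main() {
	c := cache.New()

	// Get creates the value on first use and caches it.
	v, err := c.Get("answer", func(key string) (interface{}, bool, error) {
		fmt.Println("creating", key) // only printed on a cache miss
		return 42, true, nil
	})
	fmt.Println(v, err) // 42 <nil>

	// GetMaybe looks up without creating.
	if v, ok := c.GetMaybe("answer"); ok {
		fmt.Println("cached:", v)
	}

	// Put stores a value directly; it expires after ~5 minutes of disuse.
	c.Put("other", "value")
	fmt.Println("entries:", c.Entries()) // 2
}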
174
lib/cache/cache_test.go
vendored
Normal file
@@ -0,0 +1,174 @@
package cache

import (
	"errors"
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var (
	called      = 0
	errSentinel = errors.New("an error")
	errCached   = errors.New("a cached error")
)

func setup(t *testing.T) (*Cache, CreateFunc) {
	called = 0
	create := func(path string) (interface{}, bool, error) {
		assert.Equal(t, 0, called)
		called++
		switch path {
		case "/":
			return "/", true, nil
		case "/file.txt":
			return "/file.txt", true, errCached
		case "/error":
			return nil, false, errSentinel
		}
		panic(fmt.Sprintf("Unknown path %q", path))
	}
	c := New()
	return c, create
}

func TestGet(t *testing.T) {
	c, create := setup(t)

	assert.Equal(t, 0, len(c.cache))

	f, err := c.Get("/", create)
	require.NoError(t, err)

	assert.Equal(t, 1, len(c.cache))

	f2, err := c.Get("/", create)
	require.NoError(t, err)

	assert.Equal(t, f, f2)
}

func TestGetFile(t *testing.T) {
	c, create := setup(t)

	assert.Equal(t, 0, len(c.cache))

	f, err := c.Get("/file.txt", create)
	require.Equal(t, errCached, err)

	assert.Equal(t, 1, len(c.cache))

	f2, err := c.Get("/file.txt", create)
	require.Equal(t, errCached, err)

	assert.Equal(t, f, f2)
}

func TestGetError(t *testing.T) {
	c, create := setup(t)

	assert.Equal(t, 0, len(c.cache))

	f, err := c.Get("/error", create)
	require.Equal(t, errSentinel, err)
	require.Equal(t, nil, f)

	assert.Equal(t, 0, len(c.cache))
}

func TestPut(t *testing.T) {
	c, create := setup(t)

	assert.Equal(t, 0, len(c.cache))

	c.Put("/alien", "slime")

	assert.Equal(t, 1, len(c.cache))

	fNew, err := c.Get("/alien", create)
	require.NoError(t, err)
	require.Equal(t, "slime", fNew)

	assert.Equal(t, 1, len(c.cache))
}

func TestCacheExpire(t *testing.T) {
	c, create := setup(t)

	c.expireInterval = time.Millisecond
	assert.Equal(t, false, c.expireRunning)

	_, err := c.Get("/", create)
	require.NoError(t, err)

	c.mu.Lock()
	entry := c.cache["/"]

	assert.Equal(t, 1, len(c.cache))
	c.mu.Unlock()
	c.cacheExpire()
	c.mu.Lock()
	assert.Equal(t, 1, len(c.cache))
	entry.lastUsed = time.Now().Add(-c.expireDuration - 60*time.Second)
	assert.Equal(t, true, c.expireRunning)
	c.mu.Unlock()
	time.Sleep(10 * time.Millisecond)
	c.mu.Lock()
	assert.Equal(t, false, c.expireRunning)
	assert.Equal(t, 0, len(c.cache))
	c.mu.Unlock()
}

func TestClear(t *testing.T) {
	c, create := setup(t)

	assert.Equal(t, 0, len(c.cache))

	_, err := c.Get("/", create)
	require.NoError(t, err)

	assert.Equal(t, 1, len(c.cache))

	c.Clear()

	assert.Equal(t, 0, len(c.cache))
}

func TestEntries(t *testing.T) {
	c, create := setup(t)

	assert.Equal(t, 0, c.Entries())

	_, err := c.Get("/", create)
	require.NoError(t, err)

	assert.Equal(t, 1, c.Entries())

	c.Clear()

	assert.Equal(t, 0, c.Entries())
}

func TestGetMaybe(t *testing.T) {
	c, create := setup(t)

	value, found := c.GetMaybe("/")
	assert.Equal(t, false, found)
	assert.Nil(t, value)

	f, err := c.Get("/", create)
	require.NoError(t, err)

	value, found = c.GetMaybe("/")
	assert.Equal(t, true, found)
	assert.Equal(t, f, value)

	c.Clear()

	value, found = c.GetMaybe("/")
	assert.Equal(t, false, found)
	assert.Nil(t, value)
}
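The setup function above also pins down the CreateFunc error contract that TestGetFile and TestGetError rely on: returning ok=true caches the value together with its error, while ok=false keeps the failure out of the cache so the next Get retries. A short sketch of that behaviour (the helper names and error texts here are illustrative only):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/rclone/rclone/lib/cache"
)

func main() {
	c := cache.New()

	// ok == false: the error is returned but nothing is cached, so the
	// next Get calls create again (as in TestGetError above).
	transient := func(key string) (interface{}, bool, error) {
		return nil, false, errors.New("temporarily unavailable")
	}

	// ok == true: the value and its error are cached together, so later
	// Gets return the same error without calling create (as in TestGetFile).
	permanent := func(key string) (interface{}, bool, error) {
		return "stub", true, errors.New("known bad object")
	}

	_, err := c.Get("/transient", transient)
	fmt.Println(err, c.Entries()) // error printed, 0 entries cached

	_, err = c.Get("/permanent", permanent)
	fmt.Println(err, c.Entries()) // error printed, 1 entry cached
}
```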
22	lib/random/random.go	Normal file
@@ -0,0 +1,22 @@
// Package random holds a few functions for working with random numbers
package random

import "math/rand"

// String creates a random string for test purposes
func String(n int) string {
	const (
		vowel     = "aeiou"
		consonant = "bcdfghjklmnpqrstvwxyz"
		digit     = "0123456789"
	)
	pattern := []string{consonant, vowel, consonant, vowel, consonant, vowel, consonant, digit}
	out := make([]byte, n)
	p := 0
	for i := range out {
		source := pattern[p]
		p = (p + 1) % len(pattern)
		out[i] = source[rand.Intn(len(source))]
	}
	return string(out)
}
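For reference, String cycles through the pattern consonant-vowel pairs with a digit in every eighth position, so the generated names are pronounceable and the requested length is honoured exactly. A quick usage sketch (output shown is only an example of the shape, not a fixed value; note that math/rand is not seeded here, so a given run order is deterministic unless the caller seeds it):

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/random"
)

func main() {
	// Each call returns exactly n characters following the
	// consonant/vowel/digit pattern, e.g. something like "bodilam4".
	for _, n := range []int{4, 8, 16} {
		fmt.Println(n, random.String(n))
	}
}
```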
13	lib/random/random_test.go	Normal file
@@ -0,0 +1,13 @@
package random

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestString(t *testing.T) {
	for i := 0; i < 100; i++ {
		assert.Equal(t, i, len(String(i)))
	}
}
@@ -18,6 +18,7 @@ import (
	"time"

	"github.com/rclone/rclone/lib/file"
	"github.com/rclone/rclone/lib/random"
)

var (
@@ -35,24 +36,6 @@ func init() {

}

// RandomString create a random string for test purposes
func RandomString(n int) string {
	const (
		vowel     = "aeiou"
		consonant = "bcdfghjklmnpqrstvwxyz"
		digit     = "0123456789"
	)
	pattern := []string{consonant, vowel, consonant, vowel, consonant, vowel, consonant, digit}
	out := make([]byte, n)
	p := 0
	for i := range out {
		source := pattern[p]
		p = (p + 1) % len(pattern)
		out[i] = source[rand.Intn(len(source))]
	}
	return string(out)
}

// Test contains stats about the running test which work for files or
// directories
type Test struct {
@@ -71,7 +54,7 @@ type Test struct {
func NewTest(Dir string) *Test {
	t := &Test{
		dir:    Dir,
		name:   RandomString(*nameLength),
		name:   random.String(*nameLength),
		isDir:  rand.Intn(2) == 0,
		number: atomic.AddInt32(&testNumber, 1),
		timer:  time.NewTimer(*timeout),
@@ -168,7 +151,7 @@ func (t *Test) rename() {
		return
	}
	t.logf("rename")
	NewName := RandomString(*nameLength)
	NewName := random.String(*nameLength)
	newPath := path.Join(t.dir, NewName)
	err := os.Rename(t.path(), newPath)
	if err != nil {
@@ -242,6 +242,11 @@ func New(f fs.Fs, opt *Options) *VFS {
	return vfs
}

// Fs returns the Fs passed into the New call
func (vfs *VFS) Fs() fs.Fs {
	return vfs.f
}

// SetCacheMode changes the cache mode
func (vfs *VFS) SetCacheMode(cacheMode CacheMode) {
	vfs.Shutdown()
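The two new accessors are small but worth a note: Fs exposes the backing remote, and SetCacheMode shuts the VFS down before switching, so it is only sensible to call it before any file handles are handed out. A minimal sketch of how they might be wired up; the wrapper function, the nil-Options default behaviour, and the CacheModeWrites constant are assumptions here, not something this diff establishes:

```go
package vfsexample

import (
	"fmt"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/vfs"
)

// setupVFS is a sketch only: it shows the new Fs and SetCacheMode calls on
// top of whatever fs.Fs the caller has already created.
func setupVFS(f fs.Fs) *vfs.VFS {
	v := vfs.New(f, nil) // nil is assumed to select the default Options

	// SetCacheMode calls Shutdown internally (see above), so switch the
	// cache mode before opening any files through the VFS.
	v.SetCacheMode(vfs.CacheModeWrites)

	fmt.Println("serving", v.Fs().Name(), "with a write cache")
	return v
}
```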