Mirror of https://github.com/rclone/rclone.git

Compare commits


21 Commits

Author            SHA1        Message                                        Date
Nick Craig-Wood   5e69e87ea7  add build caching                              2019-08-05 12:13:24 +01:00
Nick Craig-Wood   1259785329  add build caching                              2019-08-05 10:55:11 +01:00
Nick Craig-Wood   de4a20894a  add build caching                              2019-08-05 10:28:20 +01:00
Nick Craig-Wood   af9a972760  add build caching                              2019-08-05 10:16:40 +01:00
Nick Craig-Wood   6dde819d27  add build caching                              2019-08-05 10:03:25 +01:00
Nick Craig-Wood   048ff6070c  add build caching                              2019-08-04 20:29:03 +01:00
Nick Craig-Wood   f3ae16aa1c  add build caching                              2019-08-04 20:24:40 +01:00
Nick Craig-Wood   8ef9846a4c  build: CRLF again                              2019-08-04 19:49:14 +01:00
Nick Craig-Wood   7a8cd59ea6  build: CRLF again                              2019-08-04 19:17:42 +01:00
Nick Craig-Wood   a75457c738  build: fix CRLF avoidance                      2019-08-04 19:00:15 +01:00
Nick Craig-Wood   bfe0f0ec2c  build: Don't auto CRLF test data               2019-08-04 17:55:36 +01:00
Nick Craig-Wood   fd1154828f  build: bring pipelines in to line with travis  2019-08-04 17:03:27 +01:00
Nick Craig-Wood   7b687709cf  Attempt to cure the double build               2019-08-04 16:11:24 +01:00
Nick Craig-Wood   812fcbbe2c  Attempt to fix Windows CPATH                   2019-08-04 16:11:24 +01:00
Nick Craig-Wood   85a15d39d0  fix compile_all under linux                    2019-08-04 16:11:24 +01:00
Nick Craig-Wood   cc39c4e775  Set CPATH under Windows                        2019-08-04 16:11:24 +01:00
Nick Craig-Wood   4e46d26a0b  Fix BRANCH and apt-get                         2019-08-04 16:11:24 +01:00
Nick Craig-Wood   0554daf3d8  Set working directory                          2019-08-04 16:11:24 +01:00
Nick Craig-Wood   f633996da6  build: whitespace!                             2019-08-04 16:11:24 +01:00
Nick Craig-Wood   9b74d1beb1  FIXME DISABLE TRAVIS and APPVEYOR              2019-08-04 16:11:24 +01:00
Nick Craig-Wood   62a4bad5d2  build: add azure pipelines build               2019-08-04 16:11:23 +01:00
47 changed files with 628 additions and 1727 deletions

View File

@@ -46,4 +46,4 @@ artifacts:
- path: build/*-v*.zip
deploy_script:
- IF "%APPVEYOR_REPO_NAME%" == "rclone/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make upload_beta
- IF "%APPVEYOR_REPO_NAME%" == "rclone/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload

View File

@@ -84,6 +84,7 @@ matrix:
- BUILD_FLAGS='-exclude "^(windows|darwin|linux)/"'
script:
- make
- make compile_all
- go: 1.12.x
name: macOS
os: osx
@@ -119,11 +120,9 @@ matrix:
deploy:
provider: script
script:
- make beta
- [[ "$TRAVIS_PULL_REQUEST" == "false" ]] && make upload_beta
script: make travis_beta
skip_cleanup: true
on:
repo: rclone/rclone
all_branches: true
condition: $DEPLOY == true
condition: $TRAVIS_PULL_REQUEST == false && $DEPLOY == true

View File

@@ -17,10 +17,7 @@ ifneq ($(TAG),$(LAST_TAG))
endif
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
ifdef BETA_SUBDIR
BETA_SUBDIR := /$(BETA_SUBDIR)
endif
BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
BETA_PATH := $(BRANCH_PATH)$(TAG)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
@@ -145,7 +142,7 @@ upload_github:
cross: doc
go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
test_beta:
beta:
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)
@echo Beta release ready at https://pub.rclone.org/$(TAG)/
@@ -156,6 +153,13 @@ log_since_last_release:
compile_all:
go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
appveyor_upload:
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
@echo Beta release ready at $(BETA_URL)
circleci_upload:
./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
ifndef BRANCH_PATH
@@ -163,17 +167,15 @@ ifndef BRANCH_PATH
endif
@echo Beta release ready at $(BETA_URL)/testbuilds
beta:
ifeq (linux,$(filter linux,$(subst Linux,linux,$(TRAVIS_OS_NAME) $(AGENT_OS))))
travis_beta:
ifeq ($(TRAVIS_OS_NAME),linux)
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
endif
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
upload_beta: rclone
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
@echo Beta release ready at $(BETA_URL)

View File

@@ -10,7 +10,6 @@
[![Build Status](https://travis-ci.org/rclone/rclone.svg?branch=master)](https://travis-ci.org/rclone/rclone)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/rclone/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/rclone/rclone)
[![Build Status](https://dev.azure.com/rclone/rclone/_apis/build/status/rclone.rclone?branchName=master)](https://dev.azure.com/rclone/rclone/_build/latest?definitionId=2&branchName=master)
[![CircleCI](https://circleci.com/gh/rclone/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/rclone/rclone/tree/master)
[![Go Report Card](https://goreportcard.com/badge/github.com/rclone/rclone)](https://goreportcard.com/report/github.com/rclone/rclone)
[![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone)

View File

@@ -1,33 +1,16 @@
---
# Azure pipelines build for rclone
# Parts stolen shamelessly from all round the Internet, especially Caddy
# -*- compile-command: "yamllint -f parsable azure-pipelines.yml" -*-
trigger:
branches:
include:
- '*'
tags:
include:
- '*'
variables:
GOROOT: $(gorootDir)/go
GOPATH: $(system.defaultWorkingDirectory)/gopath
GOCACHE: $(system.defaultWorkingDirectory)/gocache
GOBIN: $(GOPATH)/bin
GOMAXPROCS: 8 # workaround for cmd/mount tests locking up - see #3154
modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)'
GO111MODULE: 'off'
GOTAGS: cmount
GO_LATEST: false
CPATH: ''
GO_INSTALL_ARCH: amd64
strategy:
matrix:
linux:
imageName: ubuntu-latest
imageName: ubuntu-16.04
gorootDir: /usr/local
GO_VERSION: latest
GOTAGS: cmount
@@ -36,63 +19,67 @@ strategy:
MAKE_QUICKTEST: true
DEPLOY: true
mac:
imageName: macos-latest
imageName: macos-10.13
gorootDir: /usr/local
GO_VERSION: latest
GOTAGS: "" # cmount doesn't work on osx travis for some reason
BUILD_FLAGS: '-include "^darwin/" -cgo'
MAKE_QUICKTEST: true
MAKE_RACEQUICKTEST: true
RACEMAKE_QUICKTEST: true
DEPLOY: true
windows_amd64:
imageName: windows-latest
windows:
imageName: windows-2019
gorootDir: C:\
GO_VERSION: latest
BUILD_FLAGS: '-include "^windows/amd64" -cgo'
MAKE_QUICKTEST: true
DEPLOY: true
windows_386:
imageName: windows-latest
gorootDir: C:\
GO_VERSION: latest
GO_INSTALL_ARCH: 386
BUILD_FLAGS: '-include "^windows/386" -cgo'
BUILD_FLAGS: '-include "^windows/amd64" -cgo' # 386 doesn't build yet
MAKE_QUICKTEST: true
DEPLOY: true
other_os:
imageName: ubuntu-latest
imageName: ubuntu-16.04
gorootDir: /usr/local
GO_VERSION: latest
BUILD_FLAGS: '-exclude "^(windows|darwin|linux)/"'
MAKE_COMPILE_ALL: true
DEPLOY: true
modules_race:
imageName: ubuntu-latest
imageName: ubuntu-16.04
gorootDir: /usr/local
GO_VERSION: latest
GO111MODULE: on
GOPROXY: https://proxy.golang.org
MAKE_QUICKTEST: true
MAKE_RACEQUICKTEST: true
RACEMAKE_QUICKTEST: true
go1.9:
imageName: ubuntu-latest
imageName: ubuntu-16.04
gorootDir: /usr/local
GOCACHE: '' # build caching only came in go1.10
GO_VERSION: go1.9.7
MAKE_QUICKTEST: true
go1.10:
imageName: ubuntu-latest
imageName: ubuntu-16.04
gorootDir: /usr/local
GO_VERSION: go1.10.8
MAKE_QUICKTEST: true
go1.11:
imageName: ubuntu-latest
imageName: ubuntu-16.04
gorootDir: /usr/local
GO_VERSION: go1.11.12
GO_VERSION: go1.11.8
MAKE_QUICKTEST: true
pool:
vmImage: $(imageName)
variables:
GOROOT: $(gorootDir)/go
GOPATH: $(system.defaultWorkingDirectory)/gopath
GOCACHE: $(system.defaultWorkingDirectory)/gocache
GOBIN: $(GOPATH)/bin
modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)'
GO111MODULE: 'off'
GOTAGS: cmount
GO_LATEST: false
CPATH: ''
steps:
- bash: |
latestGo=$(curl "https://golang.org/VERSION?m=text")
@@ -115,11 +102,20 @@ steps:
- task: CacheBeta@0
continueOnError: true
inputs:
key: go-build-cache | "$(Agent.JobName)"
key: go-build-cache | $(Agent.JobName)
path: $(GOCACHE)
displayName: Cache go build
condition: ne( variables['GOCACHE'], '' )
- bash: |
mkdir -p $(GOCACHE)
echo "not empty" > $(GOCACHE)/not_empty.txt
echo "GOCACHE=" $(GOCACHE)
ls -R $(GOCACHE)
continueOnError: true
displayName: Create cache dir
condition: ne( variables['GOCACHE'], '' )
# Install Libraries (varies by platform)
- bash: |
@@ -138,43 +134,32 @@ steps:
displayName: Install Libraries on macOS
- powershell: |
$ProgressPreference = 'SilentlyContinue'
choco install -y winfsp zip
choco install -y winfsp zip make
Write-Host "##vso[task.setvariable variable=CPATH]C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
if ($env:GO_INSTALL_ARCH -eq "386") {
choco install -y mingw --forcex86 --force
Write-Host "##vso[task.prependpath]C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
}
# Copy mingw32-make.exe to make.exe so the same command line
# can be used on Windows as on macOS and Linux
$path = (get-command mingw32-make.exe).Path
Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
condition: eq( variables['Agent.OS'], 'Windows_NT' )
displayName: Install Libraries on Windows
# Install Go (this varies by platform)
- bash: |
wget "https://dl.google.com/go/$(GO_VERSION).linux-$(GO_INSTALL_ARCH).tar.gz"
wget "https://dl.google.com/go/$(GO_VERSION).linux-amd64.tar.gz"
sudo mkdir $(gorootDir)
sudo chown ${USER}:${USER} $(gorootDir)
tar -C $(gorootDir) -xzf "$(GO_VERSION).linux-$(GO_INSTALL_ARCH).tar.gz"
tar -C $(gorootDir) -xzf "$(GO_VERSION).linux-amd64.tar.gz"
condition: eq( variables['Agent.OS'], 'Linux' )
displayName: Install Go on Linux
- bash: |
wget "https://dl.google.com/go/$(GO_VERSION).darwin-$(GO_INSTALL_ARCH).tar.gz"
sudo tar -C $(gorootDir) -xzf "$(GO_VERSION).darwin-$(GO_INSTALL_ARCH).tar.gz"
wget "https://dl.google.com/go/$(GO_VERSION).darwin-amd64.tar.gz"
sudo tar -C $(gorootDir) -xzf "$(GO_VERSION).darwin-amd64.tar.gz"
condition: eq( variables['Agent.OS'], 'Darwin' )
displayName: Install Go on macOS
- powershell: |
$ProgressPreference = 'SilentlyContinue'
Write-Host "Downloading Go $(GO_VERSION) for $(GO_INSTALL_ARCH)"
(New-Object System.Net.WebClient).DownloadFile("https://dl.google.com/go/$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip", "$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip")
Write-Host "Extracting Go"
Expand-Archive "$(GO_VERSION).windows-$(GO_INSTALL_ARCH).zip" -DestinationPath "$(gorootDir)"
Write-Host "Downloading Go... (please be patient, I am very slow)"
(New-Object System.Net.WebClient).DownloadFile("https://dl.google.com/go/$(GO_VERSION).windows-amd64.zip", "$(GO_VERSION).windows-amd64.zip")
Write-Host "Extracting Go... (I'm slow too)"
Expand-Archive "$(GO_VERSION).windows-amd64.zip" -DestinationPath "$(gorootDir)"
condition: eq( variables['Agent.OS'], 'Windows_NT' )
displayName: Install Go on Windows
@@ -185,16 +170,17 @@ steps:
printf "Go version: $(go version)\n"
printf "\n\nGo environment:\n\n"
go env
printf "\n\nRclone environment:\n\n"
make vars
printf "\n\nSystem environment:\n\n"
env
printf "\n\nRclone environment:\n\n"
make vars
workingDirectory: '$(modulePath)'
displayName: Print Go version and environment
# Run Tests
- bash: |
make
make quicktest
workingDirectory: '$(modulePath)'
displayName: Run tests
@@ -204,7 +190,7 @@ steps:
make racequicktest
workingDirectory: '$(modulePath)'
displayName: Race test
condition: eq( variables['MAKE_RACEQUICKTEST'], 'true' )
condition: eq( variables['RACEMAKE_QUICKTEST'], 'true' )
- bash: |
make build_dep
@@ -214,21 +200,13 @@ steps:
condition: eq( variables['MAKE_CHECK'], 'true' )
- bash: |
make beta
make compile_all
workingDirectory: '$(modulePath)'
displayName: Do release build
condition: eq( variables['DEPLOY'], 'true' )
displayName: Compile all architectures test
condition: eq( variables['MAKE_COMPILE_ALL'], 'true' )
- bash: |
make upload_beta
env:
RCLONE_CONFIG_PASS: $(RCLONE_CONFIG_PASS)
BETA_SUBDIR: 'azure_pipelines' # FIXME remove when removing travis/appveyor
make vars # FIXME travis_beta
workingDirectory: '$(modulePath)'
displayName: Upload built binaries
displayName: Deploy built binaries
condition: and( eq( variables['DEPLOY'], 'true' ), ne( variables['Build.Reason'], 'PullRequest' ) )
- publish: $(modulePath)/build
artifact: "rclone-build-$(Agent.JobName)"
displayName: Publish built binaries
condition: eq( variables['DEPLOY'], 'true' )

View File

@@ -33,7 +33,6 @@ import (
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/stretchr/testify/assert"
@@ -356,8 +355,8 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
testData2, err = base64.StdEncoding.DecodeString(cryptedText2Base64)
require.NoError(t, err)
} else {
testData1 = []byte(random.String(100))
testData2 = []byte(random.String(200))
testData1 = []byte(fstest.RandomString(100))
testData2 = []byte(fstest.RandomString(200))
}
// write the object

View File

@@ -13,7 +13,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -56,7 +55,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)
t.Run("CreateAlbum", func(t *testing.T) {
albumName := "album/rclone-test-" + random.String(24)
albumName := "album/rclone-test-" + fstest.RandomString(24)
err = f.Mkdir(ctx, albumName)
require.NoError(t, err)
remote := albumName + "/" + fileNameAlbum

View File

@@ -1,25 +0,0 @@
#!/usr/bin/env python3
"""
A demo proxy for rclone serve sftp/webdav/ftp etc
This takes the incoming user/pass and converts it into an sftp backend
running on localhost.
"""
import sys
import json
def main():
i = json.load(sys.stdin)
o = {
"type": "sftp", # type of backend
"_root": "", # root of the fs
"_obscure": "pass", # comma sep list of fields to obscure
"user": i["user"],
"pass": i["pass"],
"host": "127.0.0.1",
}
json.dump(o, sys.stdout, indent="\t")
if __name__ == "__main__":
main()

View File

@@ -9,7 +9,6 @@ package cmd
import (
"fmt"
"log"
"math/rand"
"os"
"os/exec"
"path"
@@ -493,7 +492,6 @@ func AddBackendFlags() {
// Main runs rclone interpreting flags and commands out of os.Args
func Main() {
rand.Seed(time.Now().Unix())
setupRootCommand(Root)
AddBackendFlags()
if err := Root.Execute(); err != nil {

View File

@@ -18,7 +18,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/fstest"
"github.com/spf13/cobra"
)
@@ -118,7 +118,7 @@ func (r *results) Print() {
// writeFile writes a file with some random contents
func (r *results) writeFile(path string) (fs.Object, error) {
contents := random.String(50)
contents := fstest.RandomString(50)
src := object.NewStaticObjectInfo(path, time.Now(), int64(len(contents)), true, nil, r.f)
return r.f.Put(r.ctx, bytes.NewBufferString(contents), src)
}

View File

@@ -82,7 +82,7 @@ func checkRelease(shouldUpdate bool) (err error) {
extractPath := filepath.Join(cachePath, "current")
if !exists(cachePath) {
if err := os.MkdirAll(cachePath, 0755); err != nil {
if err := os.MkdirAll(cachePath, 755); err != nil {
fs.Logf(nil, "Error creating cache directory: %s", cachePath)
}
}
@@ -177,14 +177,14 @@ func unzip(src, dest string) (err error) {
path := filepath.Join(dest, f.Name)
if f.FileInfo().IsDir() {
if err := os.MkdirAll(path, 0755); err != nil {
if err := os.MkdirAll(path, f.Mode()); err != nil {
return err
}
} else {
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
if err := os.MkdirAll(filepath.Dir(path), f.Mode()); err != nil {
return err
}
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
if err != nil {
return err
}

View File

@@ -5,68 +5,30 @@
package ftp
import (
"bytes"
"errors"
"fmt"
"io"
"net"
"os"
"os/user"
"runtime"
"strconv"
"sync"
ftp "github.com/goftp/server"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/cmd/serve/ftp/ftpflags"
"github.com/rclone/rclone/cmd/serve/ftp/ftpopt"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
// Options contains options for the http Server
type Options struct {
//TODO add more options
ListenAddr string // Port to listen on
PublicIP string // Passive ports range
PassivePorts string // Passive ports range
BasicUser string // single username for basic auth if not using Htpasswd
BasicPass string // password for BasicUser
}
// DefaultOpt is the default values used for Options
var DefaultOpt = Options{
ListenAddr: "localhost:2121",
PublicIP: "",
PassivePorts: "30000-32000",
BasicUser: "anonymous",
BasicPass: "",
}
// Opt is options set by command line flags
var Opt = DefaultOpt
// AddFlags adds flags for ftp
func AddFlags(flagSet *pflag.FlagSet) {
rc.AddOption("ftp", &Opt)
flags.StringVarP(flagSet, &Opt.ListenAddr, "addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to.")
flags.StringVarP(flagSet, &Opt.PublicIP, "public-ip", "", Opt.PublicIP, "Public IP address to advertise for passive connections.")
flags.StringVarP(flagSet, &Opt.PassivePorts, "passive-port", "", Opt.PassivePorts, "Passive port range to use.")
flags.StringVarP(flagSet, &Opt.BasicUser, "user", "", Opt.BasicUser, "User name for authentication.")
flags.StringVarP(flagSet, &Opt.BasicPass, "pass", "", Opt.BasicPass, "Password for authentication. (empty value allows any password)")
}
func init() {
ftpflags.AddFlags(Command.Flags())
vfsflags.AddFlags(Command.Flags())
proxyflags.AddFlags(Command.Flags())
AddFlags(Command.Flags())
}
// Command definition for cobra
@@ -77,33 +39,12 @@ var Command = &cobra.Command{
rclone serve ftp implements a basic ftp server to serve the
remote over FTP protocol. This can be viewed with a ftp client
or you can make a remote of type ftp to read and write it.
### Server options
Use --addr to specify which IP address and port the server should
listen on, eg --addr 1.2.3.4:8000 or --addr :8080 to listen to all
IPs. By default it only listens on localhost. You can use port
:0 to let the OS choose an available port.
If you set --addr to listen on a public or LAN accessible IP address
then using Authentication is advised - see the next section for info.
#### Authentication
By default this will serve files without needing a login.
You can set a single username and password with the --user and --pass flags.
` + vfs.Help + proxy.Help,
` + ftpopt.Help + vfs.Help,
Run: func(command *cobra.Command, args []string) {
var f fs.Fs
if proxyflags.Opt.AuthProxy == "" {
cmd.CheckArgs(1, 1, command, args)
f = cmd.NewFsSrc(args)
} else {
cmd.CheckArgs(0, 0, command, args)
}
cmd.CheckArgs(1, 1, command, args)
f := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
s, err := newServer(f, &Opt)
s, err := newServer(f, &ftpflags.Opt)
if err != nil {
return err
}
@@ -114,17 +55,12 @@ You can set a single username and password with the --user and --pass flags.
// server contains everything to run the server
type server struct {
f fs.Fs
srv *ftp.Server
opt Options
vfs *vfs.VFS
proxy *proxy.Proxy
pendingMu sync.Mutex
pending map[string]*Driver // pending Driver~s that haven't got their VFS
f fs.Fs
srv *ftp.Server
}
// Make a new FTP to serve the remote
func newServer(f fs.Fs, opt *Options) (*server, error) {
func newServer(f fs.Fs, opt *ftpopt.Options) (*server, error) {
host, port, err := net.SplitHostPort(opt.ListenAddr)
if err != nil {
return nil, errors.New("Failed to parse host:port")
@@ -134,31 +70,27 @@ func newServer(f fs.Fs, opt *Options) (*server, error) {
return nil, errors.New("Failed to parse host:port")
}
s := &server{
f: f,
opt: *opt,
pending: make(map[string]*Driver),
}
if proxyflags.Opt.AuthProxy != "" {
s.proxy = proxy.New(&proxyflags.Opt)
} else {
s.vfs = vfs.New(f, &vfsflags.Opt)
}
ftpopt := &ftp.ServerOpts{
Name: "Rclone FTP Server",
WelcomeMessage: "Welcome to Rclone " + fs.Version + " FTP Server",
Factory: s, // implemented by NewDriver method
Hostname: host,
Port: portNum,
PublicIp: opt.PublicIP,
PassivePorts: opt.PassivePorts,
Auth: s, // implemented by CheckPasswd method
Logger: &Logger{},
WelcomeMessage: "Welcome on Rclone FTP Server",
Factory: &DriverFactory{
vfs: vfs.New(f, &vfsflags.Opt),
},
Hostname: host,
Port: portNum,
PublicIp: opt.PublicIP,
PassivePorts: opt.PassivePorts,
Auth: &Auth{
BasicUser: opt.BasicUser,
BasicPass: opt.BasicPass,
},
Logger: &Logger{},
//TODO implement a maximum of https://godoc.org/github.com/goftp/server#ServerOpts
}
s.srv = ftp.NewServer(ftpopt)
return s, nil
return &server{
f: f,
srv: ftp.NewServer(ftpopt),
}, nil
}
// serve runs the ftp server
@@ -200,106 +132,39 @@ func (l *Logger) PrintResponse(sessionID string, code int, message string) {
fs.Infof(sessionID, "< %d %s", code, message)
}
// findID finds the connection ID of the calling program. It does
// this in an incredibly hacky way by looking in the stack trace.
//
// callerName should be the name of the function that we are looking
// for with a trailing '('
//
// What is really needed is a change of calling protocol so
// CheckPassword is called with the connection.
func findID(callerName []byte) (string, error) {
// Dump the stack in this format
// github.com/rclone/rclone/vendor/github.com/goftp/server.(*Conn).Serve(0xc0000b2680)
// /home/ncw/go/src/github.com/rclone/rclone/vendor/github.com/goftp/server/conn.go:116 +0x11d
buf := make([]byte, 4096)
n := runtime.Stack(buf, false)
buf = buf[:n]
// look for callerName first
i := bytes.Index(buf, callerName)
if i < 0 {
return "", errors.Errorf("findID: caller name not found in:\n%s", buf)
}
buf = buf[i+len(callerName):]
// find next ')'
i = bytes.IndexByte(buf, ')')
if i < 0 {
return "", errors.Errorf("findID: end of args not found in:\n%s", buf)
}
buf = buf[:i]
// trim off first argument
// find next ','
i = bytes.IndexByte(buf, ',')
if i >= 0 {
buf = buf[:i]
}
return string(buf), nil
//Auth struct to handle ftp auth (temporary simple for POC)
type Auth struct {
BasicUser string
BasicPass string
}
var connServeFunction = []byte("(*Conn).Serve(")
// CheckPasswd handle auth based on configuration
func (s *server) CheckPasswd(user, pass string) (ok bool, err error) {
var VFS *vfs.VFS
if s.proxy != nil {
VFS, _, err = s.proxy.Call(user, pass)
if err != nil {
fs.Infof(nil, "proxy login failed: %v", err)
return false, nil
}
id, err := findID(connServeFunction)
if err != nil {
fs.Infof(nil, "proxy login failed: failed to read ID from stack: %v", err)
return false, nil
}
s.pendingMu.Lock()
d := s.pending[id]
delete(s.pending, id)
s.pendingMu.Unlock()
if d == nil {
return false, errors.Errorf("proxy login failed: failed to find pending Driver under ID %q", id)
}
d.vfs = VFS
} else {
ok = s.opt.BasicUser == user && (s.opt.BasicPass == "" || s.opt.BasicPass == pass)
if !ok {
fs.Infof(nil, "login failed: bad credentials")
return false, nil
}
}
return true, nil
//CheckPasswd handle auth based on configuration
func (a *Auth) CheckPasswd(user, pass string) (bool, error) {
return a.BasicUser == user && (a.BasicPass == "" || a.BasicPass == pass), nil
}
// NewDriver starts a new session for each client connection
func (s *server) NewDriver() (ftp.Driver, error) {
//DriverFactory factory of ftp driver for each session
type DriverFactory struct {
vfs *vfs.VFS
}
//NewDriver start a new session
func (f *DriverFactory) NewDriver() (ftp.Driver, error) {
log.Trace("", "Init driver")("")
d := &Driver{
s: s,
vfs: s.vfs, // this can be nil if proxy set
}
return d, nil
return &Driver{
vfs: f.vfs,
}, nil
}
//Driver implementation of ftp server
type Driver struct {
s *server
vfs *vfs.VFS
lock sync.Mutex
}
//Init a connection
func (d *Driver) Init(c *ftp.Conn) {
func (d *Driver) Init(*ftp.Conn) {
defer log.Trace("", "Init session")("")
if d.s.proxy != nil {
id := fmt.Sprintf("%p", c)
d.s.pendingMu.Lock()
d.s.pending[id] = d
d.s.pendingMu.Unlock()
}
}
//Stat get information on file or folder

View File

@@ -8,72 +8,83 @@
package ftp
import (
"context"
"fmt"
"os"
"os/exec"
"testing"
ftp "github.com/goftp/server"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/cmd/serve/ftp/ftpopt"
"github.com/rclone/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
testHOST = "localhost"
testPORT = "51780"
testPASSIVEPORTRANGE = "30000-32000"
testUSER = "rclone"
testPASS = "password"
)
// TestFTP runs the ftp server then runs the unit tests for the
// ftp remote against it.
func TestFTP(t *testing.T) {
// Configure and start the server
start := func(f fs.Fs) (configmap.Simple, func()) {
opt := DefaultOpt
opt.ListenAddr = testHOST + ":" + testPORT
opt.PassivePorts = testPASSIVEPORTRANGE
opt.BasicUser = testUSER
opt.BasicPass = testPASS
opt := ftpopt.DefaultOpt
opt.ListenAddr = testHOST + ":" + testPORT
opt.PassivePorts = testPASSIVEPORTRANGE
opt.BasicUser = "rclone"
opt.BasicPass = "password"
w, err := newServer(f, &opt)
assert.NoError(t, err)
fstest.Initialise()
quit := make(chan struct{})
go func() {
err := w.serve()
close(quit)
if err != ftp.ErrServerClosed {
assert.NoError(t, err)
}
}()
fremote, _, clean, err := fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir)
assert.NoError(t, err)
defer clean()
// Config for the backend we'll use to connect to the server
config := configmap.Simple{
"type": "ftp",
"host": testHOST,
"port": testPORT,
"user": testUSER,
"pass": obscure.MustObscure(testPASS),
}
err = fremote.Mkdir(context.Background(), "")
assert.NoError(t, err)
return config, func() {
err := w.close()
// Start the server
w, err := newServer(fremote, &opt)
assert.NoError(t, err)
go func() {
err := w.serve()
if err != ftp.ErrServerClosed {
assert.NoError(t, err)
<-quit
}
}()
defer func() {
err := w.close()
assert.NoError(t, err)
}()
// Change directory to run the tests
err = os.Chdir("../../../backend/ftp")
assert.NoError(t, err, "failed to cd to ftp remote")
// Run the ftp tests with an on the fly remote
args := []string{"test"}
if testing.Verbose() {
args = append(args, "-v")
}
servetest.Run(t, "ftp", start)
}
func TestFindID(t *testing.T) {
id, err := findID([]byte("TestFindID("))
require.NoError(t, err)
// id should be the argument to this function
assert.Equal(t, fmt.Sprintf("%p", t), id)
if *fstest.Verbose {
args = append(args, "-verbose")
}
args = append(args, "-list-retries", fmt.Sprint(*fstest.ListRetries))
args = append(args, "-remote", "ftptest:")
cmd := exec.Command("go", args...)
cmd.Env = append(os.Environ(),
"RCLONE_CONFIG_FTPTEST_TYPE=ftp",
"RCLONE_CONFIG_FTPTEST_HOST="+testHOST,
"RCLONE_CONFIG_FTPTEST_PORT="+testPORT,
"RCLONE_CONFIG_FTPTEST_USER=rclone",
"RCLONE_CONFIG_FTPTEST_PASS=0HU5Hx42YiLoNGJxppOOP3QTbr-KB_MP", // ./rclone obscure password
)
out, err := cmd.CombinedOutput()
if len(out) != 0 {
t.Logf("\n----------\n%s----------\n", string(out))
}
assert.NoError(t, err, "Running ftp integration tests")
}

View File

@@ -0,0 +1,28 @@
package ftpflags
import (
"github.com/rclone/rclone/cmd/serve/ftp/ftpopt"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/rc"
"github.com/spf13/pflag"
)
// Options set by command line flags
var (
Opt = ftpopt.DefaultOpt
)
// AddFlagsPrefix adds flags for the ftpopt
func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *ftpopt.Options) {
rc.AddOption("ftp", &Opt)
flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to.")
flags.StringVarP(flagSet, &Opt.PublicIP, prefix+"public-ip", "", Opt.PublicIP, "Public IP address to advertise for passive connections.")
flags.StringVarP(flagSet, &Opt.PassivePorts, prefix+"passive-port", "", Opt.PassivePorts, "Passive port range to use.")
flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication.")
flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication. (empty value allows any password)")
}
// AddFlags adds flags for the httplib
func AddFlags(flagSet *pflag.FlagSet) {
AddFlagsPrefix(flagSet, "", &Opt)
}

View File

@@ -0,0 +1,40 @@
package ftpopt
// Help contains text describing the http server to add to the command
// help.
var Help = `
### Server options
Use --addr to specify which IP address and port the server should
listen on, eg --addr 1.2.3.4:8000 or --addr :8080 to listen to all
IPs. By default it only listens on localhost. You can use port
:0 to let the OS choose an available port.
If you set --addr to listen on a public or LAN accessible IP address
then using Authentication is advised - see the next section for info.
#### Authentication
By default this will serve files without needing a login.
You can set a single username and password with the --user and --pass flags.
`
// Options contains options for the http Server
type Options struct {
//TODO add more options
ListenAddr string // Port to listen on
PublicIP string // Passive ports range
PassivePorts string // Passive ports range
BasicUser string // single username for basic auth if not using Htpasswd
BasicPass string // password for BasicUser
}
// DefaultOpt is the default values used for Options
var DefaultOpt = Options{
ListenAddr: "localhost:2121",
PublicIP: "",
PassivePorts: "30000-32000",
BasicUser: "anonymous",
BasicPass: "",
}
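The Options above map one-to-one onto the --addr, --public-ip, --passive-port, --user and --pass flags registered in ftpflags below. As a minimal sketch of building the same configuration programmatically (assuming the ftpopt import path used in this branch; the address and credentials are illustrative only):

package main

import (
	"fmt"

	"github.com/rclone/rclone/cmd/serve/ftp/ftpopt"
)

func main() {
	// Start from the package defaults (localhost:2121, anonymous user,
	// passive ports 30000-32000) and override what the flags would set.
	opt := ftpopt.DefaultOpt
	opt.ListenAddr = ":2121" // --addr :2121, listen on all IPs
	opt.BasicUser = "rclone" // --user rclone
	opt.BasicPass = "secret" // --pass secret
	fmt.Printf("would serve FTP on %q as user %q\n", opt.ListenAddr, opt.BasicUser)
	// serve ftp hands an Options value like this to its internal
	// newServer(f, &opt), as the TestFTP code elsewhere in this compare does.
}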

View File

@@ -68,7 +68,7 @@ func newServer(f fs.Fs, opt *httplib.Options) *server {
f: f,
vfs: vfs.New(f, &vfsflags.Opt),
}
mux.HandleFunc(s.Opt.Prefix+"/", s.handler)
mux.HandleFunc("/", s.handler)
return s
}
@@ -93,10 +93,7 @@ func (s *server) handler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Accept-Ranges", "bytes")
w.Header().Set("Server", "rclone/"+fs.Version)
urlPath, ok := s.Path(w, r)
if !ok {
return
}
urlPath := r.URL.Path
isDir := strings.HasSuffix(urlPath, "/")
remote := strings.Trim(urlPath, "/")
if isDir {

View File

@@ -26,9 +26,6 @@ func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *httplib.Options)
flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "realm for authentication")
flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication.")
flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication.")
if prefix == "" {
flags.StringVarP(flagSet, &Opt.Prefix, prefix+"prefix", "", Opt.Prefix, "Prefix for URLs.")
}
}
// AddFlags adds flags for the httplib

View File

@@ -44,11 +44,6 @@ for a transfer.
--max-header-bytes controls the maximum number of bytes the server will
accept in the HTTP header.
--prefix controls the URL prefix that rclone serves from. By default
rclone will serve from the root. If you used --prefix "rclone" then
rclone would serve from a URL starting with "/rclone/". This is
useful if you wish to proxy rclone serve.
#### Authentication
By default this will serve files without needing a login.
@@ -86,7 +81,6 @@ certificate authority certificate.
// Options contains options for the http Server
type Options struct {
ListenAddr string // Port to listen on
Prefix string // prefix to strip from URLs
ServerReadTimeout time.Duration // Timeout for server reading data
ServerWriteTimeout time.Duration // Timeout for server writing data
MaxHeaderBytes int // Maximum size of request header
@@ -97,15 +91,8 @@ type Options struct {
Realm string // realm for authentication
BasicUser string // single username for basic auth if not using Htpasswd
BasicPass string // password for BasicUser
Auth AuthFn // custom Auth (not set by command line flags)
}
// AuthFn if used will be used to authenticate user, pass. If an error
// is returned then the user is not authenticated.
//
// If a non nil value is returned then it is added to the context under the key
type AuthFn func(user, pass string) (value interface{}, err error)
// DefaultOpt is the default values used for Options
var DefaultOpt = Options{
ListenAddr: "localhost:8080",
@@ -130,14 +117,9 @@ type Server struct {
type contextUserType struct{}
// ContextUserKey is a simple context key for storing the username of the request
// ContextUserKey is a simple context key
var ContextUserKey = &contextUserType{}
type contextAuthType struct{}
// ContextAuthKey is a simple context key for storing info returned by AuthFn
var ContextAuthKey = &contextAuthType{}
// singleUserProvider provides the encrypted password for a single user
func (s *Server) singleUserProvider(user, realm string) string {
if user == s.Opt.BasicUser {
@@ -146,27 +128,6 @@ func (s *Server) singleUserProvider(user, realm string) string {
return ""
}
// parseAuthorization parses the Authorization header into user, pass
// it returns a boolean as to whether the parse was successful
func parseAuthorization(r *http.Request) (user, pass string, ok bool) {
authHeader := r.Header.Get("Authorization")
if authHeader != "" {
s := strings.SplitN(authHeader, " ", 2)
if len(s) == 2 && s[0] == "Basic" {
b, err := base64.StdEncoding.DecodeString(s[1])
if err == nil {
parts := strings.SplitN(string(b), ":", 2)
user = parts[0]
if len(parts) > 1 {
pass = parts[1]
ok = true
}
}
}
}
return
}
// NewServer creates an http server. The opt can be nil in which case
// the default options will be used.
func NewServer(handler http.Handler, opt *Options) *Server {
@@ -182,20 +143,17 @@ func NewServer(handler http.Handler, opt *Options) *Server {
}
// Use htpasswd if required on everything
if s.Opt.HtPasswd != "" || s.Opt.BasicUser != "" || s.Opt.Auth != nil {
var authenticator *auth.BasicAuth
if s.Opt.Auth == nil {
var secretProvider auth.SecretProvider
if s.Opt.HtPasswd != "" {
fs.Infof(nil, "Using %q as htpasswd storage", s.Opt.HtPasswd)
secretProvider = auth.HtpasswdFileProvider(s.Opt.HtPasswd)
} else {
fs.Infof(nil, "Using --user %s --pass XXXX as authenticated user", s.Opt.BasicUser)
s.basicPassHashed = string(auth.MD5Crypt([]byte(s.Opt.BasicPass), []byte("dlPL2MqE"), []byte("$1$")))
secretProvider = s.singleUserProvider
}
authenticator = auth.NewBasicAuthenticator(s.Opt.Realm, secretProvider)
if s.Opt.HtPasswd != "" || s.Opt.BasicUser != "" {
var secretProvider auth.SecretProvider
if s.Opt.HtPasswd != "" {
fs.Infof(nil, "Using %q as htpasswd storage", s.Opt.HtPasswd)
secretProvider = auth.HtpasswdFileProvider(s.Opt.HtPasswd)
} else {
fs.Infof(nil, "Using --user %s --pass XXXX as authenticated user", s.Opt.BasicUser)
s.basicPassHashed = string(auth.MD5Crypt([]byte(s.Opt.BasicPass), []byte("dlPL2MqE"), []byte("$1$")))
secretProvider = s.singleUserProvider
}
authenticator := auth.NewBasicAuthenticator(s.Opt.Realm, secretProvider)
oldHandler := handler
handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// No auth wanted for OPTIONS method
@@ -203,36 +161,26 @@ func NewServer(handler http.Handler, opt *Options) *Server {
oldHandler.ServeHTTP(w, r)
return
}
unauthorized := func() {
w.Header().Set("Content-Type", "text/plain")
w.Header().Set("WWW-Authenticate", `Basic realm="`+s.Opt.Realm+`"`)
http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
}
user, pass, authValid := parseAuthorization(r)
if !authValid {
unauthorized()
return
}
if s.Opt.Auth == nil {
if username := authenticator.CheckAuth(r); username == "" {
fs.Infof(r.URL.Path, "%s: Unauthorized request from %s", r.RemoteAddr, user)
unauthorized()
return
if username := authenticator.CheckAuth(r); username == "" {
authHeader := r.Header.Get(authenticator.Headers.V().Authorization)
if authHeader != "" {
s := strings.SplitN(authHeader, " ", 2)
var userName = "UNKNOWN"
if len(s) == 2 && s[0] == "Basic" {
b, err := base64.StdEncoding.DecodeString(s[1])
if err == nil {
userName = strings.SplitN(string(b), ":", 2)[0]
}
}
fs.Infof(r.URL.Path, "%s: Unauthorized request from %s", r.RemoteAddr, userName)
} else {
fs.Infof(r.URL.Path, "%s: Basic auth challenge sent", r.RemoteAddr)
}
authenticator.RequireAuth(w, r)
} else {
// Custom Auth
value, err := s.Opt.Auth(user, pass)
if err != nil {
fs.Infof(r.URL.Path, "%s: Auth failed from %s: %v", r.RemoteAddr, user, err)
unauthorized()
return
}
if value != nil {
r = r.WithContext(context.WithValue(r.Context(), ContextAuthKey, value))
}
r = r.WithContext(context.WithValue(r.Context(), ContextUserKey, username))
oldHandler.ServeHTTP(w, r)
}
r = r.WithContext(context.WithValue(r.Context(), ContextUserKey, user))
oldHandler.ServeHTTP(w, r)
})
s.usingAuth = true
}
@@ -242,14 +190,6 @@ func NewServer(handler http.Handler, opt *Options) *Server {
log.Fatalf("Need both -cert and -key to use SSL")
}
// If a Path is set then serve from there
if strings.HasSuffix(s.Opt.Prefix, "/") {
s.Opt.Prefix = s.Opt.Prefix[:len(s.Opt.Prefix)-1]
}
if s.Opt.Prefix != "" && !strings.HasPrefix(s.Opt.Prefix, "/") {
s.Opt.Prefix = "/" + s.Opt.Prefix
}
// FIXME make a transport?
s.httpServer = &http.Server{
Addr: s.Opt.ListenAddr,
@@ -359,27 +299,10 @@ func (s *Server) URL() string {
// (i.e. port assigned by operating system)
addr = s.listener.Addr().String()
}
return fmt.Sprintf("%s://%s%s/", proto, addr, s.Opt.Prefix)
return fmt.Sprintf("%s://%s/", proto, addr)
}
// UsingAuth returns true if authentication is required
func (s *Server) UsingAuth() bool {
return s.usingAuth
}
// Path returns the current path with the Prefix stripped
//
// If it returns false, then the path was invalid and the handler
// should exit as the error response has already been sent
func (s *Server) Path(w http.ResponseWriter, r *http.Request) (Path string, ok bool) {
Path = r.URL.Path
if s.Opt.Prefix == "" {
return Path, true
}
if !strings.HasPrefix(Path, s.Opt.Prefix+"/") {
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
return Path, false
}
Path = Path[len(s.Opt.Prefix):]
return Path, true
}
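Since the custom AuthFn hook and ContextAuthKey only appear on one side of this diff, here is a small sketch of how a caller could wire them up, assuming the httplib import path and the Options/NewServer shown above; the credentials and handler are illustrative only:

package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/rclone/rclone/cmd/serve/httplib"
)

func main() {
	opt := httplib.DefaultOpt
	// Custom Auth: any non-nil value returned is stored in the request
	// context under httplib.ContextAuthKey before the handler runs.
	opt.Auth = func(user, pass string) (interface{}, error) {
		if user == "demo" && pass == "secret" {
			return "session-for-" + user, nil
		}
		return nil, errors.New("bad credentials")
	}
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "auth value: %v\n", r.Context().Value(httplib.ContextAuthKey))
	})
	s := httplib.NewServer(handler, &opt)
	fmt.Println("auth enabled:", s.UsingAuth()) // true because Opt.Auth is set
}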

View File

@@ -1,270 +0,0 @@
// Package proxy implements a programmable proxy for rclone serve
package proxy
import (
"bytes"
"encoding/json"
"os/exec"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
libcache "github.com/rclone/rclone/lib/cache"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"golang.org/x/crypto/bcrypt"
)
// Help contains text describing how to use the proxy
var Help = strings.Replace(`
### Auth Proxy
If you supply the parameter |--auth-proxy /path/to/program| then
rclone will use that program to generate backends on the fly which
then are used to authenticate incoming requests. This uses a simple
JSON based protocol with input on STDIN and output on STDOUT.
There is an example program
[bin/test_proxy.py](https://github.com/rclone/rclone/blob/master/test_proxy.py)
in the rclone source code.
The program's job is to take a |user| and |pass| on the input and turn
those into the config for a backend on STDOUT in JSON format. This
config will have any default parameters for the backend added, but it
won't use configuration from environment variables or command line
options - it is the job of the proxy program to make a complete
config.
This config generated must have this extra parameter
- |_root| - root to use for the backend
And it may have this parameter
- |_obscure| - comma separated strings for parameters to obscure
For example the program might take this on STDIN
|||
{
"user": "me",
"pass": "mypassword"
}
|||
And return this on STDOUT
|||
{
"type": "sftp",
"_root": "",
"_obscure": "pass",
"user": "me",
"pass": "mypassword",
"host": "sftp.example.com"
}
|||
This would mean that an SFTP backend would be created on the fly for
the |user| and |pass| returned in the output to the host given. Note
that since |_obscure| is set to |pass|, rclone will obscure the |pass|
parameter before creating the backend (which is required for sftp
backends).
The program can manipulate the supplied |user| in any way, for example
to make proxy to many different sftp backends, you could make the
|user| be |user@example.com| and then set the |host| to |example.com|
in the output and the user to |user|. For security you'd probably want
to restrict the |host| to a limited list.
Note that an internal cache is keyed on |user| so only use that for
configuration, don't use |pass|. This also means that if a user's
password is changed the cache will need to expire (which takes 5 mins)
before it takes effect.
This can be used to build general purpose proxies to any kind of
backend that rclone supports.
`, "|", "`", -1)
// Options is options for creating the proxy
type Options struct {
AuthProxy string
}
// DefaultOpt is the default values used for Opt
var DefaultOpt = Options{
AuthProxy: "",
}
// Proxy represents a proxy to turn auth requests into a VFS
type Proxy struct {
cmdLine []string // broken down command line
vfsCache *libcache.Cache
Opt Options
}
// cacheEntry is what is stored in the vfsCache
type cacheEntry struct {
vfs *vfs.VFS // stored VFS
pwHash []byte // bcrypt hash of the password
}
// New creates a new proxy with the Options passed in
func New(opt *Options) *Proxy {
return &Proxy{
Opt: *opt,
cmdLine: strings.Fields(opt.AuthProxy),
vfsCache: libcache.New(),
}
}
// run the proxy command returning a config map
func (p *Proxy) run(in map[string]string) (config configmap.Simple, err error) {
cmd := exec.Command(p.cmdLine[0], p.cmdLine[1:]...)
inBytes, err := json.MarshalIndent(in, "", "\t")
if err != nil {
return nil, errors.Wrap(err, "Proxy.Call failed to marshal input: %v")
}
var stdout, stderr bytes.Buffer
cmd.Stdin = bytes.NewBuffer(inBytes)
cmd.Stdout = &stdout
cmd.Stderr = &stderr
start := time.Now()
err = cmd.Run()
fs.Debugf(nil, "Calling proxy %v", p.cmdLine)
duration := time.Since(start)
if err != nil {
return nil, errors.Wrapf(err, "proxy: failed on %v: %q", p.cmdLine, strings.TrimSpace(string(stderr.Bytes())))
}
err = json.Unmarshal(stdout.Bytes(), &config)
if err != nil {
return nil, errors.Wrapf(err, "proxy: failed to read output: %q", string(stdout.Bytes()))
}
fs.Debugf(nil, "Proxy returned in %v", duration)
// Obscure any values in the config map that need it
obscureFields, ok := config.Get("_obscure")
if ok {
for _, key := range strings.Split(obscureFields, ",") {
value, ok := config.Get(key)
if ok {
obscuredValue, err := obscure.Obscure(value)
if err != nil {
return nil, errors.Wrap(err, "proxy")
}
config.Set(key, obscuredValue)
}
}
}
return config, nil
}
// call runs the auth proxy and returns a cacheEntry and an error
func (p *Proxy) call(user, pass string, passwordBytes []byte) (value interface{}, err error) {
// Contact the proxy
config, err := p.run(map[string]string{
"user": user,
"pass": pass,
})
if err != nil {
return nil, err
}
// Look for required fields in the answer
fsName, ok := config.Get("type")
if !ok {
return nil, errors.New("proxy: type not set in result")
}
root, ok := config.Get("_root")
if !ok {
return nil, errors.New("proxy: _root not set in result")
}
// Find the backend
fsInfo, err := fs.Find(fsName)
if err != nil {
return nil, errors.Wrapf(err, "proxy: couldn't find backend for %q", fsName)
}
// base name of config on user name. This may appear in logs
name := "proxy-" + user
fsString := name + ":" + root
// Look for fs in the VFS cache
value, err = p.vfsCache.Get(user, func(key string) (value interface{}, ok bool, err error) {
// Create the Fs from the cache
f, err := cache.GetFn(fsString, func(fsString string) (fs.Fs, error) {
// Update the config with the default values
for i := range fsInfo.Options {
o := &fsInfo.Options[i]
if _, found := config.Get(o.Name); !found && o.Default != nil && o.String() != "" {
config.Set(o.Name, o.String())
}
}
return fsInfo.NewFs(name, root, config)
})
if err != nil {
return nil, false, err
}
pwHash, err := bcrypt.GenerateFromPassword(passwordBytes, bcrypt.DefaultCost)
if err != nil {
return nil, false, err
}
entry := cacheEntry{
vfs: vfs.New(f, &vfsflags.Opt),
pwHash: pwHash,
}
return entry, true, nil
})
if err != nil {
return nil, errors.Wrapf(err, "proxy: failed to create backend")
}
return value, nil
}
// Call runs the auth proxy with the given input, returning a *vfs.VFS
// and the key used in the VFS cache.
func (p *Proxy) Call(user, pass string) (VFS *vfs.VFS, vfsKey string, err error) {
var passwordBytes = []byte(pass)
// Look in the cache first
value, ok := p.vfsCache.GetMaybe(user)
// If not found then call the proxy for a fresh answer
if !ok {
value, err = p.call(user, pass, passwordBytes)
if err != nil {
return nil, "", err
}
}
// check we got what we were expecting
entry, ok := value.(cacheEntry)
if !ok {
return nil, "", errors.Errorf("proxy: value is not cache entry: %#v", value)
}
// Check the password is correct in the cached entry. This
// prevents an attack where subsequent requests for the same
// user don't have their auth checked. It does mean that if
// the password is changed, the user will have to wait for
// cache expiry (5m) before trying again.
err = bcrypt.CompareHashAndPassword(entry.pwHash, passwordBytes)
if err != nil {
return nil, "", errors.Wrap(err, "proxy: incorrect password")
}
return entry.vfs, user, nil
}
// Get VFS from the cache using key - returns nil if not found
func (p *Proxy) Get(key string) *vfs.VFS {
value, ok := p.vfsCache.GetMaybe(key)
if !ok {
return nil
}
entry := value.(cacheEntry)
return entry.vfs
}
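The Help text above fully specifies the proxy contract: one JSON object with |user| and |pass| arrives on STDIN, and one JSON config with |type|, |_root| and optionally |_obscure| must be written to STDOUT. As a rough Go counterpart of the bin/test_proxy.py demo removed earlier in this diff (a sketch only, not part of the change):

// Minimal auth proxy sketch: turn the incoming user/pass into an sftp
// backend on localhost, mirroring the removed Python demo.
package main

import (
	"encoding/json"
	"log"
	"os"
)

func main() {
	var in map[string]string
	if err := json.NewDecoder(os.Stdin).Decode(&in); err != nil {
		log.Fatal(err)
	}
	out := map[string]string{
		"type":     "sftp", // type of backend
		"_root":    "",     // root of the fs
		"_obscure": "pass", // comma separated list of fields to obscure
		"user":     in["user"],
		"pass":     in["pass"],
		"host":     "127.0.0.1",
	}
	if err := json.NewEncoder(os.Stdout).Encode(out); err != nil {
		log.Fatal(err)
	}
}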

View File

@@ -1,41 +0,0 @@
// +build ignore
// A simple auth proxy for testing purposes
package main
import (
"encoding/json"
"log"
"os"
)
func main() {
// Read the input
var in map[string]string
err := json.NewDecoder(os.Stdin).Decode(&in)
if err != nil {
log.Fatal(err)
}
// Write the output
var out = map[string]string{}
for k, v := range in {
switch k {
case "user":
v += "-test"
case "error":
log.Fatal(v)
}
out[k] = v
}
if out["type"] == "" {
out["type"] = "local"
}
if out["_root"] == "" {
out["_root"] = ""
}
json.NewEncoder(os.Stdout).Encode(&out)
if err != nil {
log.Fatal(err)
}
}

View File

@@ -1,145 +0,0 @@
package proxy
import (
"strings"
"testing"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/crypto/bcrypt"
)
func TestRun(t *testing.T) {
opt := DefaultOpt
cmd := "go run proxy_code.go"
opt.AuthProxy = cmd
p := New(&opt)
t.Run("Normal", func(t *testing.T) {
config, err := p.run(map[string]string{
"type": "ftp",
"user": "me",
"pass": "pass",
"host": "127.0.0.1",
})
require.NoError(t, err)
assert.Equal(t, configmap.Simple{
"type": "ftp",
"user": "me-test",
"pass": "pass",
"host": "127.0.0.1",
"_root": "",
}, config)
})
t.Run("Error", func(t *testing.T) {
config, err := p.run(map[string]string{
"error": "potato",
})
assert.Nil(t, config)
require.Error(t, err)
require.Contains(t, err.Error(), "potato")
})
t.Run("Obscure", func(t *testing.T) {
config, err := p.run(map[string]string{
"type": "ftp",
"user": "me",
"pass": "pass",
"host": "127.0.0.1",
"_obscure": "pass,user",
})
require.NoError(t, err)
config["user"] = obscure.MustReveal(config["user"])
config["pass"] = obscure.MustReveal(config["pass"])
assert.Equal(t, configmap.Simple{
"type": "ftp",
"user": "me-test",
"pass": "pass",
"host": "127.0.0.1",
"_obscure": "pass,user",
"_root": "",
}, config)
})
const testUser = "testUser"
const testPass = "testPass"
t.Run("call", func(t *testing.T) {
// check cache empty
assert.Equal(t, 0, p.vfsCache.Entries())
defer p.vfsCache.Clear()
passwordBytes := []byte(testPass)
value, err := p.call(testUser, testPass, passwordBytes)
require.NoError(t, err)
entry, ok := value.(cacheEntry)
require.True(t, ok)
// check hash is correct in entry
err = bcrypt.CompareHashAndPassword(entry.pwHash, passwordBytes)
require.NoError(t, err)
require.NotNil(t, entry.vfs)
f := entry.vfs.Fs()
require.NotNil(t, f)
assert.Equal(t, "proxy-"+testUser, f.Name())
assert.True(t, strings.HasPrefix(f.String(), "Local file system"))
// check it is in the cache
assert.Equal(t, 1, p.vfsCache.Entries())
cacheValue, ok := p.vfsCache.GetMaybe(testUser)
assert.True(t, ok)
assert.Equal(t, value, cacheValue)
})
t.Run("Call", func(t *testing.T) {
// check cache empty
assert.Equal(t, 0, p.vfsCache.Entries())
defer p.vfsCache.Clear()
vfs, vfsKey, err := p.Call(testUser, testPass)
require.NoError(t, err)
require.NotNil(t, vfs)
assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
assert.Equal(t, testUser, vfsKey)
// check it is in the cache
assert.Equal(t, 1, p.vfsCache.Entries())
cacheValue, ok := p.vfsCache.GetMaybe(testUser)
assert.True(t, ok)
cacheEntry, ok := cacheValue.(cacheEntry)
assert.True(t, ok)
assert.Equal(t, vfs, cacheEntry.vfs)
// Test Get works while we have something in the cache
t.Run("Get", func(t *testing.T) {
assert.Equal(t, vfs, p.Get(testUser))
assert.Nil(t, p.Get("unknown"))
})
// now try again from the cache
vfs, vfsKey, err = p.Call(testUser, testPass)
require.NoError(t, err)
require.NotNil(t, vfs)
assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
assert.Equal(t, testUser, vfsKey)
// check cache is at the same level
assert.Equal(t, 1, p.vfsCache.Entries())
// now try again from the cache but with wrong password
vfs, vfsKey, err = p.Call(testUser, testPass+"wrong")
require.Error(t, err)
require.Contains(t, err.Error(), "incorrect password")
require.Nil(t, vfs)
require.Equal(t, "", vfsKey)
// check cache is at the same level
assert.Equal(t, 1, p.vfsCache.Entries())
})
}

View File

@@ -1,18 +0,0 @@
// Package proxyflags implements command line flags to set up a proxy
package proxyflags
import (
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/fs/config/flags"
"github.com/spf13/pflag"
)
// Options set by command line flags
var (
Opt = proxy.DefaultOpt
)
// AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) {
flags.StringVarP(flagSet, &Opt.AuthProxy, "auth-proxy", "", Opt.AuthProxy, "A program to use to create the backend from the auth.")
}

View File

@@ -171,7 +171,7 @@ func newServer(f fs.Fs, opt *httplib.Options) *server {
Server: httplib.NewServer(mux, opt),
f: f,
}
mux.HandleFunc(s.Opt.Prefix+"/", s.handler)
mux.HandleFunc("/", s.handler)
return s
}
@@ -211,10 +211,7 @@ func (s *server) handler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Accept-Ranges", "bytes")
w.Header().Set("Server", "rclone/"+fs.Version)
path, ok := s.Path(w, r)
if !ok {
return
}
path := r.URL.Path
remote := makeRemote(path)
fs.Debugf(s.f, "%s %s", r.Method, path)

View File

@@ -1,35 +0,0 @@
// +build ignore
// A simple auth proxy for testing purposes
package main
import (
"encoding/json"
"log"
"os"
)
func main() {
if len(os.Args) < 2 {
log.Fatalf("Syntax: %s <root>", os.Args[0])
}
root := os.Args[1]
// Read the input
var in map[string]string
err := json.NewDecoder(os.Stdin).Decode(&in)
if err != nil {
log.Fatal(err)
}
// Write the output
var out = map[string]string{
"type": "local",
"_root": root,
"_obscure": "pass",
}
json.NewEncoder(os.Stdout).Encode(&out)
if err != nil {
log.Fatal(err)
}
}

View File

@@ -1,107 +0,0 @@
// Package servetest provides infrastructure for running loopback
// tests of "rclone serve backend:" against the backend integration
// tests.
package servetest
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// StartFn describes the callback which should start the server with
// the Fs passed in.
// It should return a config for the backend used to connect to the
// server and a clean up function
type StartFn func(f fs.Fs) (configmap.Simple, func())
// run runs the server then runs the unit tests for the remote against
// it.
func run(t *testing.T, name string, start StartFn, useProxy bool) {
fstest.Initialise()
fremote, _, clean, err := fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir)
assert.NoError(t, err)
defer clean()
err = fremote.Mkdir(context.Background(), "")
assert.NoError(t, err)
f := fremote
if useProxy {
// If using a proxy don't pass in the backend
f = nil
// the backend config will be made by the proxy
prog, err := filepath.Abs("../servetest/proxy_code.go")
require.NoError(t, err)
cmd := "go run " + prog + " " + fremote.Root()
// FIXME this is untidy setting a global variable!
proxyflags.Opt.AuthProxy = cmd
defer func() {
proxyflags.Opt.AuthProxy = ""
}()
}
config, cleanup := start(f)
defer cleanup()
// Change directory to run the tests
cwd, err := os.Getwd()
require.NoError(t, err)
err = os.Chdir("../../../backend/" + name)
require.NoError(t, err, "failed to cd to "+name+" backend")
defer func() {
// Change back to the old directory
require.NoError(t, os.Chdir(cwd))
}()
// Run the backend tests with an on the fly remote
args := []string{"test"}
if testing.Verbose() {
args = append(args, "-v")
}
if *fstest.Verbose {
args = append(args, "-verbose")
}
remoteName := name + "test:"
args = append(args, "-remote", remoteName)
args = append(args, "-list-retries", fmt.Sprint(*fstest.ListRetries))
cmd := exec.Command("go", args...)
// Configure the backend with environment variables
cmd.Env = os.Environ()
prefix := "RCLONE_CONFIG_" + strings.ToUpper(remoteName[:len(remoteName)-1]) + "_"
for k, v := range config {
cmd.Env = append(cmd.Env, prefix+strings.ToUpper(k)+"="+v)
}
// Run the test
out, err := cmd.CombinedOutput()
if len(out) != 0 {
t.Logf("\n----------\n%s----------\n", string(out))
}
assert.NoError(t, err, "Running "+name+" integration tests")
}
// Run runs the server then runs the unit tests for the remote against
// it.
func Run(t *testing.T, name string, start StartFn) {
t.Run("Normal", func(t *testing.T) {
run(t, name, start, false)
})
t.Run("AuthProxy", func(t *testing.T) {
run(t, name, start, true)
})
}

View File

@@ -47,6 +47,7 @@ func shellUnEscape(str string) string {
// Info about the current connection
type conn struct {
vfs *vfs.VFS
f fs.Fs
handlers sftp.Handlers
what string
}
@@ -64,7 +65,7 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (
fs.Debugf(c.what, "exec command: binary = %q, args = %q", binary, args)
switch binary {
case "df":
about := c.vfs.Fs().Features().About
about := c.f.Features().About
if about == nil {
return errors.New("df not supported")
}
@@ -120,7 +121,7 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (
// special cases for rclone command detection
switch args {
case "'abc' | md5sum":
if c.vfs.Fs().Hashes().Contains(hash.MD5) {
if c.f.Hashes().Contains(hash.MD5) {
_, err = fmt.Fprintf(out, "0bee89b07a248e27c83fc3d5951213c1 -\n")
if err != nil {
return errors.Wrap(err, "send output failed")
@@ -129,7 +130,7 @@ func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (
return errors.New("md5 hash not supported")
}
case "'abc' | sha1sum":
if c.vfs.Fs().Hashes().Contains(hash.SHA1) {
if c.f.Hashes().Contains(hash.SHA1) {
_, err = fmt.Fprintf(out, "03cfd743661f07975fa2f1220c5194cbaff48451 -\n")
if err != nil {
return errors.Wrap(err, "send output failed")

View File

@@ -19,14 +19,14 @@ type vfsHandler struct {
}
// vfsHandler returns a Handlers object with the test handlers.
func newVFSHandler(vfs *vfs.VFS) sftp.Handlers {
func newVFSHandler(vfs *vfs.VFS) (sftp.Handlers, error) {
v := vfsHandler{VFS: vfs}
return sftp.Handlers{
FileGet: v,
FilePut: v,
FileCmd: v,
FileList: v,
}
}, nil
}
func (v vfsHandler) Fileread(r *sftp.Request) (io.ReaderAt, error) {

View File

@@ -18,8 +18,7 @@ import (
"strings"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/pkg/sftp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/lib/env"
@@ -34,47 +33,21 @@ type server struct {
opt Options
vfs *vfs.VFS
config *ssh.ServerConfig
handlers sftp.Handlers
listener net.Listener
waitChan chan struct{} // for waiting on the listener to close
proxy *proxy.Proxy
}
func newServer(f fs.Fs, opt *Options) *server {
s := &server{
f: f,
vfs: vfs.New(f, &vfsflags.Opt),
opt: *opt,
waitChan: make(chan struct{}),
}
if proxyflags.Opt.AuthProxy != "" {
s.proxy = proxy.New(&proxyflags.Opt)
} else {
s.vfs = vfs.New(f, &vfsflags.Opt)
}
return s
}
// getVFS gets the vfs from s or the proxy
func (s *server) getVFS(what string, sshConn *ssh.ServerConn) (VFS *vfs.VFS) {
if s.proxy == nil {
return s.vfs
}
if sshConn.Permissions == nil && sshConn.Permissions.Extensions == nil {
fs.Infof(what, "SSH Permissions Extensions not found")
return nil
}
key := sshConn.Permissions.Extensions["_vfsKey"]
if key == "" {
fs.Infof(what, "VFS key not found")
return nil
}
VFS = s.proxy.Get(key)
if VFS == nil {
fs.Infof(what, "failed to read VFS from cache")
return nil
}
return VFS
}
func (s *server) acceptConnections() {
for {
nConn, err := s.listener.Accept()
@@ -100,15 +73,11 @@ func (s *server) acceptConnections() {
go ssh.DiscardRequests(reqs)
c := &conn{
what: what,
vfs: s.getVFS(what, sshConn),
vfs: s.vfs,
f: s.f,
handlers: s.handlers,
what: what,
}
if c.vfs == nil {
fs.Infof(what, "Closing unauthenticated connection (couldn't find VFS)")
_ = nConn.Close()
continue
}
c.handlers = newVFSHandler(c.vfs)
// Accept all channels
go c.handleChannels(chans)
@@ -140,19 +109,7 @@ func (s *server) serve() (err error) {
ServerVersion: "SSH-2.0-" + fs.Config.UserAgent,
PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
fs.Debugf(describeConn(c), "Password login attempt for %s", c.User())
if s.proxy != nil {
// query the proxy for the config
_, vfsKey, err := s.proxy.Call(c.User(), string(pass))
if err != nil {
return nil, err
}
// just return the Key so we can get it back from the cache
return &ssh.Permissions{
Extensions: map[string]string{
"_vfsKey": vfsKey,
},
}, nil
} else if s.opt.User != "" && s.opt.Pass != "" {
if s.opt.User != "" && s.opt.Pass != "" {
userOK := subtle.ConstantTimeCompare([]byte(c.User()), []byte(s.opt.User))
passOK := subtle.ConstantTimeCompare(pass, []byte(s.opt.Pass))
if (userOK & passOK) == 1 {
@@ -163,9 +120,6 @@ func (s *server) serve() (err error) {
},
PublicKeyCallback: func(c ssh.ConnMetadata, pubKey ssh.PublicKey) (*ssh.Permissions, error) {
fs.Debugf(describeConn(c), "Public key login attempt for %s", c.User())
if s.proxy != nil {
return nil, errors.New("public key login not allowed when using auth proxy")
}
if _, ok := authorizedKeysMap[string(pubKey.Marshal())]; ok {
return &ssh.Permissions{
// Record the public key used for authentication.
@@ -224,6 +178,11 @@ func (s *server) serve() (err error) {
}
fs.Logf(nil, "SFTP server listening on %v\n", s.listener.Addr())
s.handlers, err = newVFSHandler(s.vfs)
if err != nil {
return errors.Wrap(err, "serve sftp: failed to create fs")
}
go s.acceptConnections()
return nil


@@ -6,9 +6,6 @@ package sftp
import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs"
@@ -49,7 +46,6 @@ func AddFlags(flagSet *pflag.FlagSet, Opt *Options) {
func init() {
vfsflags.AddFlags(Command.Flags())
proxyflags.AddFlags(Command.Flags())
AddFlags(Command.Flags(), &Opt)
}
@@ -88,15 +84,10 @@ reachable externally then supply "--addr :2022" for example.
Note that the default of "--vfs-cache-mode off" is fine for the rclone
sftp backend, but it may not be with other SFTP clients.
` + vfs.Help + proxy.Help,
` + vfs.Help,
Run: func(command *cobra.Command, args []string) {
var f fs.Fs
if proxyflags.Opt.AuthProxy == "" {
cmd.CheckArgs(1, 1, command, args)
f = cmd.NewFsSrc(args)
} else {
cmd.CheckArgs(0, 0, command, args)
}
cmd.CheckArgs(1, 1, command, args)
f := cmd.NewFsSrc(args)
cmd.Run(false, true, command, func() error {
s := newServer(f, &Opt)
err := s.Serve()


@@ -8,15 +8,16 @@
package sftp
import (
"context"
"os"
"os/exec"
"strings"
"testing"
"github.com/pkg/sftp"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fstest"
"github.com/stretchr/testify/assert"
)
@@ -37,35 +38,58 @@ var (
// TestSftp runs the sftp server then runs the unit tests for the
// sftp remote against it.
func TestSftp(t *testing.T) {
// Configure and start the server
start := func(f fs.Fs) (configmap.Simple, func()) {
opt := DefaultOpt
opt.ListenAddr = testBindAddress
opt.User = testUser
opt.Pass = testPass
fstest.Initialise()
w := newServer(f, &opt)
assert.NoError(t, w.serve())
fremote, _, clean, err := fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir)
assert.NoError(t, err)
defer clean()
// Read the host and port we started on
addr := w.Addr()
colon := strings.LastIndex(addr, ":")
err = fremote.Mkdir(context.Background(), "")
assert.NoError(t, err)
// Config for the backend we'll use to connect to the server
config := configmap.Simple{
"type": "sftp",
"user": testUser,
"pass": obscure.MustObscure(testPass),
"host": addr[:colon],
"port": addr[colon+1:],
}
opt := DefaultOpt
opt.ListenAddr = testBindAddress
opt.User = testUser
opt.Pass = testPass
// return a stop function
return config, func() {
w.Close()
w.Wait()
}
// Start the server
w := newServer(fremote, &opt)
assert.NoError(t, w.serve())
defer func() {
w.Close()
w.Wait()
}()
// Change directory to run the tests
err = os.Chdir("../../../backend/sftp")
assert.NoError(t, err, "failed to cd to sftp backend")
// Run the sftp tests with an on the fly remote
args := []string{"test"}
if testing.Verbose() {
args = append(args, "-v")
}
servetest.Run(t, "sftp", start)
if *fstest.Verbose {
args = append(args, "-verbose")
}
args = append(args, "-remote", "sftptest:")
cmd := exec.Command("go", args...)
addr := w.Addr()
colon := strings.LastIndex(addr, ":")
if colon < 0 {
panic("need a : in the address: " + addr)
}
host, port := addr[:colon], addr[colon+1:]
cmd.Env = append(os.Environ(),
"RCLONE_CONFIG_SFTPTEST_TYPE=sftp",
"RCLONE_CONFIG_SFTPTEST_HOST="+host,
"RCLONE_CONFIG_SFTPTEST_PORT="+port,
"RCLONE_CONFIG_SFTPTEST_USER="+testUser,
"RCLONE_CONFIG_SFTPTEST_PASS="+obscure.MustObscure(testPass),
)
out, err := cmd.CombinedOutput()
if len(out) != 0 {
t.Logf("\n----------\n%s----------\n", string(out))
}
assert.NoError(t, err, "Running sftp integration tests")
}


@@ -12,11 +12,9 @@ import (
"github.com/rclone/rclone/cmd/serve/httplib"
"github.com/rclone/rclone/cmd/serve/httplib/httpflags"
"github.com/rclone/rclone/cmd/serve/httplib/serve"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/errors"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/spf13/cobra"
@@ -32,7 +30,6 @@ var (
func init() {
httpflags.AddFlags(Command.Flags())
vfsflags.AddFlags(Command.Flags())
proxyflags.AddFlags(Command.Flags())
Command.Flags().StringVar(&hashName, "etag-hash", "", "Which hash to use for the ETag, or auto or blank for off")
Command.Flags().BoolVar(&disableGETDir, "disable-dir-list", false, "Disable HTML directory list on GET request for a directory")
}
@@ -60,15 +57,10 @@ supported hash on the backend or you can use a named hash such as
Use "rclone hashsum" to see the full list.
` + httplib.Help + vfs.Help + proxy.Help,
` + httplib.Help + vfs.Help,
RunE: func(command *cobra.Command, args []string) error {
var f fs.Fs
if proxyflags.Opt.AuthProxy == "" {
cmd.CheckArgs(1, 1, command, args)
f = cmd.NewFsSrc(args)
} else {
cmd.CheckArgs(0, 0, command, args)
}
cmd.CheckArgs(1, 1, command, args)
f := cmd.NewFsSrc(args)
hashType = hash.None
if hashName == "auto" {
hashType = f.Hashes().GetOne()
@@ -109,9 +101,8 @@ Use "rclone hashsum" to see the full list.
type WebDAV struct {
*httplib.Server
f fs.Fs
_vfs *vfs.VFS // don't use directly, use getVFS
vfs *vfs.VFS
webdavhandler *webdav.Handler
proxy *proxy.Proxy
}
// check interface
@@ -120,58 +111,21 @@ var _ webdav.FileSystem = (*WebDAV)(nil)
// Make a new WebDAV to serve the remote
func newWebDAV(f fs.Fs, opt *httplib.Options) *WebDAV {
w := &WebDAV{
f: f,
f: f,
vfs: vfs.New(f, &vfsflags.Opt),
}
if proxyflags.Opt.AuthProxy != "" {
w.proxy = proxy.New(&proxyflags.Opt)
// override auth
copyOpt := *opt
copyOpt.Auth = w.auth
opt = &copyOpt
} else {
w._vfs = vfs.New(f, &vfsflags.Opt)
}
w.Server = httplib.NewServer(http.HandlerFunc(w.handler), opt)
webdavHandler := &webdav.Handler{
Prefix: w.Server.Opt.Prefix,
FileSystem: w,
LockSystem: webdav.NewMemLS(),
Logger: w.logRequest, // FIXME
}
w.webdavhandler = webdavHandler
w.Server = httplib.NewServer(http.HandlerFunc(w.handler), opt)
return w
}
// Gets the VFS in use for this request
func (w *WebDAV) getVFS(ctx context.Context) (VFS *vfs.VFS, err error) {
if w._vfs != nil {
return w._vfs, nil
}
value := ctx.Value(httplib.ContextAuthKey)
if value == nil {
return nil, errors.New("no VFS found in context")
}
VFS, ok := value.(*vfs.VFS)
if !ok {
return nil, errors.Errorf("context value is not VFS: %#v", value)
}
return VFS, nil
}
// auth does proxy authorization
func (w *WebDAV) auth(user, pass string) (value interface{}, err error) {
VFS, _, err := w.proxy.Call(user, pass)
if err != nil {
return nil, err
}
return VFS, err
}
func (w *WebDAV) handler(rw http.ResponseWriter, r *http.Request) {
urlPath, ok := w.Path(rw, r)
if !ok {
return
}
urlPath := r.URL.Path
isDir := strings.HasSuffix(urlPath, "/")
remote := strings.Trim(urlPath, "/")
if !disableGETDir && (r.Method == "GET" || r.Method == "HEAD") && isDir {
@@ -184,14 +138,8 @@ func (w *WebDAV) handler(rw http.ResponseWriter, r *http.Request) {
// serveDir serves a directory index at dirRemote
// This is similar to serveDir in serve http.
func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote string) {
VFS, err := w.getVFS(r.Context())
if err != nil {
http.Error(rw, "Root directory not found", http.StatusNotFound)
fs.Errorf(nil, "Failed to serve directory: %v", err)
return
}
// List the directory
node, err := VFS.Stat(dirRemote)
node, err := w.vfs.Stat(dirRemote)
if err == vfs.ENOENT {
http.Error(rw, "Directory not found", http.StatusNotFound)
return
@@ -238,12 +186,8 @@ func (w *WebDAV) logRequest(r *http.Request, err error) {
// Mkdir creates a directory
func (w *WebDAV) Mkdir(ctx context.Context, name string, perm os.FileMode) (err error) {
// defer log.Trace(name, "perm=%v", perm)("err = %v", &err)
VFS, err := w.getVFS(ctx)
if err != nil {
return err
}
dir, leaf, err := VFS.StatParent(name)
defer log.Trace(name, "perm=%v", perm)("err = %v", &err)
dir, leaf, err := w.vfs.StatParent(name)
if err != nil {
return err
}
@@ -253,12 +197,8 @@ func (w *WebDAV) Mkdir(ctx context.Context, name string, perm os.FileMode) (err
// OpenFile opens a file or a directory
func (w *WebDAV) OpenFile(ctx context.Context, name string, flags int, perm os.FileMode) (file webdav.File, err error) {
// defer log.Trace(name, "flags=%v, perm=%v", flags, perm)("err = %v", &err)
VFS, err := w.getVFS(ctx)
if err != nil {
return nil, err
}
f, err := VFS.OpenFile(name, flags, perm)
defer log.Trace(name, "flags=%v, perm=%v", flags, perm)("err = %v", &err)
f, err := w.vfs.OpenFile(name, flags, perm)
if err != nil {
return nil, err
}
@@ -267,12 +207,8 @@ func (w *WebDAV) OpenFile(ctx context.Context, name string, flags int, perm os.F
// RemoveAll removes a file or a directory and its contents
func (w *WebDAV) RemoveAll(ctx context.Context, name string) (err error) {
// defer log.Trace(name, "")("err = %v", &err)
VFS, err := w.getVFS(ctx)
if err != nil {
return err
}
node, err := VFS.Stat(name)
defer log.Trace(name, "")("err = %v", &err)
node, err := w.vfs.Stat(name)
if err != nil {
return err
}
@@ -285,22 +221,14 @@ func (w *WebDAV) RemoveAll(ctx context.Context, name string) (err error) {
// Rename a file or a directory
func (w *WebDAV) Rename(ctx context.Context, oldName, newName string) (err error) {
// defer log.Trace(oldName, "newName=%q", newName)("err = %v", &err)
VFS, err := w.getVFS(ctx)
if err != nil {
return err
}
return VFS.Rename(oldName, newName)
defer log.Trace(oldName, "newName=%q", newName)("err = %v", &err)
return w.vfs.Rename(oldName, newName)
}
// Stat returns info about the file or directory
func (w *WebDAV) Stat(ctx context.Context, name string) (fi os.FileInfo, err error) {
// defer log.Trace(name, "")("fi=%+v, err = %v", &fi, &err)
VFS, err := w.getVFS(ctx)
if err != nil {
return nil, err
}
fi, err = VFS.Stat(name)
defer log.Trace(name, "")("fi=%+v, err = %v", &fi, &err)
fi, err = w.vfs.Stat(name)
if err != nil {
return nil, err
}
@@ -342,7 +270,7 @@ type FileInfo struct {
// ETag returns an ETag for the FileInfo
func (fi FileInfo) ETag(ctx context.Context) (etag string, err error) {
// defer log.Trace(fi, "")("etag=%q, err=%v", &etag, &err)
defer log.Trace(fi, "")("etag=%q, err=%v", &etag, &err)
if hashType == hash.None {
return "", webdav.ErrNotImplemented
}
@@ -365,7 +293,7 @@ func (fi FileInfo) ETag(ctx context.Context) (etag string, err error) {
// ContentType returns a content type for the FileInfo
func (fi FileInfo) ContentType(ctx context.Context) (contentType string, err error) {
// defer log.Trace(fi, "")("etag=%q, err=%v", &contentType, &err)
defer log.Trace(fi, "")("etag=%q, err=%v", &contentType, &err)
node, ok := (fi.FileInfo).(vfs.Node)
if !ok {
fs.Errorf(fi, "Expecting vfs.Node, got %T", fi.FileInfo)


@@ -8,22 +8,21 @@
package webdav
import (
"context"
"flag"
"io/ioutil"
"net/http"
"os"
"os/exec"
"strings"
"testing"
"time"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/httplib"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/webdav"
@@ -31,8 +30,6 @@ import (
const (
testBindAddress = "localhost:0"
testUser = "user"
testPass = "pass"
)
// check interfaces
@@ -45,34 +42,50 @@ var (
// TestWebDav runs the webdav server then runs the unit tests for the
// webdav remote against it.
func TestWebDav(t *testing.T) {
// Configure and start the server
start := func(f fs.Fs) (configmap.Simple, func()) {
opt := httplib.DefaultOpt
opt.ListenAddr = testBindAddress
opt.BasicUser = testUser
opt.BasicPass = testPass
hashType = hash.MD5
opt := httplib.DefaultOpt
opt.ListenAddr = testBindAddress
// Start the server
w := newWebDAV(f, &opt)
assert.NoError(t, w.serve())
fstest.Initialise()
// Config for the backend we'll use to connect to the server
config := configmap.Simple{
"type": "webdav",
"vendor": "other",
"url": w.Server.URL(),
"user": testUser,
"pass": obscure.MustObscure(testPass),
}
fremote, _, clean, err := fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir)
assert.NoError(t, err)
defer clean()
return config, func() {
w.Close()
w.Wait()
}
err = fremote.Mkdir(context.Background(), "")
assert.NoError(t, err)
// Start the server
w := newWebDAV(fremote, &opt)
assert.NoError(t, w.serve())
defer func() {
w.Close()
w.Wait()
}()
// Change directory to run the tests
err = os.Chdir("../../../backend/webdav")
assert.NoError(t, err, "failed to cd to webdav remote")
// Run the webdav tests with an on the fly remote
args := []string{"test"}
if testing.Verbose() {
args = append(args, "-v")
}
servetest.Run(t, "webdav", start)
if *fstest.Verbose {
args = append(args, "-verbose")
}
args = append(args, "-remote", "webdavtest:")
cmd := exec.Command("go", args...)
cmd.Env = append(os.Environ(),
"RCLONE_CONFIG_WEBDAVTEST_TYPE=webdav",
"RCLONE_CONFIG_WEBDAVTEST_URL="+w.Server.URL(),
"RCLONE_CONFIG_WEBDAVTEST_VENDOR=other",
)
out, err := cmd.CombinedOutput()
if len(out) != 0 {
t.Logf("\n----------\n%s----------\n", string(out))
}
assert.NoError(t, err, "Running webdav integration tests")
}
// Test serve http functionality in serve webdav
@@ -84,6 +97,10 @@ var (
)
func TestHTTPFunction(t *testing.T) {
// cd to correct directory for testing
err := os.Chdir("../../cmd/serve/webdav")
assert.NoError(t, err, "failed to cd to webdav cmd directory")
// exclude files called hidden.txt and directories called hidden
require.NoError(t, filter.Active.AddRule("- hidden.txt"))
require.NoError(t, filter.Active.AddRule("- hidden/**"))

fs/cache/cache.go vendored

@@ -2,39 +2,93 @@
package cache
import (
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/cache"
)
var (
c = cache.New()
fsCacheMu sync.Mutex
fsCache = map[string]*cacheEntry{}
fsNewFs = fs.NewFs // for tests
expireRunning = false
cacheExpireDuration = 300 * time.Second // expire the cache entry when it is older than this
cacheExpireInterval = 60 * time.Second // interval to run the cache expire
)
// GetFn gets a fs.Fs named fsString either from the cache or creates
// it afresh with the create function
func GetFn(fsString string, create func(fsString string) (fs.Fs, error)) (f fs.Fs, err error) {
value, err := c.Get(fsString, func(fsString string) (value interface{}, ok bool, error error) {
f, err := create(fsString)
ok = err == nil || err == fs.ErrorIsFile
return f, ok, err
})
if err != nil {
return nil, err
}
return value.(fs.Fs), nil
type cacheEntry struct {
f fs.Fs // cached f
err error // nil or fs.ErrorIsFile
fsString string // remote string
lastUsed time.Time // time used for expiry
}
// Get gets a fs.Fs named fsString either from the cache or creates it afresh
func Get(fsString string) (f fs.Fs, err error) {
return GetFn(fsString, fs.NewFs)
fsCacheMu.Lock()
entry, ok := fsCache[fsString]
if !ok {
fsCacheMu.Unlock() // Unlock in case Get is called recursively
f, err = fsNewFs(fsString)
if err != nil && err != fs.ErrorIsFile {
return f, err
}
entry = &cacheEntry{
f: f,
fsString: fsString,
err: err,
}
fsCacheMu.Lock()
fsCache[fsString] = entry
}
defer fsCacheMu.Unlock()
entry.lastUsed = time.Now()
if !expireRunning {
time.AfterFunc(cacheExpireInterval, cacheExpire)
expireRunning = true
}
return entry.f, entry.err
}
// Put puts an fs.Fs named fsString into the cache
func Put(fsString string, f fs.Fs) {
c.Put(fsString, f)
fsCacheMu.Lock()
defer fsCacheMu.Unlock()
fsCache[fsString] = &cacheEntry{
f: f,
fsString: fsString,
lastUsed: time.Now(),
}
if !expireRunning {
time.AfterFunc(cacheExpireInterval, cacheExpire)
expireRunning = true
}
}
// cacheExpire expires any entries that haven't been used recently
func cacheExpire() {
fsCacheMu.Lock()
defer fsCacheMu.Unlock()
now := time.Now()
for fsString, entry := range fsCache {
if now.Sub(entry.lastUsed) > cacheExpireDuration {
delete(fsCache, fsString)
}
}
if len(fsCache) != 0 {
time.AfterFunc(cacheExpireInterval, cacheExpire)
expireRunning = true
} else {
expireRunning = false
}
}
// Clear removes everything from the cache
func Clear() {
c.Clear()
fsCacheMu.Lock()
for k := range fsCache {
delete(fsCache, k)
}
fsCacheMu.Unlock()
}


@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/mockfs"
@@ -16,9 +17,10 @@ var (
errSentinel = errors.New("an error")
)
func mockNewFs(t *testing.T) (func(), func(path string) (fs.Fs, error)) {
func mockNewFs(t *testing.T) func() {
called = 0
create := func(path string) (fs.Fs, error) {
oldFsNewFs := fsNewFs
fsNewFs = func(path string) (fs.Fs, error) {
assert.Equal(t, 0, called)
called++
switch path {
@@ -31,74 +33,115 @@ func mockNewFs(t *testing.T) (func(), func(path string) (fs.Fs, error)) {
}
panic(fmt.Sprintf("Unknown path %q", path))
}
cleanup := func() {
c.Clear()
return func() {
fsNewFs = oldFsNewFs
fsCacheMu.Lock()
fsCache = map[string]*cacheEntry{}
expireRunning = false
fsCacheMu.Unlock()
}
return cleanup, create
}
func TestGet(t *testing.T) {
cleanup, create := mockNewFs(t)
defer cleanup()
defer mockNewFs(t)()
assert.Equal(t, 0, c.Entries())
assert.Equal(t, 0, len(fsCache))
f, err := GetFn("/", create)
f, err := Get("/")
require.NoError(t, err)
assert.Equal(t, 1, c.Entries())
assert.Equal(t, 1, len(fsCache))
f2, err := GetFn("/", create)
f2, err := Get("/")
require.NoError(t, err)
assert.Equal(t, f, f2)
}
func TestGetFile(t *testing.T) {
cleanup, create := mockNewFs(t)
defer cleanup()
defer mockNewFs(t)()
assert.Equal(t, 0, c.Entries())
assert.Equal(t, 0, len(fsCache))
f, err := GetFn("/file.txt", create)
f, err := Get("/file.txt")
require.Equal(t, fs.ErrorIsFile, err)
assert.Equal(t, 1, c.Entries())
assert.Equal(t, 1, len(fsCache))
f2, err := GetFn("/file.txt", create)
f2, err := Get("/file.txt")
require.Equal(t, fs.ErrorIsFile, err)
assert.Equal(t, f, f2)
}
func TestGetError(t *testing.T) {
cleanup, create := mockNewFs(t)
defer cleanup()
defer mockNewFs(t)()
assert.Equal(t, 0, c.Entries())
assert.Equal(t, 0, len(fsCache))
f, err := GetFn("/error", create)
f, err := Get("/error")
require.Equal(t, errSentinel, err)
require.Equal(t, nil, f)
assert.Equal(t, 0, c.Entries())
assert.Equal(t, 0, len(fsCache))
}
func TestPut(t *testing.T) {
cleanup, create := mockNewFs(t)
defer cleanup()
defer mockNewFs(t)()
f := mockfs.NewFs("mock", "mock")
assert.Equal(t, 0, c.Entries())
assert.Equal(t, 0, len(fsCache))
Put("/alien", f)
assert.Equal(t, 1, c.Entries())
assert.Equal(t, 1, len(fsCache))
fNew, err := GetFn("/alien", create)
fNew, err := Get("/alien")
require.NoError(t, err)
require.Equal(t, f, fNew)
assert.Equal(t, 1, c.Entries())
assert.Equal(t, 1, len(fsCache))
}
func TestCacheExpire(t *testing.T) {
defer mockNewFs(t)()
cacheExpireInterval = time.Millisecond
assert.Equal(t, false, expireRunning)
_, err := Get("/")
require.NoError(t, err)
fsCacheMu.Lock()
entry := fsCache["/"]
assert.Equal(t, 1, len(fsCache))
fsCacheMu.Unlock()
cacheExpire()
fsCacheMu.Lock()
assert.Equal(t, 1, len(fsCache))
entry.lastUsed = time.Now().Add(-cacheExpireDuration - 60*time.Second)
assert.Equal(t, true, expireRunning)
fsCacheMu.Unlock()
time.Sleep(10 * time.Millisecond)
fsCacheMu.Lock()
assert.Equal(t, false, expireRunning)
assert.Equal(t, 0, len(fsCache))
fsCacheMu.Unlock()
}
func TestClear(t *testing.T) {
defer mockNewFs(t)()
assert.Equal(t, 0, len(fsCache))
_, err := Get("/")
require.NoError(t, err)
assert.Equal(t, 1, len(fsCache))
Clear()
assert.Equal(t, 0, len(fsCache))
}


@@ -6,7 +6,6 @@ import (
"testing"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
@@ -53,7 +52,7 @@ func TestMultithreadCopy(t *testing.T) {
} {
t.Run(fmt.Sprintf("%+v", test), func(t *testing.T) {
var err error
contents := random.String(test.size)
contents := fstest.RandomString(test.size)
t1 := fstest.Time("2001-02-03T04:05:06.499999999Z")
file1 := r.WriteObject(context.Background(), "file1", contents, t1)
fstest.CheckItems(t, r.Fremote, file1)

View File

@@ -8,6 +8,7 @@ import (
"fmt"
"io"
"io/ioutil"
"math/rand"
"path"
"path/filepath"
"sort"
@@ -27,7 +28,6 @@ import (
"github.com/rclone/rclone/fs/march"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/sync/errgroup"
)
@@ -1666,7 +1666,7 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
// to avoid issues with certain remotes and avoid file deletion.
if !cp && fdst.Name() == fsrc.Name() && fdst.Features().CaseInsensitive && dstFileName != srcFileName && strings.ToLower(dstFilePath) == strings.ToLower(srcFilePath) {
// Create random name to temporarily move file to
tmpObjName := dstFileName + "-rclone-move-" + random.String(8)
tmpObjName := dstFileName + "-rclone-move-" + random(8)
_, err := fdst.NewObject(ctx, tmpObjName)
if err != fs.ErrorObjectNotFound {
if err == nil {
@@ -1730,6 +1730,17 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
return err
}
// random generates a pseudorandom alphanumeric string
func random(length int) string {
randomOutput := make([]byte, length)
possibleCharacters := "123567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
rand.Seed(time.Now().Unix())
for i := range randomOutput {
randomOutput[i] = possibleCharacters[rand.Intn(len(possibleCharacters))]
}
return string(randomOutput)
}
// MoveFile moves a single file possibly to a new name
func MoveFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) {
return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, false)


@@ -20,8 +20,8 @@ func AddFlags(flagSet *pflag.FlagSet) {
flags.StringVarP(flagSet, &Opt.Files, "rc-files", "", "", "Path to local files to serve on the HTTP server.")
flags.BoolVarP(flagSet, &Opt.Serve, "rc-serve", "", false, "Enable the serving of remote objects.")
flags.BoolVarP(flagSet, &Opt.NoAuth, "rc-no-auth", "", false, "Don't require auth for certain methods.")
flags.BoolVarP(flagSet, &Opt.WebUI, "rc-web-gui", "", false, "Launch WebGUI on localhost")
flags.BoolVarP(flagSet, &Opt.WebUI, "rc-web-gui", "w", false, "Launch WebGUI on localhost")
flags.BoolVarP(flagSet, &Opt.WebGUIUpdate, "rc-web-gui-update", "", false, "Update / Force update to latest version of web gui")
flags.StringVarP(flagSet, &Opt.WebGUIFetchURL, "rc-web-fetch-url", "", "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest", "URL to fetch the releases for webgui.")
flags.StringVarP(flagSet, &Opt.WebGUIFetchURL, "rc-web-fetch-url", "", "https://api.github.com/repos/negative0/rclone-webui-react/releases/latest", "URL to fetch the releases from")
httpflags.AddFlagsPrefix(flagSet, "rc-", &Opt.HTTPOptions)
}


@@ -95,8 +95,6 @@ func (s *Server) Serve() error {
// Don't open browser if serving in testing environment.
if flag.Lookup("test.v") == nil {
_ = open.Start(openURL.String())
} else {
fs.Errorf(nil, "Not opening browser in testing environment")
}
}
return nil


@@ -27,7 +27,6 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/text/unicode/norm"
@@ -358,6 +357,24 @@ func Time(timeString string) time.Time {
return t
}
// RandomString create a random string for test purposes
func RandomString(n int) string {
const (
vowel = "aeiou"
consonant = "bcdfghjklmnpqrstvwxyz"
digit = "0123456789"
)
pattern := []string{consonant, vowel, consonant, vowel, consonant, vowel, consonant, digit}
out := make([]byte, n)
p := 0
for i := range out {
source := pattern[p]
p = (p + 1) % len(pattern)
out[i] = source[rand.Intn(len(source))]
}
return string(out)
}
// LocalRemote creates a temporary directory name for local remotes
func LocalRemote() (path string, err error) {
path, err = ioutil.TempDir("", "rclone")
@@ -386,7 +403,7 @@ func RandomRemoteName(remoteName string) (string, string, error) {
if !strings.HasSuffix(remoteName, ":") {
remoteName += "/"
}
leafName = "rclone-test-" + random.String(24)
leafName = "rclone-test-" + RandomString(24)
if !MatchTestRemote.MatchString(leafName) {
log.Fatalf("%q didn't match the test remote name regexp", leafName)
}
@@ -415,7 +432,7 @@ func RandomRemote(remoteName string, subdir bool) (fs.Fs, string, func(), error)
if err != nil {
return nil, "", nil, err
}
remoteName += "/rclone-test-subdir-" + random.String(8)
remoteName += "/rclone-test-subdir-" + RandomString(8)
}
remote, err := fs.NewFs(remoteName)


@@ -31,7 +31,6 @@ import (
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -158,7 +157,7 @@ func testPut(t *testing.T, f fs.Fs, file *fstest.Item) (string, fs.Object) {
contents string
)
retry(t, "Put", func() error {
contents = random.String(100)
contents = fstest.RandomString(100)
buf := bytes.NewBufferString(contents)
uploadHash = hash.NewMultiHasher()
in := io.TeeReader(buf, uploadHash)
@@ -558,7 +557,7 @@ func Run(t *testing.T, opt *Opt) {
const N = 5 * 1024
// Read N bytes then produce an error
contents := random.String(N)
contents := fstest.RandomString(N)
buf := bytes.NewBufferString(contents)
er := &errorReader{errors.New("potato")}
in := io.MultiReader(buf, er)
@@ -1323,7 +1322,7 @@ func Run(t *testing.T, opt *Opt) {
// TestObjectUpdate tests that Update works
t.Run("ObjectUpdate", func(t *testing.T) {
skipIfNotOk(t)
contents := random.String(200)
contents := fstest.RandomString(200)
buf := bytes.NewBufferString(contents)
hash := hash.NewMultiHasher()
in := io.TeeReader(buf, hash)
@@ -1508,7 +1507,7 @@ func Run(t *testing.T, opt *Opt) {
contentSize = 100
)
retry(t, "PutStream", func() error {
contents := random.String(contentSize)
contents := fstest.RandomString(contentSize)
buf := bytes.NewBufferString(contents)
uploadHash = hash.NewMultiHasher()
in := io.TeeReader(buf, uploadHash)
@@ -1565,7 +1564,7 @@ func Run(t *testing.T, opt *Opt) {
assert.Nil(t, recover(), "Fs.Put() should not panic when src.Size() == -1")
}()
contents := random.String(100)
contents := fstest.RandomString(100)
in := bytes.NewBufferString(contents)
obji := object.NewStaticObjectInfo("unknown-size-put.txt", fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
@@ -1588,7 +1587,7 @@ func Run(t *testing.T, opt *Opt) {
assert.Nil(t, recover(), "Object.Update() should not panic when src.Size() == -1")
}()
newContents := random.String(200)
newContents := fstest.RandomString(200)
in := bytes.NewBufferString(newContents)
obj := findObject(t, remote, unknownSizeUpdateFile.Path)

lib/cache/cache.go vendored

@@ -1,134 +0,0 @@
// Package cache implements a simple cache where the entries are
// expired after a given time (5 minutes of disuse by default).
package cache
import (
"sync"
"time"
)
// Cache holds values indexed by string, but expired after a given time (5
// minutes by default).
type Cache struct {
mu sync.Mutex
cache map[string]*cacheEntry
expireRunning bool
expireDuration time.Duration // expire the cache entry when it is older than this
expireInterval time.Duration // interval to run the cache expire
}
// New creates a new cache with the default expire duration and interval
func New() *Cache {
return &Cache{
cache: map[string]*cacheEntry{},
expireRunning: false,
expireDuration: 300 * time.Second,
expireInterval: 60 * time.Second,
}
}
// cacheEntry is stored in the cache
type cacheEntry struct {
value interface{} // cached item
err error // creation error
key string // key
lastUsed time.Time // time used for expiry
}
// CreateFunc is called to create new values. If the create function
// returns an error it will be cached if ok is true, otherwise the
// error will just be returned, allowing negative caching if required.
type CreateFunc func(key string) (value interface{}, ok bool, error error)
// used marks an entry as accessed now and kicks the expire timer off
// should be called with the lock held
func (c *Cache) used(entry *cacheEntry) {
entry.lastUsed = time.Now()
if !c.expireRunning {
time.AfterFunc(c.expireInterval, c.cacheExpire)
c.expireRunning = true
}
}
// Get gets a value named key either from the cache or creates it
// afresh with the create function.
func (c *Cache) Get(key string, create CreateFunc) (value interface{}, err error) {
c.mu.Lock()
entry, ok := c.cache[key]
if !ok {
c.mu.Unlock() // Unlock in case Get is called recursively
value, ok, err = create(key)
if err != nil && !ok {
return value, err
}
entry = &cacheEntry{
value: value,
key: key,
err: err,
}
c.mu.Lock()
c.cache[key] = entry
}
defer c.mu.Unlock()
c.used(entry)
return entry.value, entry.err
}
// Put puts a value named key into the cache
func (c *Cache) Put(key string, value interface{}) {
c.mu.Lock()
defer c.mu.Unlock()
entry := &cacheEntry{
value: value,
key: key,
}
c.used(entry)
c.cache[key] = entry
}
// GetMaybe returns the key and true if found, nil and false if not
func (c *Cache) GetMaybe(key string) (value interface{}, found bool) {
c.mu.Lock()
defer c.mu.Unlock()
entry, found := c.cache[key]
if !found {
return nil, found
}
c.used(entry)
return entry.value, found
}
// cacheExpire expires any entries that haven't been used recently
func (c *Cache) cacheExpire() {
c.mu.Lock()
defer c.mu.Unlock()
now := time.Now()
for key, entry := range c.cache {
if now.Sub(entry.lastUsed) > c.expireDuration {
delete(c.cache, key)
}
}
if len(c.cache) != 0 {
time.AfterFunc(c.expireInterval, c.cacheExpire)
c.expireRunning = true
} else {
c.expireRunning = false
}
}
// Clear removes everything from the cache
func (c *Cache) Clear() {
c.mu.Lock()
for k := range c.cache {
delete(c.cache, k)
}
c.mu.Unlock()
}
// Entries returns the number of entries in the cache
func (c *Cache) Entries() int {
c.mu.Lock()
entries := len(c.cache)
c.mu.Unlock()
return entries
}
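For context, a minimal sketch of the lib/cache API shown above (not part of the diff), assuming the import path github.com/rclone/rclone/lib/cache:

package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/cache"
)

func main() {
	c := cache.New()

	// The create function only runs on a cache miss. Returning ok=true caches
	// the value together with its error (allowing negative caching); ok=false
	// just returns the error without storing anything.
	value, err := c.Get("key", func(key string) (interface{}, bool, error) {
		return "expensive result for " + key, true, nil
	})
	fmt.Println(value, err, c.Entries()) // "expensive result for key" <nil> 1

	c.Put("answer", 42)
	if v, found := c.GetMaybe("answer"); found {
		fmt.Println(v) // 42
	}
	c.Clear()
}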


@@ -1,174 +0,0 @@
package cache
import (
"errors"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
called = 0
errSentinel = errors.New("an error")
errCached = errors.New("a cached error")
)
func setup(t *testing.T) (*Cache, CreateFunc) {
called = 0
create := func(path string) (interface{}, bool, error) {
assert.Equal(t, 0, called)
called++
switch path {
case "/":
return "/", true, nil
case "/file.txt":
return "/file.txt", true, errCached
case "/error":
return nil, false, errSentinel
}
panic(fmt.Sprintf("Unknown path %q", path))
}
c := New()
return c, create
}
func TestGet(t *testing.T) {
c, create := setup(t)
assert.Equal(t, 0, len(c.cache))
f, err := c.Get("/", create)
require.NoError(t, err)
assert.Equal(t, 1, len(c.cache))
f2, err := c.Get("/", create)
require.NoError(t, err)
assert.Equal(t, f, f2)
}
func TestGetFile(t *testing.T) {
c, create := setup(t)
assert.Equal(t, 0, len(c.cache))
f, err := c.Get("/file.txt", create)
require.Equal(t, errCached, err)
assert.Equal(t, 1, len(c.cache))
f2, err := c.Get("/file.txt", create)
require.Equal(t, errCached, err)
assert.Equal(t, f, f2)
}
func TestGetError(t *testing.T) {
c, create := setup(t)
assert.Equal(t, 0, len(c.cache))
f, err := c.Get("/error", create)
require.Equal(t, errSentinel, err)
require.Equal(t, nil, f)
assert.Equal(t, 0, len(c.cache))
}
func TestPut(t *testing.T) {
c, create := setup(t)
assert.Equal(t, 0, len(c.cache))
c.Put("/alien", "slime")
assert.Equal(t, 1, len(c.cache))
fNew, err := c.Get("/alien", create)
require.NoError(t, err)
require.Equal(t, "slime", fNew)
assert.Equal(t, 1, len(c.cache))
}
func TestCacheExpire(t *testing.T) {
c, create := setup(t)
c.expireInterval = time.Millisecond
assert.Equal(t, false, c.expireRunning)
_, err := c.Get("/", create)
require.NoError(t, err)
c.mu.Lock()
entry := c.cache["/"]
assert.Equal(t, 1, len(c.cache))
c.mu.Unlock()
c.cacheExpire()
c.mu.Lock()
assert.Equal(t, 1, len(c.cache))
entry.lastUsed = time.Now().Add(-c.expireDuration - 60*time.Second)
assert.Equal(t, true, c.expireRunning)
c.mu.Unlock()
time.Sleep(10 * time.Millisecond)
c.mu.Lock()
assert.Equal(t, false, c.expireRunning)
assert.Equal(t, 0, len(c.cache))
c.mu.Unlock()
}
func TestClear(t *testing.T) {
c, create := setup(t)
assert.Equal(t, 0, len(c.cache))
_, err := c.Get("/", create)
require.NoError(t, err)
assert.Equal(t, 1, len(c.cache))
c.Clear()
assert.Equal(t, 0, len(c.cache))
}
func TestEntries(t *testing.T) {
c, create := setup(t)
assert.Equal(t, 0, c.Entries())
_, err := c.Get("/", create)
require.NoError(t, err)
assert.Equal(t, 1, c.Entries())
c.Clear()
assert.Equal(t, 0, c.Entries())
}
func TestGetMaybe(t *testing.T) {
c, create := setup(t)
value, found := c.GetMaybe("/")
assert.Equal(t, false, found)
assert.Nil(t, value)
f, err := c.Get("/", create)
require.NoError(t, err)
value, found = c.GetMaybe("/")
assert.Equal(t, true, found)
assert.Equal(t, f, value)
c.Clear()
value, found = c.GetMaybe("/")
assert.Equal(t, false, found)
assert.Nil(t, value)
}


@@ -1,22 +0,0 @@
// Package random holds a few functions for working with random numbers
package random
import "math/rand"
// String create a random string for test purposes
func String(n int) string {
const (
vowel = "aeiou"
consonant = "bcdfghjklmnpqrstvwxyz"
digit = "0123456789"
)
pattern := []string{consonant, vowel, consonant, vowel, consonant, vowel, consonant, digit}
out := make([]byte, n)
p := 0
for i := range out {
source := pattern[p]
p = (p + 1) % len(pattern)
out[i] = source[rand.Intn(len(source))]
}
return string(out)
}
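For context (not part of the diff): the pattern above alternates consonants and vowels with a digit in every eighth position, so the generated names are pronounceable and safe for use in remote paths. A minimal usage sketch, assuming the import path github.com/rclone/rclone/lib/random:

package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/random"
)

func main() {
	// Typical output has the shape consonant-vowel-...-digit,
	// something like "bacodil4" repeated to the requested length.
	fmt.Println("rclone-test-" + random.String(24))
}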


@@ -1,13 +0,0 @@
package random
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestString(t *testing.T) {
for i := 0; i < 100; i++ {
assert.Equal(t, i, len(String(i)))
}
}


@@ -18,7 +18,6 @@ import (
"time"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/random"
)
var (
@@ -36,6 +35,24 @@ func init() {
}
// RandomString create a random string for test purposes
func RandomString(n int) string {
const (
vowel = "aeiou"
consonant = "bcdfghjklmnpqrstvwxyz"
digit = "0123456789"
)
pattern := []string{consonant, vowel, consonant, vowel, consonant, vowel, consonant, digit}
out := make([]byte, n)
p := 0
for i := range out {
source := pattern[p]
p = (p + 1) % len(pattern)
out[i] = source[rand.Intn(len(source))]
}
return string(out)
}
// Test contains stats about the running test which work for files or
// directories
type Test struct {
@@ -54,7 +71,7 @@ type Test struct {
func NewTest(Dir string) *Test {
t := &Test{
dir: Dir,
name: random.String(*nameLength),
name: RandomString(*nameLength),
isDir: rand.Intn(2) == 0,
number: atomic.AddInt32(&testNumber, 1),
timer: time.NewTimer(*timeout),
@@ -151,7 +168,7 @@ func (t *Test) rename() {
return
}
t.logf("rename")
NewName := random.String(*nameLength)
NewName := RandomString(*nameLength)
newPath := path.Join(t.dir, NewName)
err := os.Rename(t.path(), newPath)
if err != nil {


@@ -242,11 +242,6 @@ func New(f fs.Fs, opt *Options) *VFS {
return vfs
}
// Fs returns the Fs passed into the New call
func (vfs *VFS) Fs() fs.Fs {
return vfs.f
}
// SetCacheMode change the cache mode
func (vfs *VFS) SetCacheMode(cacheMode CacheMode) {
vfs.Shutdown()