Mirror of https://github.com/rclone/rclone.git (synced 2026-02-03 10:13:22 +00:00)

Compare commits (48 commits): azureblob-...drive-untr
Commits in this comparison (SHA1):

77b1eaeffe, ab78eb13e4, b1f31c2acf, dcc74fa404, 6759d36e2f, a4797014c9, 4d7d240c12, d046402d80,
9bdf465c10, f3f48d7d49, 3c89406886, 85d09729f2, b3bd2d1c9e, 4c586a9264, 1c80e84f8a, 028f8a69d3,
b0d1fa1d6b, dbb4b2c900, 99201f8ba4, 5ad8bcb43a, 6efedc4043, a3d9a38f51, b1bd17a220, 793f594b07,
4fe6614ae1, 4c2fbf9b36, ed4f1b2936, 144c1a04d4, 25ec7f5c00, b15603d5ea, 71c974bf9a, 03c5b8232e,
72392a2d72, b062ae9d13, 8c0335a176, 794e55de27, 038ed1aaf0, 97beff5370, b9b9bce0db, 947e10eb2b,
6b42421374, fa051ff970, 69164b3dda, 935533e57f, 1550f70865, 1a65c3a740, a29a1de43d, e7ae5e8ee0
.travis.yml

@@ -47,4 +47,4 @@ deploy:
on:
all_branches: true
go: "1.10.1"
condition: $TRAVIS_OS_NAME == linux && $TRAVIS_PULL_REQUEST == false
condition: $TRAVIS_PULL_REQUEST == false
47 Gopkg.lock generated
@@ -18,24 +18,16 @@
|
||||
version = "v0.23.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/Azure/azure-sdk-for-go"
|
||||
packages = [
|
||||
"storage",
|
||||
"version"
|
||||
]
|
||||
revision = "cd93ccfe0395e70031704ca68f14606588eec120"
|
||||
version = "v17.3.0"
|
||||
name = "github.com/Azure/azure-pipeline-go"
|
||||
packages = ["pipeline"]
|
||||
revision = "7571e8eb0876932ab505918ff7ed5107773e5ee2"
|
||||
version = "0.1.7"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/Azure/go-autorest"
|
||||
packages = [
|
||||
"autorest",
|
||||
"autorest/adal",
|
||||
"autorest/azure",
|
||||
"autorest/date"
|
||||
]
|
||||
revision = "f04d503958a4fe854c1b41667c73f8813c9dd9c3"
|
||||
version = "v10.11.2"
|
||||
branch = "master"
|
||||
name = "github.com/Azure/azure-storage-blob-go"
|
||||
packages = ["2018-03-28/azblob"]
|
||||
revision = "eaae161d9d5e07363f04ddb19d84d57efc66d1a1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -124,12 +116,6 @@
|
||||
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/dgrijalva/jwt-go"
|
||||
packages = ["."]
|
||||
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
|
||||
version = "v3.2.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/djherbis/times"
|
||||
packages = ["."]
|
||||
@@ -201,12 +187,6 @@
|
||||
revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
|
||||
version = "v0.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/marstr/guid"
|
||||
packages = ["."]
|
||||
revision = "8bd9a64bf37eb297b492a4101fb28e80ac0b290f"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/mattn/go-runewidth"
|
||||
packages = ["."]
|
||||
@@ -285,12 +265,6 @@
|
||||
revision = "55d61fa8aa702f59229e6cff85793c22e580eaf5"
|
||||
version = "v1.5.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/satori/go.uuid"
|
||||
packages = ["."]
|
||||
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/sevlyar/go-daemon"
|
||||
@@ -331,7 +305,7 @@
|
||||
branch = "master"
|
||||
name = "github.com/t3rm1n4l/go-mega"
|
||||
packages = ["."]
|
||||
revision = "3ba49835f4db01d6329782cbdc7a0a8bb3a26c5f"
|
||||
revision = "57978a63bd3f91fa7e188b751a7e7e6dd4e33813"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -470,7 +444,6 @@
|
||||
"internal/modules",
|
||||
"internal/remote_api",
|
||||
"internal/urlfetch",
|
||||
"log",
|
||||
"urlfetch"
|
||||
]
|
||||
revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
|
||||
@@ -485,6 +458,6 @@
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "c1378c5fc821e27711155958ff64b3c74b56818ba4733dbfe0c86d518c32880e"
|
||||
inputs-digest = "670cdb55138aa1394b4c8f87345e9be9c8105248edda4be7176dddee2a4f5d26"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
|
||||
Gopkg.toml

@@ -9,3 +9,7 @@
[[override]]
branch = "master"
name = "github.com/coreos/bbolt"

[[constraint]]
branch = "master"
name = "github.com/Azure/azure-storage-blob-go"
38 Makefile
@@ -1,12 +1,22 @@
SHELL = bash
TAG := $(shell echo $$(git describe --abbrev=8 --tags)-$${APPVEYOR_REPO_BRANCH:-$${TRAVIS_BRANCH:-$$(git rev-parse --abbrev-ref HEAD)}} | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/; s/-\(HEAD\|master\)$$//')
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD))
TAG_BRANCH := -$(BRANCH)
BRANCH_PATH := branch/
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
TAG_BRANCH :=
BRANCH_PATH :=
endif
TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
LAST_TAG := $(shell git describe --tags --abbrev=0)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
# Run full tests if go >= go1.9
FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 9)')
BETA_URL := https://beta.rclone.org/$(TAG)/
BETA_PATH := $(BRANCH_PATH)$(TAG)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
# Pass in GOTAGS=xyz on the make command line to set build tags
ifdef GOTAGS
BUILDTAGS=-tags "$(GOTAGS)"
@@ -21,6 +31,7 @@ rclone:

vars:
@echo SHELL="'$(SHELL)'"
@echo BRANCH="'$(BRANCH)'"
@echo TAG="'$(TAG)'"
@echo LAST_TAG="'$(LAST_TAG)'"
@echo NEW_TAG="'$(NEW_TAG)'"
@@ -160,25 +171,32 @@ else
endif

appveyor_upload:
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ memstore:beta-rclone-org/$(TAG)
ifeq ($(APPVEYOR_REPO_BRANCH),master)
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ memstore:beta-rclone-org
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
@echo Beta release ready at $(BETA_URL)

BUILD_FLAGS := -exclude "^(windows|darwin)/"
ifeq ($(TRAVIS_OS_NAME),osx)
BUILD_FLAGS := -include "^darwin/" -cgo
endif

travis_beta:
ifeq ($(TRAVIS_OS_NAME),linux)
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
endif
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt -exclude "^windows/" -parallel 8 $(BUILDTAGS) $(TAG)β
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ memstore:beta-rclone-org/$(TAG)
ifeq ($(TRAVIS_BRANCH),master)
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ memstore:beta-rclone-org
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)β
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
ifndef BRANCH_PATH
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
endif
@echo Beta release ready at $(BETA_URL)

# Fetch the windows builds from appveyor
fetch_windows:
rclone -v copy --include 'rclone-v*-windows-*.zip' memstore:beta-rclone-org/$(TAG) build/
rclone -v copy --include 'rclone-v*-windows-*.zip' $(BETA_UPLOAD) build/
-#cp -av build/rclone-v*-windows-386.zip build/rclone-current-windows-386.zip
-#cp -av build/rclone-v*-windows-amd64.zip build/rclone-current-windows-amd64.zip
md5sum build/rclone-*-windows-*.zip | sort
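The new Makefile variables split the old single TAG computation in two: BRANCH picks the branch name from AppVeyor, Travis, or git, and TAG_BRANCH/BRANCH_PATH are emptied when the branch is master or HEAD, so only feature branches get a "-branch" suffix on the tag and a "branch/" prefix in the beta upload path. A minimal Go sketch of that naming rule (illustrative only; the tag and branch values are made up):

```go
package main

import "fmt"

// betaNaming mimics the Makefile's TAG_BRANCH/BRANCH_PATH logic: builds
// from master or a detached HEAD keep the plain tag and upload to the
// root of beta.rclone.org, while any other branch gets a "-branch"
// suffix on the tag and lives under "branch/".
func betaNaming(tag, branch string) (fullTag, betaPath string) {
	tagBranch := "-" + branch
	branchPath := "branch/"
	if branch == "master" || branch == "HEAD" {
		tagBranch = ""
		branchPath = ""
	}
	fullTag = tag + tagBranch
	betaPath = branchPath + fullTag
	return fullTag, betaPath
}

func main() {
	fmt.Println(betaNaming("v1.41-123-g77b1eaef", "master"))
	// v1.41-123-g77b1eaef v1.41-123-g77b1eaef
	fmt.Println(betaNaming("v1.41-123-g77b1eaef", "azureblob"))
	// v1.41-123-g77b1eaef-azureblob branch/v1.41-123-g77b1eaef-azureblob
}
```

BETA_URL then becomes https://beta.rclone.org/$(BETA_PATH)/, so branch betas no longer overwrite the master beta at the bucket root.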
README.md

@@ -15,7 +15,7 @@

Rclone is a command line program to sync files and directories to and from

* Amazon Drive
* Amazon Drive ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 / Dreamhost / Ceph / Minio / Wasabi
* Backblaze B2
* Box
backend/alias/alias.go

@@ -7,7 +7,8 @@ import (
"strings"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
)

// Register with Fs
@@ -17,29 +18,42 @@ func init() {
Description: "Alias for an existing remote",
NewFs: NewFs,
Options: []fs.Option{{
Name: "remote",
Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
Name: "remote",
Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
Required: true,
}},
}
fs.Register(fsi)
}

// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
}

// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(name, root string) (fs.Fs, error) {
remote := config.FileGet(name, "remote")
if remote == "" {
return nil, errors.New("alias can't point to an empty remote - check the value of the remote setting")
}
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
}
fsInfo, configName, fsPath, err := fs.ParseRemote(remote)
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}

root = filepath.ToSlash(root)
return fsInfo.NewFs(configName, path.Join(fsPath, root))
if opt.Remote == "" {
return nil, errors.New("alias can't point to an empty remote - check the value of the remote setting")
}
if strings.HasPrefix(opt.Remote, name+":") {
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
}
_, configName, fsPath, err := fs.ParseRemote(opt.Remote)
if err != nil {
return nil, err
}
root = path.Join(fsPath, filepath.ToSlash(root))
if configName == "local" {
return fs.NewFs(root)
}
return fs.NewFs(configName + ":" + root)
}
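The rewritten NewFs takes a configmap.Mapper and fills Options via configstruct.Set, which matches map keys to struct fields through their `config:"..."` tags; the old per-key config.FileGet calls disappear. The sketch below does not use rclone itself: the small reflection helper is a simplified stand-in for configstruct.Set, just to illustrate the tag-driven pattern the diff moves to.

```go
package main

import (
	"fmt"
	"reflect"
)

// Options mirrors the backend pattern from the diff: each field is
// bound to a config key through its `config:"..."` struct tag.
type Options struct {
	Remote string `config:"remote"`
}

// set is a simplified stand-in for configstruct.Set: it copies values
// from a flat key/value map into tagged struct fields. The real
// implementation also handles typed options (sizes, durations, bools).
func set(m map[string]string, opt interface{}) error {
	v := reflect.ValueOf(opt).Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		key := t.Field(i).Tag.Get("config")
		if val, ok := m[key]; ok && v.Field(i).Kind() == reflect.String {
			v.Field(i).SetString(val)
		}
	}
	return nil
}

func main() {
	m := map[string]string{"remote": "myremote:path/to/dir"}
	opt := new(Options)
	if err := set(m, opt); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", opt) // &{Remote:myremote:path/to/dir}
}
```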
@@ -24,7 +24,8 @@ import (
|
||||
"github.com/ncw/go-acd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
@@ -37,19 +38,17 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
folderKind = "FOLDER"
|
||||
fileKind = "FILE"
|
||||
statusAvailable = "AVAILABLE"
|
||||
timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z
|
||||
minSleep = 20 * time.Millisecond
|
||||
warnFileSize = 50000 << 20 // Display warning for files larger than this size
|
||||
folderKind = "FOLDER"
|
||||
fileKind = "FILE"
|
||||
statusAvailable = "AVAILABLE"
|
||||
timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z
|
||||
minSleep = 20 * time.Millisecond
|
||||
warnFileSize = 50000 << 20 // Display warning for files larger than this size
|
||||
defaultTempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
// Flags
|
||||
tempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
|
||||
uploadWaitPerGB = flags.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
|
||||
// Description of how to auth for this app
|
||||
acdConfig = &oauth2.Config{
|
||||
Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
|
||||
@@ -67,35 +66,62 @@ var (
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "amazon cloud drive",
|
||||
Prefix: "acd",
|
||||
Description: "Amazon Drive",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string) {
|
||||
err := oauthutil.Config("amazon cloud drive", name, acdConfig)
|
||||
Config: func(name string, m configmap.Mapper) {
|
||||
err := oauthutil.Config("amazon cloud drive", name, m, acdConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: config.ConfigClientID,
|
||||
Help: "Amazon Application Client Id - required.",
|
||||
Name: config.ConfigClientID,
|
||||
Help: "Amazon Application Client ID.",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Amazon Application Client Secret - required.",
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Amazon Application Client Secret.",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: config.ConfigAuthURL,
|
||||
Help: "Auth server URL - leave blank to use Amazon's.",
|
||||
Name: config.ConfigAuthURL,
|
||||
Help: "Auth server URL.\nLeave blank to use Amazon's.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigTokenURL,
|
||||
Help: "Token server url - leave blank to use Amazon's.",
|
||||
Name: config.ConfigTokenURL,
|
||||
Help: "Token server url.\nleave blank to use Amazon's.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "checkpoint",
|
||||
Help: "Checkpoint for internal polling (debug).",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_wait_per_gb",
|
||||
Help: "Additional time per GB to wait after a failed complete upload to see if it appears.",
|
||||
Default: fs.Duration(180 * time.Second),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "templink_threshold",
|
||||
Help: "Files >= this size will be downloaded via their tempLink.",
|
||||
Default: defaultTempLinkThreshold,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
flags.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Checkpoint string `config:"checkpoint"`
|
||||
UploadWaitPerGB fs.Duration `config:"upload_wait_per_gb"`
|
||||
TempLinkThreshold fs.SizeSuffix `config:"templink_threshold"`
|
||||
}
|
||||
|
||||
// Fs represents a remote acd server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
features *fs.Features // optional features
|
||||
opt Options // options for this Fs
|
||||
c *acd.Client // the connection to the acd server
|
||||
noAuthClient *http.Client // unauthenticated http client
|
||||
root string // the path we are working on
|
||||
@@ -191,7 +217,13 @@ func filterRequest(req *http.Request) {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
root = parsePath(root)
|
||||
baseClient := fshttp.NewClient(fs.Config)
|
||||
if do, ok := baseClient.Transport.(interface {
|
||||
@@ -201,7 +233,7 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
} else {
|
||||
fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
|
||||
}
|
||||
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, acdConfig, baseClient)
|
||||
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure Amazon Drive: %v", err)
|
||||
}
|
||||
@@ -210,6 +242,7 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
c: c,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
|
||||
noAuthClient: fshttp.NewClient(fs.Config),
|
||||
@@ -527,13 +560,13 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
|
||||
}
|
||||
|
||||
// Don't wait for uploads - assume they will appear later
|
||||
if *uploadWaitPerGB <= 0 {
|
||||
if f.opt.UploadWaitPerGB <= 0 {
|
||||
fs.Debugf(src, "Upload error detected but waiting disabled: %v (%q)", inErr, httpStatus)
|
||||
return false, inInfo, inErr
|
||||
}
|
||||
|
||||
// Time we should wait for the upload
|
||||
uploadWaitPerByte := float64(*uploadWaitPerGB) / 1024 / 1024 / 1024
|
||||
uploadWaitPerByte := float64(f.opt.UploadWaitPerGB) / 1024 / 1024 / 1024
|
||||
timeToWait := time.Duration(uploadWaitPerByte * float64(src.Size()))
|
||||
|
||||
const sleepTime = 5 * time.Second // sleep between tries
|
||||
@@ -1015,7 +1048,7 @@ func (o *Object) Storable() bool {
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
bigObject := o.Size() >= int64(tempLinkThreshold)
|
||||
bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
|
||||
if bigObject {
|
||||
fs.Debugf(o, "Downloading large object via tempLink")
|
||||
}
|
||||
@@ -1208,7 +1241,7 @@ func (o *Object) MimeType() string {
|
||||
//
|
||||
// Close the returned channel to stop being notified.
|
||||
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
|
||||
checkpoint := config.FileGet(f.name, "checkpoint")
|
||||
checkpoint := f.opt.Checkpoint
|
||||
|
||||
quit := make(chan bool)
|
||||
go func() {
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
|
||||
|
||||
// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8
|
||||
|
||||
package azureblob
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
@@ -18,13 +21,12 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/storage"
|
||||
"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/ncw/rclone/lib/pacer"
|
||||
@@ -32,24 +34,20 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
apiVersion = "2017-04-17"
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 10 * time.Second
|
||||
decayConstant = 1 // bigger for slower decay, exponential
|
||||
listChunkSize = 5000 // number of items to read at once
|
||||
modTimeKey = "mtime"
|
||||
timeFormatIn = time.RFC3339
|
||||
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
|
||||
maxTotalParts = 50000 // in multipart upload
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 10 * time.Second
|
||||
decayConstant = 1 // bigger for slower decay, exponential
|
||||
listChunkSize = 5000 // number of items to read at once
|
||||
modTimeKey = "mtime"
|
||||
timeFormatIn = time.RFC3339
|
||||
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
|
||||
maxTotalParts = 50000 // in multipart upload
|
||||
storageDefaultBaseURL = "blob.core.windows.net"
|
||||
// maxUncommittedSize = 9 << 30 // can't upload bigger than this
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
maxChunkSize = fs.SizeSuffix(100 * 1024 * 1024)
|
||||
chunkSize = fs.SizeSuffix(4 * 1024 * 1024)
|
||||
uploadCutoff = fs.SizeSuffix(256 * 1024 * 1024)
|
||||
maxUploadCutoff = fs.SizeSuffix(256 * 1024 * 1024)
|
||||
defaultChunkSize = 4 * 1024 * 1024
|
||||
maxChunkSize = 100 * 1024 * 1024
|
||||
defaultUploadCutoff = 256 * 1024 * 1024
|
||||
maxUploadCutoff = 256 * 1024 * 1024
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -64,31 +62,45 @@ func init() {
|
||||
}, {
|
||||
Name: "key",
|
||||
Help: "Storage Account Key (leave blank to use connection string or SAS URL)",
|
||||
}, {
|
||||
Name: "connection_string",
|
||||
Help: "Connection string (leave blank if using account/key or SAS URL)",
|
||||
}, {
|
||||
Name: "sas_url",
|
||||
Help: "SAS URL for container level access only\n(leave blank if using account/key or connection string)",
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for the service - leave blank normally.",
|
||||
},
|
||||
},
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for the service\nLeave blank normally.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: "Cutoff for switching to chunked upload.",
|
||||
Default: fs.SizeSuffix(defaultUploadCutoff),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: "Upload chunk size. Must fit in memory.",
|
||||
Default: fs.SizeSuffix(defaultChunkSize),
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
flags.VarP(&uploadCutoff, "azureblob-upload-cutoff", "", "Cutoff for switching to chunked upload")
|
||||
flags.VarP(&chunkSize, "azureblob-chunk-size", "", "Upload chunk size. Must fit in memory.")
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Account string `config:"account"`
|
||||
Key string `config:"key"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
SASURL string `config:"sas_url"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
}
|
||||
|
||||
// Fs represents a remote azure server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
features *fs.Features // optional features
|
||||
account string // account name
|
||||
endpoint string // name of the starting api endpoint
|
||||
bc *storage.BlobStorageClient
|
||||
cc *storage.Container
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
opt Options // parsed config options
|
||||
features *fs.Features // optional features
|
||||
svcURL *azblob.ServiceURL // reference to serviceURL
|
||||
cntURL *azblob.ContainerURL // reference to containerURL
|
||||
container string // the container we are working on
|
||||
containerOKMu sync.Mutex // mutex to protect container OK
|
||||
containerOK bool // true if we have created the container
|
||||
@@ -99,13 +111,14 @@ type Fs struct {
|
||||
|
||||
// Object describes a azure object
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
modTime time.Time // The modified time of the object if known
|
||||
md5 string // MD5 hash if known
|
||||
size int64 // Size of the object
|
||||
mimeType string // Content-Type of the object
|
||||
meta map[string]string // blob metadata
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
modTime time.Time // The modified time of the object if known
|
||||
md5 string // MD5 hash if known
|
||||
size int64 // Size of the object
|
||||
mimeType string // Content-Type of the object
|
||||
accessTier azblob.AccessTierType // Blob Access Tier
|
||||
meta map[string]string // blob metadata
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -165,8 +178,8 @@ var retryErrorCodes = []int{
|
||||
// deserve to be retried. It returns the err as a convenience
|
||||
func (f *Fs) shouldRetry(err error) (bool, error) {
|
||||
// FIXME interpret special errors - more to do here
|
||||
if storageErr, ok := err.(storage.AzureStorageServiceError); ok {
|
||||
statusCode := storageErr.StatusCode
|
||||
if storageErr, ok := err.(azblob.StorageError); ok {
|
||||
statusCode := storageErr.Response().StatusCode
|
||||
for _, e := range retryErrorCodes {
|
||||
if statusCode == e {
|
||||
return true, err
|
||||
@@ -177,59 +190,74 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
if uploadCutoff > maxUploadCutoff {
|
||||
return nil, errors.Errorf("azure: upload cutoff (%v) must be less than or equal to %v", uploadCutoff, maxUploadCutoff)
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if chunkSize > maxChunkSize {
|
||||
return nil, errors.Errorf("azure: chunk size can't be greater than %v - was %v", maxChunkSize, chunkSize)
|
||||
|
||||
if opt.UploadCutoff > maxUploadCutoff {
|
||||
return nil, errors.Errorf("azure: upload cutoff (%v) must be less than or equal to %v", opt.UploadCutoff, maxUploadCutoff)
|
||||
}
|
||||
if opt.ChunkSize > maxChunkSize {
|
||||
return nil, errors.Errorf("azure: chunk size can't be greater than %v - was %v", maxChunkSize, opt.ChunkSize)
|
||||
}
|
||||
container, directory, err := parsePath(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
account := config.FileGet(name, "account")
|
||||
key := config.FileGet(name, "key")
|
||||
connectionString := config.FileGet(name, "connection_string")
|
||||
sasURL := config.FileGet(name, "sas_url")
|
||||
endpoint := config.FileGet(name, "endpoint", storage.DefaultBaseURL)
|
||||
if opt.Endpoint == "" {
|
||||
opt.Endpoint = storageDefaultBaseURL
|
||||
}
|
||||
|
||||
var client storage.Client
|
||||
var (
|
||||
u *url.URL
|
||||
serviceURL azblob.ServiceURL
|
||||
containerURL azblob.ContainerURL
|
||||
)
|
||||
switch {
|
||||
case account != "" && key != "":
|
||||
client, err = storage.NewClient(account, key, endpoint, apiVersion, true)
|
||||
case opt.Account != "" && opt.Key != "":
|
||||
credential := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
|
||||
u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to make azure storage client from account/key")
|
||||
return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
|
||||
}
|
||||
case connectionString != "":
|
||||
client, err = storage.NewClientFromConnectionString(connectionString)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to make azure storage client from connection string")
|
||||
}
|
||||
case sasURL != "":
|
||||
URL, err := url.Parse(sasURL)
|
||||
pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
|
||||
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
||||
containerURL = serviceURL.NewContainerURL(container)
|
||||
case opt.SASURL != "":
|
||||
u, err = url.Parse(opt.SASURL)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse SAS URL")
|
||||
}
|
||||
container, err := storage.GetContainerReferenceFromSASURI(*URL)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to make azure storage client from SAS URL")
|
||||
// use anonymous credentials in case of sas url
|
||||
pipeline := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
|
||||
// Check if we have container level SAS or account level sas
|
||||
parts := azblob.NewBlobURLParts(*u)
|
||||
if parts.ContainerName != "" {
|
||||
if container != "" && parts.ContainerName != container {
|
||||
return nil, errors.New("Container name in SAS URL and container provided in command do not match")
|
||||
}
|
||||
|
||||
container = parts.ContainerName
|
||||
containerURL = azblob.NewContainerURL(*u, pipeline)
|
||||
} else {
|
||||
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
||||
containerURL = serviceURL.NewContainerURL(container)
|
||||
}
|
||||
client = *container.Client()
|
||||
default:
|
||||
return nil, errors.New("Need account+key or connectionString or sasURL")
|
||||
}
|
||||
client.HTTPClient = fshttp.NewClient(fs.Config)
|
||||
bc := client.GetBlobService()
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
container: container,
|
||||
root: directory,
|
||||
account: account,
|
||||
endpoint: endpoint,
|
||||
bc: &bc,
|
||||
cc: bc.GetContainerReference(container),
|
||||
svcURL: &serviceURL,
|
||||
cntURL: &containerURL,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
|
||||
}
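In the SAS URL case above, the code decides between a container-level and an account-level SAS by parsing the URL with azblob.NewBlobURLParts and checking ContainerName. A reduced sketch of that check, assuming the same github.com/Azure/azure-storage-blob-go/2018-03-28/azblob package that the diff imports:

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
)

// sasScope reports whether a SAS URL is scoped to one container or to
// the whole storage account, mirroring the ContainerName check in NewFs.
func sasScope(sasURL string) (container string, accountLevel bool, err error) {
	u, err := url.Parse(sasURL)
	if err != nil {
		return "", false, err
	}
	parts := azblob.NewBlobURLParts(*u)
	if parts.ContainerName != "" {
		// Container-level SAS: only this container may be accessed.
		return parts.ContainerName, false, nil
	}
	// Account-level SAS: a ContainerURL still has to be derived from the
	// ServiceURL for whichever container the remote names.
	return "", true, nil
}

func main() {
	// Illustrative URL only; a real SAS URL carries the token in its
	// query string (sv, sig, se, ...).
	c, acct, err := sasScope("https://myaccount.blob.core.windows.net/mycontainer?sv=2017-11-09&sig=...")
	fmt.Println(c, acct, err) // mycontainer false <nil>
}
```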
|
||||
@@ -267,13 +295,13 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
// Return an Object from a path
|
||||
//
|
||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) newObjectWithInfo(remote string, info *storage.Blob) (fs.Object, error) {
|
||||
func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItem) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
if info != nil {
|
||||
err := o.decodeMetaData(info)
|
||||
err := o.decodeMetaDataFromBlob(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -293,13 +321,12 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
}
|
||||
|
||||
// getBlobReference creates an empty blob reference with no metadata
|
||||
func (f *Fs) getBlobReference(remote string) *storage.Blob {
|
||||
return f.cc.GetBlobReference(f.root + remote)
|
||||
func (f *Fs) getBlobReference(remote string) azblob.BlobURL {
|
||||
return f.cntURL.NewBlobURL(f.root + remote)
|
||||
}
|
||||
|
||||
// getBlobWithModTime adds the modTime passed in to o.meta and creates
|
||||
// a Blob from it.
|
||||
func (o *Object) getBlobWithModTime(modTime time.Time) *storage.Blob {
|
||||
// updateMetadataWithModTime adds the modTime passed in to o.meta.
|
||||
func (o *Object) updateMetadataWithModTime(modTime time.Time) {
|
||||
// Make sure o.meta is not nil
|
||||
if o.meta == nil {
|
||||
o.meta = make(map[string]string, 1)
|
||||
@@ -307,14 +334,10 @@ func (o *Object) getBlobWithModTime(modTime time.Time) *storage.Blob {
|
||||
|
||||
// Set modTimeKey in it
|
||||
o.meta[modTimeKey] = modTime.Format(timeFormatOut)
|
||||
|
||||
blob := o.getBlobReference()
|
||||
blob.Metadata = o.meta
|
||||
return blob
|
||||
}
|
||||
|
||||
// listFn is called from list to handle an object
|
||||
type listFn func(remote string, object *storage.Blob, isDirectory bool) error
|
||||
type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
|
||||
|
||||
// list lists the objects into the function supplied from
|
||||
// the container and root supplied
|
||||
@@ -335,32 +358,39 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
||||
if !recurse {
|
||||
delimiter = "/"
|
||||
}
|
||||
params := storage.ListBlobsParameters{
|
||||
MaxResults: maxResults,
|
||||
Prefix: root,
|
||||
Delimiter: delimiter,
|
||||
Include: &storage.IncludeBlobDataset{
|
||||
Snapshots: false,
|
||||
Metadata: true,
|
||||
UncommittedBlobs: false,
|
||||
|
||||
options := azblob.ListBlobsSegmentOptions{
|
||||
Details: azblob.BlobListingDetails{
|
||||
Copy: false,
|
||||
Metadata: true,
|
||||
Snapshots: false,
|
||||
UncommittedBlobs: false,
|
||||
Deleted: false,
|
||||
},
|
||||
Prefix: root,
|
||||
MaxResults: int32(maxResults),
|
||||
}
|
||||
for {
|
||||
var response storage.BlobListResponse
|
||||
ctx := context.Background()
|
||||
for marker := (azblob.Marker{}); marker.NotDone(); {
|
||||
var response *azblob.ListBlobsHierarchySegmentResponse
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
response, err = f.cc.ListBlobs(params)
|
||||
response, err = f.cntURL.ListBlobsHierarchySegment(ctx, marker, delimiter, options)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
if storageErr, ok := err.(storage.AzureStorageServiceError); ok && storageErr.StatusCode == http.StatusNotFound {
|
||||
// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
|
||||
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
|
||||
return fs.ErrorDirNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
for i := range response.Blobs {
|
||||
file := &response.Blobs[i]
|
||||
// Advance marker to next
|
||||
marker = response.NextMarker
|
||||
|
||||
for i := range response.Segment.BlobItems {
|
||||
file := &response.Segment.BlobItems[i]
|
||||
// Finish if file name no longer has prefix
|
||||
// if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
|
||||
// return nil
|
||||
@@ -382,8 +412,8 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
||||
}
|
||||
}
|
||||
// Send the subdirectories
|
||||
for _, remote := range response.BlobPrefixes {
|
||||
remote := strings.TrimRight(remote, "/")
|
||||
for _, remote := range response.Segment.BlobPrefixes {
|
||||
remote := strings.TrimRight(remote.Name, "/")
|
||||
if !strings.HasPrefix(remote, f.root) {
|
||||
fs.Debugf(f, "Odd directory name received %q", remote)
|
||||
continue
|
||||
@@ -395,17 +425,12 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// end if no NextFileName
|
||||
if response.NextMarker == "" {
|
||||
break
|
||||
}
|
||||
params.Marker = response.NextMarker
|
||||
}
|
||||
return nil
|
||||
}
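Listing with the new SDK is marker-driven: each ListBlobsHierarchySegment call returns one segment of results plus a NextMarker, and the loop runs until marker.NotDone() is false. Below is a reduced sketch of that loop with the pacer, retry handling, and rclone-specific filtering stripped out; it assumes an already constructed azblob.ContainerURL.

```go
// Package azlist sketches the marker-based listing loop used above.
package azlist

import (
	"context"

	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
)

// listAllBlobNames walks every page of a hierarchical listing. It mirrors
// the loop in (*Fs).list: start from a zero azblob.Marker, fetch one
// segment, advance to response.NextMarker, and stop once the marker
// reports it is done.
func listAllBlobNames(ctx context.Context, cnt azblob.ContainerURL, prefix string) ([]string, error) {
	var names []string
	options := azblob.ListBlobsSegmentOptions{
		Prefix:     prefix,
		MaxResults: 5000, // same page size as listChunkSize in the diff
	}
	for marker := (azblob.Marker{}); marker.NotDone(); {
		response, err := cnt.ListBlobsHierarchySegment(ctx, marker, "/", options)
		if err != nil {
			return nil, err
		}
		marker = response.NextMarker
		for i := range response.Segment.BlobItems {
			names = append(names, response.Segment.BlobItems[i].Name)
		}
		for _, p := range response.Segment.BlobPrefixes {
			// BlobPrefixes are the pseudo-directories under the delimiter.
			names = append(names, p.Name)
		}
	}
	return names, nil
}
```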
|
||||
|
||||
// Convert a list item into a DirEntry
|
||||
func (f *Fs) itemToDirEntry(remote string, object *storage.Blob, isDirectory bool) (fs.DirEntry, error) {
|
||||
func (f *Fs) itemToDirEntry(remote string, object *azblob.BlobItem, isDirectory bool) (fs.DirEntry, error) {
|
||||
if isDirectory {
|
||||
d := fs.NewDir(remote, time.Time{})
|
||||
return d, nil
|
||||
@@ -429,7 +454,7 @@ func (f *Fs) markContainerOK() {
|
||||
|
||||
// listDir lists a single directory
|
||||
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
|
||||
err = f.list(dir, false, listChunkSize, func(remote string, object *storage.Blob, isDirectory bool) error {
|
||||
err = f.list(dir, false, listChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(remote, object, isDirectory)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -452,13 +477,8 @@ func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
|
||||
if dir != "" {
|
||||
return nil, fs.ErrorListBucketRequired
|
||||
}
|
||||
err = f.listContainersToFn(func(container *storage.Container) error {
|
||||
t, err := time.Parse(time.RFC1123, container.Properties.LastModified)
|
||||
if err != nil {
|
||||
fs.Debugf(f, "Failed to parse LastModified %q: %v", container.Properties.LastModified, err)
|
||||
t = time.Time{}
|
||||
}
|
||||
d := fs.NewDir(container.Name, t)
|
||||
err = f.listContainersToFn(func(container *azblob.ContainerItem) error {
|
||||
d := fs.NewDir(container.Name, container.Properties.LastModified)
|
||||
entries = append(entries, d)
|
||||
return nil
|
||||
})
|
||||
@@ -505,7 +525,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
||||
return fs.ErrorListBucketRequired
|
||||
}
|
||||
list := walk.NewListRHelper(callback)
|
||||
err = f.list(dir, true, listChunkSize, func(remote string, object *storage.Blob, isDirectory bool) error {
|
||||
err = f.list(dir, true, listChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(remote, object, isDirectory)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -521,27 +541,34 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
||||
}
|
||||
|
||||
// listContainerFn is called from listContainersToFn to handle a container
|
||||
type listContainerFn func(*storage.Container) error
|
||||
type listContainerFn func(*azblob.ContainerItem) error
|
||||
|
||||
// listContainersToFn lists the containers to the function supplied
|
||||
func (f *Fs) listContainersToFn(fn listContainerFn) error {
|
||||
// FIXME page the containers if necessary?
|
||||
params := storage.ListContainersParameters{}
|
||||
var response *storage.ContainerListResponse
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
response, err = f.bc.ListContainers(params)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
params := azblob.ListContainersSegmentOptions{
|
||||
MaxResults: int32(listChunkSize),
|
||||
}
|
||||
for i := range response.Containers {
|
||||
err = fn(&response.Containers[i])
|
||||
ctx := context.Background()
|
||||
for marker := (azblob.Marker{}); marker.NotDone(); {
|
||||
var response *azblob.ListContainersResponse
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
var err error
|
||||
response, err = f.svcURL.ListContainersSegment(ctx, marker, params)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i := range response.ContainerItems {
|
||||
err = fn(&response.ContainerItems[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
marker = response.NextMarker
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -566,23 +593,20 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
if f.containerOK {
|
||||
return nil
|
||||
}
|
||||
options := storage.CreateContainerOptions{
|
||||
Access: storage.ContainerAccessTypePrivate,
|
||||
}
|
||||
|
||||
// now try to create the container
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
err := f.cc.Create(&options)
|
||||
ctx := context.Background()
|
||||
_, err := f.cntURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
|
||||
if err != nil {
|
||||
if storageErr, ok := err.(storage.AzureStorageServiceError); ok {
|
||||
switch storageErr.StatusCode {
|
||||
case http.StatusConflict:
|
||||
switch storageErr.Code {
|
||||
case "ContainerAlreadyExists":
|
||||
f.containerOK = true
|
||||
return false, nil
|
||||
case "ContainerBeingDeleted":
|
||||
f.containerDeleted = true
|
||||
return true, err
|
||||
}
|
||||
if storageErr, ok := err.(azblob.StorageError); ok {
|
||||
switch storageErr.ServiceCode() {
|
||||
case azblob.ServiceCodeContainerAlreadyExists:
|
||||
f.containerOK = true
|
||||
return false, nil
|
||||
case azblob.ServiceCodeContainerBeingDeleted:
|
||||
f.containerDeleted = true
|
||||
return true, err
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -598,7 +622,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
// isEmpty checks to see if a given directory is empty and returns an error if not
|
||||
func (f *Fs) isEmpty(dir string) (err error) {
|
||||
empty := true
|
||||
err = f.list("", true, 1, func(remote string, object *storage.Blob, isDirectory bool) error {
|
||||
err = f.list("", true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
|
||||
empty = false
|
||||
return nil
|
||||
})
|
||||
@@ -616,16 +640,23 @@ func (f *Fs) isEmpty(dir string) (err error) {
|
||||
func (f *Fs) deleteContainer() error {
|
||||
f.containerOKMu.Lock()
|
||||
defer f.containerOKMu.Unlock()
|
||||
options := storage.DeleteContainerOptions{}
|
||||
options := azblob.ContainerAccessConditions{}
|
||||
ctx := context.Background()
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
exists, err := f.cc.Exists()
|
||||
_, err := f.cntURL.GetProperties(ctx, azblob.LeaseAccessConditions{})
|
||||
if err == nil {
|
||||
_, err = f.cntURL.Delete(ctx, options)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// Check http error code along with service code, current SDK doesn't populate service code correctly sometimes
|
||||
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
|
||||
return false, fs.ErrorDirNotFound
|
||||
}
|
||||
|
||||
return f.shouldRetry(err)
|
||||
}
|
||||
if !exists {
|
||||
return false, fs.ErrorDirNotFound
|
||||
}
|
||||
err = f.cc.Delete(&options)
|
||||
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
@@ -688,17 +719,36 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
dstBlob := f.getBlobReference(remote)
|
||||
srcBlob := srcObj.getBlobReference()
|
||||
options := storage.CopyOptions{}
|
||||
sourceBlobURL := srcBlob.GetURL()
|
||||
dstBlobURL := f.getBlobReference(remote)
|
||||
srcBlobURL := srcObj.getBlobReference()
|
||||
|
||||
source, err := url.Parse(srcBlobURL.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
options := azblob.BlobAccessConditions{}
|
||||
ctx := context.Background()
|
||||
var startCopy *azblob.BlobStartCopyFromURLResponse
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
err = dstBlob.Copy(sourceBlobURL, &options)
|
||||
startCopy, err = dstBlobURL.StartCopyFromURL(ctx, *source, nil, options, options)
|
||||
return f.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
copyStatus := startCopy.CopyStatus()
|
||||
for copyStatus == azblob.CopyStatusPending {
|
||||
time.Sleep(1 * time.Second)
|
||||
getMetadata, err := dstBlobURL.GetProperties(ctx, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copyStatus = getMetadata.CopyStatus()
|
||||
}
|
||||
|
||||
return f.NewObject(remote)
|
||||
}
|
||||
|
||||
@@ -743,22 +793,10 @@ func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
// decodeMetaData sets the metadata from the data passed in
|
||||
//
|
||||
// Sets
|
||||
// o.id
|
||||
// o.modTime
|
||||
// o.size
|
||||
// o.md5
|
||||
// o.meta
|
||||
func (o *Object) decodeMetaData(info *storage.Blob) (err error) {
|
||||
o.md5 = info.Properties.ContentMD5
|
||||
o.mimeType = info.Properties.ContentType
|
||||
o.size = info.Properties.ContentLength
|
||||
o.modTime = time.Time(info.Properties.LastModified)
|
||||
if len(info.Metadata) > 0 {
|
||||
o.meta = info.Metadata
|
||||
if modTime, ok := info.Metadata[modTimeKey]; ok {
|
||||
func (o *Object) setMetadata(metadata azblob.Metadata) {
|
||||
if len(metadata) > 0 {
|
||||
o.meta = metadata
|
||||
if modTime, ok := metadata[modTimeKey]; ok {
|
||||
when, err := time.Parse(timeFormatIn, modTime)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Couldn't parse %v = %q: %v", modTimeKey, modTime, err)
|
||||
@@ -768,11 +806,42 @@ func (o *Object) decodeMetaData(info *storage.Blob) (err error) {
|
||||
} else {
|
||||
o.meta = nil
|
||||
}
|
||||
}
|
||||
|
||||
// decodeMetaDataFromPropertiesResponse sets the metadata from the data passed in
|
||||
//
|
||||
// Sets
|
||||
// o.id
|
||||
// o.modTime
|
||||
// o.size
|
||||
// o.md5
|
||||
// o.meta
|
||||
func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetPropertiesResponse) (err error) {
|
||||
// NOTE - In BlobGetPropertiesResponse, Client library returns MD5 as base64 decoded string
|
||||
// unlike BlobProperties in BlobItem (used in decodeMetadataFromBlob) which returns base64
|
||||
// encoded bytes. Object needs to maintain this as base64 encoded string.
|
||||
o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
|
||||
o.mimeType = info.ContentType()
|
||||
o.size = info.ContentLength()
|
||||
o.modTime = time.Time(info.LastModified())
|
||||
o.accessTier = azblob.AccessTierType(info.AccessTier())
|
||||
o.setMetadata(info.NewMetadata())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItem) (err error) {
|
||||
o.md5 = string(info.Properties.ContentMD5[:])
|
||||
o.mimeType = *info.Properties.ContentType
|
||||
o.size = *info.Properties.ContentLength
|
||||
o.modTime = info.Properties.LastModified
|
||||
o.accessTier = info.Properties.AccessTier
|
||||
o.setMetadata(info.Metadata)
|
||||
return nil
|
||||
}
|
||||
|
||||
// getBlobReference creates an empty blob reference with no metadata
|
||||
func (o *Object) getBlobReference() *storage.Blob {
|
||||
func (o *Object) getBlobReference() azblob.BlobURL {
|
||||
return o.fs.getBlobReference(o.remote)
|
||||
}
|
||||
|
||||
@@ -795,19 +864,22 @@ func (o *Object) readMetaData() (err error) {
|
||||
blob := o.getBlobReference()
|
||||
|
||||
// Read metadata (this includes metadata)
|
||||
getPropertiesOptions := storage.GetBlobPropertiesOptions{}
|
||||
options := azblob.BlobAccessConditions{}
|
||||
ctx := context.Background()
|
||||
var blobProperties *azblob.BlobGetPropertiesResponse
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err = blob.GetProperties(&getPropertiesOptions)
|
||||
blobProperties, err = blob.GetProperties(ctx, options)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
if storageErr, ok := err.(storage.AzureStorageServiceError); ok && storageErr.StatusCode == http.StatusNotFound {
|
||||
// On directories - GetProperties does not work and current SDK does not populate service code correctly hence check regular http response as well
|
||||
if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeBlobNotFound || storageErr.Response().StatusCode == http.StatusNotFound) {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
return o.decodeMetaData(blob)
|
||||
return o.decodeMetaDataFromPropertiesResponse(blobProperties)
|
||||
}
|
||||
|
||||
// timeString returns modTime as the number of milliseconds
|
||||
@@ -844,10 +916,17 @@ func (o *Object) ModTime() (result time.Time) {
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(modTime time.Time) error {
|
||||
blob := o.getBlobWithModTime(modTime)
|
||||
options := storage.SetBlobMetadataOptions{}
|
||||
// Make sure o.meta is not nil
|
||||
if o.meta == nil {
|
||||
o.meta = make(map[string]string, 1)
|
||||
}
|
||||
// Set modTimeKey in it
|
||||
o.meta[modTimeKey] = modTime.Format(timeFormatOut)
|
||||
|
||||
blob := o.getBlobReference()
|
||||
ctx := context.Background()
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
err := blob.SetMetadata(&options)
|
||||
_, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{})
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -864,29 +943,18 @@ func (o *Object) Storable() bool {
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
getBlobOptions := storage.GetBlobOptions{}
|
||||
getBlobRangeOptions := storage.GetBlobRangeOptions{
|
||||
GetBlobOptions: &getBlobOptions,
|
||||
}
|
||||
// Offset and Count for range download
|
||||
var offset int64
|
||||
var count int64
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.RangeOption:
|
||||
start, end := x.Start, x.End
|
||||
if end < 0 {
|
||||
end = 0
|
||||
}
|
||||
if start < 0 {
|
||||
start = o.size - end
|
||||
end = 0
|
||||
}
|
||||
getBlobRangeOptions.Range = &storage.BlobRange{
|
||||
Start: uint64(start),
|
||||
End: uint64(end),
|
||||
offset, count = x.Decode(o.size)
|
||||
if count < 0 {
|
||||
count = o.size - offset
|
||||
}
|
||||
case *fs.SeekOption:
|
||||
getBlobRangeOptions.Range = &storage.BlobRange{
|
||||
Start: uint64(x.Offset),
|
||||
}
|
||||
offset = x.Offset
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
@@ -894,17 +962,17 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
}
|
||||
}
|
||||
blob := o.getBlobReference()
|
||||
ctx := context.Background()
|
||||
ac := azblob.BlobAccessConditions{}
|
||||
var downloadResponse *azblob.DownloadResponse
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
if getBlobRangeOptions.Range == nil {
|
||||
in, err = blob.Get(&getBlobOptions)
|
||||
} else {
|
||||
in, err = blob.GetRange(&getBlobRangeOptions)
|
||||
}
|
||||
downloadResponse, err = blob.Download(ctx, offset, count, ac, false)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to open for download")
|
||||
}
|
||||
in = downloadResponse.Body(azblob.RetryReaderOptions{})
|
||||
return in, nil
|
||||
}
|
||||
|
||||
@@ -929,12 +997,18 @@ func init() {
|
||||
}
|
||||
}
|
||||
|
||||
// readSeeker joins an io.Reader and an io.Seeker
|
||||
type readSeeker struct {
|
||||
io.Reader
|
||||
io.Seeker
|
||||
}
|
||||
|
||||
// uploadMultipart uploads a file using multipart upload
|
||||
//
|
||||
// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
|
||||
func (o *Object) uploadMultipart(in io.Reader, size int64, blob *storage.Blob, putBlobOptions *storage.PutBlobOptions) (err error) {
|
||||
func (o *Object) uploadMultipart(in io.Reader, size int64, blob *azblob.BlobURL, httpHeaders *azblob.BlobHTTPHeaders) (err error) {
|
||||
// Calculate correct chunkSize
|
||||
chunkSize := int64(chunkSize)
|
||||
chunkSize := int64(o.fs.opt.ChunkSize)
|
||||
var totalParts int64
|
||||
for {
|
||||
// Calculate number of parts
|
||||
@@ -954,31 +1028,37 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, blob *storage.Blob, p
|
||||
}
|
||||
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, fs.SizeSuffix(chunkSize))
|
||||
|
||||
// Create an empty blob
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err := blob.CreateBlockBlob(putBlobOptions)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
// https://godoc.org/github.com/Azure/azure-storage-blob-go/2017-07-29/azblob#example-BlockBlobURL
|
||||
// Utilities are cloned from above example
|
||||
// These helper functions convert a binary block ID to a base-64 string and vice versa
|
||||
// NOTE: The blockID must be <= 64 bytes and ALL blockIDs for the block must be the same length
|
||||
blockIDBinaryToBase64 := func(blockID []byte) string { return base64.StdEncoding.EncodeToString(blockID) }
|
||||
// These helper functions convert an int block ID to a base-64 string and vice versa
|
||||
blockIDIntToBase64 := func(blockID uint64) string {
|
||||
binaryBlockID := (&[8]byte{})[:] // All block IDs are 8 bytes long
|
||||
binary.LittleEndian.PutUint64(binaryBlockID, blockID)
|
||||
return blockIDBinaryToBase64(binaryBlockID)
|
||||
}
|
||||
|
||||
// block ID variables
|
||||
var (
|
||||
rawID uint64
|
||||
bytesID = make([]byte, 8)
|
||||
blockID = "" // id in base64 encoded form
|
||||
blocks = make([]storage.Block, 0, totalParts)
|
||||
blocks = make([]string, totalParts)
|
||||
)
|
||||
|
||||
// increment the blockID
|
||||
nextID := func() {
|
||||
rawID++
|
||||
binary.LittleEndian.PutUint64(bytesID, rawID)
|
||||
blockID = base64.StdEncoding.EncodeToString(bytesID)
|
||||
blocks = append(blocks, storage.Block{
|
||||
ID: blockID,
|
||||
Status: storage.BlockStatusLatest,
|
||||
})
|
||||
blockID = blockIDIntToBase64(rawID)
|
||||
blocks = append(blocks, blockID)
|
||||
}
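StageBlock and CommitBlockList require every block ID to be a base64 string of identical length, which is why the helper above always serialises the counter into a fixed 8-byte little-endian buffer before encoding it. A standalone sketch of the encode step and its inverse:

```go
package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// blockIDIntToBase64 turns a block counter into a base64 block ID.
// Every ID is built from an 8-byte buffer, so all IDs in the block list
// have the same encoded length, as the blob service requires.
func blockIDIntToBase64(blockID uint64) string {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, blockID)
	return base64.StdEncoding.EncodeToString(buf)
}

// blockIDBase64ToInt is the inverse, handy when inspecting a block list.
func blockIDBase64ToInt(s string) (uint64, error) {
	buf, err := base64.StdEncoding.DecodeString(s)
	if err != nil {
		return 0, err
	}
	return binary.LittleEndian.Uint64(buf), nil
}

func main() {
	id := blockIDIntToBase64(1)
	n, _ := blockIDBase64ToInt(id)
	fmt.Println(id, n) // AQAAAAAAAAA= 1
}
```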
|
||||
|
||||
// Get BlockBlobURL, we will use default pipeline here
|
||||
blockBlobURL := blob.ToBlockBlobURL()
|
||||
ctx := context.Background()
|
||||
ac := azblob.LeaseAccessConditions{} // Use default lease access conditions
|
||||
|
||||
// unwrap the accounting from the input, we use wrap to put it
|
||||
// back on after the buffering
|
||||
in, wrap := accounting.UnWrap(in)
|
||||
@@ -1021,13 +1101,11 @@ outer:
|
||||
defer o.fs.uploadToken.Put()
|
||||
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, totalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
|
||||
|
||||
// Upload the block, with MD5 for check
|
||||
md5sum := md5.Sum(buf)
|
||||
putBlockOptions := storage.PutBlockOptions{
|
||||
ContentMD5: base64.StdEncoding.EncodeToString(md5sum[:]),
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err = blob.PutBlockWithLength(blockID, uint64(len(buf)), wrap(bytes.NewBuffer(buf)), &putBlockOptions)
|
||||
bufferReader := bytes.NewReader(buf)
|
||||
wrappedReader := wrap(bufferReader)
|
||||
rs := readSeeker{wrappedReader, bufferReader}
|
||||
_, err = blockBlobURL.StageBlock(ctx, blockID, rs, ac)
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
|
||||
@@ -1057,9 +1135,8 @@ outer:
|
||||
}
|
||||
|
||||
// Finalise the upload session
|
||||
putBlockListOptions := storage.PutBlockListOptions{}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err := blob.PutBlockList(blocks, &putBlockListOptions)
|
||||
_, err := blockBlobURL.CommitBlockList(ctx, blocks, *httpHeaders, o.meta, azblob.BlobAccessConditions{})
|
||||
return o.fs.shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1077,29 +1154,45 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
return err
}
size := src.Size()
blob := o.getBlobWithModTime(src.ModTime())
blob.Properties.ContentType = fs.MimeType(o)
if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
if err == nil {
blob.Properties.ContentMD5 = base64.StdEncoding.EncodeToString(sourceMD5bytes)
} else {
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
// Update Mod time
o.updateMetadataWithModTime(src.ModTime())
if err != nil {
return err
}

blob := o.getBlobReference()
httpHeaders := azblob.BlobHTTPHeaders{}
httpHeaders.ContentType = fs.MimeType(o)
// Multipart upload doesn't support MD5 checksums at put block calls, hence calculate
// MD5 only for PutBlob requests
if size < int64(o.fs.opt.UploadCutoff) {
if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
if err == nil {
httpHeaders.ContentMD5 = sourceMD5bytes
} else {
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
}
}
}
putBlobOptions := storage.PutBlobOptions{}

putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
BufferSize: int(o.fs.opt.ChunkSize),
MaxBuffers: 4,
Metadata: o.meta,
BlobHTTPHeaders: httpHeaders,
}

ctx := context.Background()
// Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
if size >= int64(uploadCutoff) {
if size >= int64(o.fs.opt.UploadCutoff) {
// If a large file upload in chunks
err = o.uploadMultipart(in, size, blob, &putBlobOptions)
err = o.uploadMultipart(in, size, &blob, &httpHeaders)
} else {
// Write a small blob in one transaction
if size == 0 {
in = nil
}
err = blob.CreateBlockBlobFromReader(in, &putBlobOptions)
blockBlobURL := blob.ToBlockBlobURL()
_, err = azblob.UploadStreamToBlockBlob(ctx, in, blockBlobURL, putBlobOptions)
}
return o.fs.shouldRetry(err)
})
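For the small-blob path, UploadStreamToBlockBlob buffers the input in memory: with BufferSize set to the configured chunk size and MaxBuffers fixed at 4, a single transfer can hold roughly chunk size × 4 bytes. A quick illustration of that sizing (the 4 MiB figure is just an example value, not necessarily the backend's default):

```go
package main

import "fmt"

func main() {
	const chunkSize = 4 * 1024 * 1024 // e.g. a 4 MiB chunk size
	const maxBuffers = 4              // as set in UploadStreamToBlockBlobOptions above

	// Worst-case bytes held in memory for one streaming upload.
	fmt.Println(chunkSize * maxBuffers) // 16777216
}
```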
@@ -1113,9 +1206,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
// Remove an object
func (o *Object) Remove() error {
blob := o.getBlobReference()
options := storage.DeleteBlobOptions{}
snapShotOptions := azblob.DeleteSnapshotsOptionNone
ac := azblob.BlobAccessConditions{}
ctx := context.Background()
return o.fs.pacer.Call(func() (bool, error) {
err := blob.Delete(&options)
_, err := blob.Delete(ctx, snapShotOptions, ac)
return o.fs.shouldRetry(err)
})
}

@@ -1,4 +1,7 @@
// Test AzureBlob filesystem interface

// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8

package azureblob_test

import (

backend/azureblob/azureblob_unsupported.go (new file, 6 lines)
@@ -0,0 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build freebsd netbsd openbsd plan9 solaris !go1.8

package azureblob
backend/b2/b2.go (162 lines changed)
@@ -22,8 +22,8 @@ import (
|
||||
"github.com/ncw/rclone/backend/b2/api"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
@@ -34,30 +34,27 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
defaultEndpoint = "https://api.backblazeb2.com"
|
||||
headerPrefix = "x-bz-info-" // lower case as that is what the server returns
|
||||
timeKey = "src_last_modified_millis"
|
||||
timeHeader = headerPrefix + timeKey
|
||||
sha1Key = "large_file_sha1"
|
||||
sha1Header = "X-Bz-Content-Sha1"
|
||||
sha1InfoHeader = headerPrefix + sha1Key
|
||||
testModeHeader = "X-Bz-Test-Mode"
|
||||
retryAfterHeader = "Retry-After"
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 5 * time.Minute
|
||||
decayConstant = 1 // bigger for slower decay, exponential
|
||||
maxParts = 10000
|
||||
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
|
||||
defaultEndpoint = "https://api.backblazeb2.com"
|
||||
headerPrefix = "x-bz-info-" // lower case as that is what the server returns
|
||||
timeKey = "src_last_modified_millis"
|
||||
timeHeader = headerPrefix + timeKey
|
||||
sha1Key = "large_file_sha1"
|
||||
sha1Header = "X-Bz-Content-Sha1"
|
||||
sha1InfoHeader = headerPrefix + sha1Key
|
||||
testModeHeader = "X-Bz-Test-Mode"
|
||||
retryAfterHeader = "Retry-After"
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 5 * time.Minute
|
||||
decayConstant = 1 // bigger for slower decay, exponential
|
||||
maxParts = 10000
|
||||
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
|
||||
minChunkSize = 5E6
|
||||
defaultChunkSize = 96 * 1024 * 1024
|
||||
defaultUploadCutoff = 200E6
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
minChunkSize = fs.SizeSuffix(5E6)
|
||||
chunkSize = fs.SizeSuffix(96 * 1024 * 1024)
|
||||
uploadCutoff = fs.SizeSuffix(200E6)
|
||||
b2TestMode = flags.StringP("b2-test-mode", "", "", "A flag string for X-Bz-Test-Mode header.")
|
||||
b2Versions = flags.BoolP("b2-versions", "", false, "Include old versions in directory listings.")
|
||||
b2HardDelete = flags.BoolP("b2-hard-delete", "", false, "Permanently delete files on remote removal, otherwise hide files.")
|
||||
errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
|
||||
)
|
||||
|
||||
@@ -68,29 +65,64 @@ func init() {
|
||||
Description: "Backblaze B2",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "account",
|
||||
Help: "Account ID",
|
||||
Name: "account",
|
||||
Help: "Account ID",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "key",
|
||||
Help: "Application Key",
|
||||
Name: "key",
|
||||
Help: "Application Key",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for the service - leave blank normally.",
|
||||
},
|
||||
},
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for the service.\nLeave blank normally.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "test_mode",
|
||||
Help: "A flag string for X-Bz-Test-Mode header for debugging.",
|
||||
Default: "",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "versions",
|
||||
Help: "Include old versions in directory listings.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "hard_delete",
|
||||
Help: "Permanently delete files on remote removal, otherwise hide files.",
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: "Cutoff for switching to chunked upload.",
|
||||
Default: fs.SizeSuffix(defaultUploadCutoff),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: "Upload chunk size. Must fit in memory.",
|
||||
Default: fs.SizeSuffix(defaultChunkSize),
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
flags.VarP(&uploadCutoff, "b2-upload-cutoff", "", "Cutoff for switching to chunked upload")
|
||||
flags.VarP(&chunkSize, "b2-chunk-size", "", "Upload chunk size. Must fit in memory.")
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
type Options struct {
Account string `config:"account"`
Key string `config:"key"`
Endpoint string `config:"endpoint"`
TestMode string `config:"test_mode"`
Versions bool `config:"versions"`
HardDelete bool `config:"hard_delete"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
}

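The Options struct plus its config tags is what lets configstruct.Set in NewFs replace the old config.FileGet calls and package-level flags. A small self-contained sketch of that round trip, assuming the configmap/configstruct behaviour used by the test changes later in this diff (field set trimmed to two options):

```go
package main

import (
	"fmt"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
)

// Options mirrors the b2 Options struct above, trimmed to two fields.
type Options struct {
	UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
	HardDelete   bool          `config:"hard_delete"`
}

func main() {
	// configmap.Simple stands in for the remote's section of rclone.conf.
	m := configmap.Simple{"upload_cutoff": "200M", "hard_delete": "true"}

	// configstruct.Set copies matching keys into the tagged struct fields,
	// parsing typed values such as fs.SizeSuffix along the way.
	opt := new(Options)
	if err := configstruct.Set(m, opt); err != nil {
		panic(err)
	}
	fmt.Println(opt.UploadCutoff, opt.HardDelete)
}
```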
// Fs represents a remote b2 server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
opt Options // parsed config options
|
||||
features *fs.Features // optional features
|
||||
account string // account name
|
||||
key string // auth key
|
||||
endpoint string // name of the starting api endpoint
|
||||
srv *rest.Client // the connection to the b2 server
|
||||
bucket string // the bucket we are working on
|
||||
bucketOKMu sync.Mutex // mutex to protect bucket OK
|
||||
@@ -232,33 +264,37 @@ func errorHandler(resp *http.Response) error {
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
if uploadCutoff < chunkSize {
|
||||
return nil, errors.Errorf("b2: upload cutoff (%v) must be greater than or equal to chunk size (%v)", uploadCutoff, chunkSize)
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if chunkSize < minChunkSize {
|
||||
return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, chunkSize)
|
||||
if opt.UploadCutoff < opt.ChunkSize {
|
||||
return nil, errors.Errorf("b2: upload cutoff (%v) must be greater than or equal to chunk size (%v)", opt.UploadCutoff, opt.ChunkSize)
|
||||
}
|
||||
if opt.ChunkSize < minChunkSize {
|
||||
return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, opt.ChunkSize)
|
||||
}
|
||||
bucket, directory, err := parsePath(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
account := config.FileGet(name, "account")
|
||||
if account == "" {
|
||||
if opt.Account == "" {
|
||||
return nil, errors.New("account not found")
|
||||
}
|
||||
key := config.FileGet(name, "key")
|
||||
if key == "" {
|
||||
if opt.Key == "" {
|
||||
return nil, errors.New("key not found")
|
||||
}
|
||||
endpoint := config.FileGet(name, "endpoint", defaultEndpoint)
|
||||
if opt.Endpoint == "" {
|
||||
opt.Endpoint = defaultEndpoint
|
||||
}
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
bucket: bucket,
|
||||
root: directory,
|
||||
account: account,
|
||||
key: key,
|
||||
endpoint: endpoint,
|
||||
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
bufferTokens: make(chan []byte, fs.Config.Transfers),
|
||||
@@ -269,8 +305,8 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
BucketBased: true,
|
||||
}).Fill(f)
|
||||
// Set the test flag if required
|
||||
if *b2TestMode != "" {
|
||||
testMode := strings.TrimSpace(*b2TestMode)
|
||||
if opt.TestMode != "" {
|
||||
testMode := strings.TrimSpace(opt.TestMode)
|
||||
f.srv.SetHeader(testModeHeader, testMode)
|
||||
fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
|
||||
}
|
||||
@@ -316,9 +352,9 @@ func (f *Fs) authorizeAccount() error {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/b2api/v1/b2_authorize_account",
|
||||
RootURL: f.endpoint,
|
||||
UserName: f.account,
|
||||
Password: f.key,
|
||||
RootURL: f.opt.Endpoint,
|
||||
UserName: f.opt.Account,
|
||||
Password: f.opt.Key,
|
||||
ExtraHeaders: map[string]string{"Authorization": ""}, // unset the Authorization for this request
|
||||
}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
@@ -384,7 +420,7 @@ func (f *Fs) clearUploadURL() {
|
||||
func (f *Fs) getUploadBlock() []byte {
|
||||
buf := <-f.bufferTokens
|
||||
if buf == nil {
|
||||
buf = make([]byte, chunkSize)
|
||||
buf = make([]byte, f.opt.ChunkSize)
|
||||
}
|
||||
// fs.Debugf(f, "Getting upload block %p", buf)
|
||||
return buf
|
||||
@@ -393,7 +429,7 @@ func (f *Fs) getUploadBlock() []byte {
|
||||
// putUploadBlock returns a block to the pool of size chunkSize
|
||||
func (f *Fs) putUploadBlock(buf []byte) {
|
||||
buf = buf[:cap(buf)]
|
||||
if len(buf) != int(chunkSize) {
|
||||
if len(buf) != int(f.opt.ChunkSize) {
|
||||
panic("bad blocksize returned to pool")
|
||||
}
|
||||
// fs.Debugf(f, "Returning upload block %p", buf)
|
||||
@@ -563,7 +599,7 @@ func (f *Fs) markBucketOK() {
|
||||
// listDir lists a single directory
|
||||
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
|
||||
last := ""
|
||||
err = f.list(dir, false, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
|
||||
err = f.list(dir, false, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -635,7 +671,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
||||
}
|
||||
list := walk.NewListRHelper(callback)
|
||||
last := ""
|
||||
err = f.list(dir, true, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
|
||||
err = f.list(dir, true, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1035,12 +1071,12 @@ func (o *Object) readMetaData() (err error) {
|
||||
maxSearched := 1
|
||||
var timestamp api.Timestamp
|
||||
baseRemote := o.remote
|
||||
if *b2Versions {
|
||||
if o.fs.opt.Versions {
|
||||
timestamp, baseRemote = api.RemoveVersion(baseRemote)
|
||||
maxSearched = maxVersions
|
||||
}
|
||||
var info *api.File
|
||||
err = o.fs.list("", true, baseRemote, maxSearched, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
|
||||
err = o.fs.list("", true, baseRemote, maxSearched, o.fs.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
|
||||
if isDirectory {
|
||||
return nil
|
||||
}
|
||||
@@ -1254,7 +1290,7 @@ func urlEncode(in string) string {
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
if *b2Versions {
|
||||
if o.fs.opt.Versions {
|
||||
return errNotWithVersions
|
||||
}
|
||||
err = o.fs.Mkdir("")
|
||||
@@ -1289,7 +1325,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
} else if size > int64(uploadCutoff) {
|
||||
} else if size > int64(o.fs.opt.UploadCutoff) {
|
||||
up, err := o.fs.newLargeUpload(o, in, src)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1408,10 +1444,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove() error {
|
||||
if *b2Versions {
|
||||
if o.fs.opt.Versions {
|
||||
return errNotWithVersions
|
||||
}
|
||||
if *b2HardDelete {
|
||||
if o.fs.opt.HardDelete {
|
||||
return o.fs.deleteByID(o.id, o.fs.root+o.remote)
|
||||
}
|
||||
return o.fs.hide(o.fs.root + o.remote)
|
||||
|
||||
@@ -86,10 +86,10 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
parts := int64(0)
sha1SliceSize := int64(maxParts)
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", fs.SizeSuffix(chunkSize), fs.SizeSuffix(maxParts*chunkSize))
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts = size / int64(o.fs.opt.ChunkSize)
if size%int64(o.fs.opt.ChunkSize) != 0 {
parts++
}
if parts > maxParts {
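The parts calculation above is a ceiling division of the upload size by the chunk size, later capped at maxParts (10000). For example, a 1 GiB file with the 96 MiB default chunk size needs 11 parts. The same arithmetic as a runnable check (sizes chosen only for illustration):

```go
package main

import "fmt"

func main() {
	const chunkSize = int64(96 * 1024 * 1024) // defaultChunkSize above
	size := int64(1 << 30)                    // a 1 GiB upload, for example

	// Ceiling division: one extra part for any remainder.
	parts := size / chunkSize
	if size%chunkSize != 0 {
		parts++
	}
	fmt.Println(parts) // 11
}
```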
@@ -409,8 +409,8 @@ outer:
|
||||
}
|
||||
|
||||
reqSize := remaining
|
||||
if reqSize >= int64(chunkSize) {
|
||||
reqSize = int64(chunkSize)
|
||||
if reqSize >= int64(up.f.opt.ChunkSize) {
|
||||
reqSize = int64(up.f.opt.ChunkSize)
|
||||
}
|
||||
|
||||
// Get a block of memory
|
||||
|
||||
@@ -23,7 +23,8 @@ import (
|
||||
"github.com/ncw/rclone/backend/box/api"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
@@ -46,6 +47,7 @@ const (
uploadURL = "https://upload.box.com/api/2.0"
listChunks = 1000 // chunk size to read directory listings
minUploadCutoff = 50000000 // upload cutoff can be no lower than this
defaultUploadCutoff = 50 * 1024 * 1024
)

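Worth noting: the new defaultUploadCutoff of 50 * 1024 * 1024 bytes (52,428,800) sits just above minUploadCutoff (50,000,000), so the cutoff check in NewFs passes with the defaults. A one-line verification of that arithmetic:

```go
package main

import "fmt"

func main() {
	const minUploadCutoff = 50000000
	const defaultUploadCutoff = 50 * 1024 * 1024

	fmt.Println(defaultUploadCutoff, defaultUploadCutoff >= minUploadCutoff) // 52428800 true
}
```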
// Globals
|
||||
@@ -61,7 +63,6 @@ var (
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
}
|
||||
uploadCutoff = fs.SizeSuffix(50 * 1024 * 1024)
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -70,27 +71,37 @@ func init() {
|
||||
Name: "box",
|
||||
Description: "Box",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string) {
|
||||
err := oauthutil.Config("box", name, oauthConfig)
|
||||
Config: func(name string, m configmap.Mapper) {
|
||||
err := oauthutil.Config("box", name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: config.ConfigClientID,
|
||||
Help: "Box App Client Id - leave blank normally.",
|
||||
Help: "Box App Client Id.\nLeave blank normally.",
|
||||
}, {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Box App Client Secret - leave blank normally.",
|
||||
Help: "Box App Client Secret\nLeave blank normally.",
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: "Cutoff for switching to multipart upload.",
|
||||
Default: fs.SizeSuffix(defaultUploadCutoff),
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
flags.VarP(&uploadCutoff, "box-upload-cutoff", "", "Cutoff for switching to multipart upload")
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
}
|
||||
|
||||
// Fs represents a remote box
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
srv *rest.Client // the connection to the one drive server
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
@@ -219,13 +230,20 @@ func errorHandler(resp *http.Response) error {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
if uploadCutoff < minUploadCutoff {
|
||||
return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", uploadCutoff, fs.SizeSuffix(minUploadCutoff))
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if opt.UploadCutoff < minUploadCutoff {
|
||||
return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
|
||||
}
|
||||
|
||||
root = parsePath(root)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(name, oauthConfig)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure Box: %v", err)
|
||||
}
|
||||
@@ -233,6 +251,7 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
|
||||
@@ -1035,7 +1054,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
}
|
||||
|
||||
// Upload with simple or multipart
|
||||
if size <= int64(uploadCutoff) {
|
||||
if size <= int64(o.fs.opt.UploadCutoff) {
|
||||
err = o.upload(in, leaf, directoryID, modTime)
|
||||
} else {
|
||||
err = o.uploadMultipart(in, leaf, directoryID, size, modTime)
|
||||
|
||||
backend/cache/cache.go (438 lines changed, vendored)
@@ -18,7 +18,8 @@ import (
|
||||
"github.com/ncw/rclone/backend/crypt"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/rc"
|
||||
@@ -30,13 +31,13 @@ import (
|
||||
|
||||
const (
|
||||
// DefCacheChunkSize is the default value for chunk size
|
||||
DefCacheChunkSize = "5M"
|
||||
DefCacheChunkSize = fs.SizeSuffix(5 * 1024 * 1024)
|
||||
// DefCacheTotalChunkSize is the default value for the maximum size of stored chunks
|
||||
DefCacheTotalChunkSize = "10G"
|
||||
DefCacheTotalChunkSize = fs.SizeSuffix(10 * 1024 * 1024 * 1024)
|
||||
// DefCacheChunkCleanInterval is the interval at which chunks are cleaned
|
||||
DefCacheChunkCleanInterval = "1m"
|
||||
DefCacheChunkCleanInterval = fs.Duration(time.Minute)
|
||||
// DefCacheInfoAge is the default value for object info age
|
||||
DefCacheInfoAge = "6h"
|
||||
DefCacheInfoAge = fs.Duration(6 * time.Hour)
|
||||
// DefCacheReadRetries is the default value for read retries
|
||||
DefCacheReadRetries = 10
|
||||
// DefCacheTotalWorkers is how many workers run in parallel to download chunks
|
||||
@@ -48,29 +49,9 @@ const (
|
||||
// DefCacheWrites will cache file data on writes through the cache
|
||||
DefCacheWrites = false
|
||||
// DefCacheTmpWaitTime says how long should files be stored in local cache before being uploaded
|
||||
DefCacheTmpWaitTime = "15m"
|
||||
DefCacheTmpWaitTime = fs.Duration(15 * time.Second)
|
||||
// DefCacheDbWaitTime defines how long the cache backend should wait for the DB to be available
|
||||
DefCacheDbWaitTime = 1 * time.Second
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
// Flags
|
||||
cacheDbPath = flags.StringP("cache-db-path", "", filepath.Join(config.CacheDir, "cache-backend"), "Directory to cache DB")
|
||||
cacheChunkPath = flags.StringP("cache-chunk-path", "", filepath.Join(config.CacheDir, "cache-backend"), "Directory to cached chunk files")
|
||||
cacheDbPurge = flags.BoolP("cache-db-purge", "", false, "Purge the cache DB before")
|
||||
cacheChunkSize = flags.StringP("cache-chunk-size", "", DefCacheChunkSize, "The size of a chunk")
|
||||
cacheTotalChunkSize = flags.StringP("cache-total-chunk-size", "", DefCacheTotalChunkSize, "The total size which the chunks can take up from the disk")
|
||||
cacheChunkCleanInterval = flags.StringP("cache-chunk-clean-interval", "", DefCacheChunkCleanInterval, "Interval at which chunk cleanup runs")
|
||||
cacheInfoAge = flags.StringP("cache-info-age", "", DefCacheInfoAge, "How much time should object info be stored in cache")
|
||||
cacheReadRetries = flags.IntP("cache-read-retries", "", DefCacheReadRetries, "How many times to retry a read from a cache storage")
|
||||
cacheTotalWorkers = flags.IntP("cache-workers", "", DefCacheTotalWorkers, "How many workers should run in parallel to download chunks")
|
||||
cacheChunkNoMemory = flags.BoolP("cache-chunk-no-memory", "", DefCacheChunkNoMemory, "Disable the in-memory cache for storing chunks during streaming")
|
||||
cacheRps = flags.IntP("cache-rps", "", int(DefCacheRps), "Limits the number of requests per second to the source FS. -1 disables the rate limiter")
|
||||
cacheStoreWrites = flags.BoolP("cache-writes", "", DefCacheWrites, "Will cache file data on writes through the FS")
|
||||
cacheTempWritePath = flags.StringP("cache-tmp-upload-path", "", "", "Directory to keep temporary files until they are uploaded to the cloud storage")
|
||||
cacheTempWaitTime = flags.StringP("cache-tmp-wait-time", "", DefCacheTmpWaitTime, "How long should files be stored in local cache before being uploaded")
|
||||
cacheDbWaitTime = flags.DurationP("cache-db-wait-time", "", DefCacheDbWaitTime, "How long to wait for the DB to be available - 0 is unlimited")
|
||||
DefCacheDbWaitTime = fs.Duration(1 * time.Second)
|
||||
)
|
||||
|
||||
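The cache defaults above move from strings ("5M", "6h", "15m") to typed fs.SizeSuffix and fs.Duration values, and the old cache-* flag block is dropped in favour of option defaults. A short sketch of how such typed values parse and print, assuming the usual flag-style Set/String methods on these rclone types:

```go
package main

import (
	"fmt"
	"time"

	"github.com/ncw/rclone/fs"
)

func main() {
	// fs.SizeSuffix understands the same "5M" / "10G" spellings the old
	// string defaults used.
	var chunk fs.SizeSuffix
	if err := chunk.Set("5M"); err != nil {
		panic(err)
	}
	fmt.Println(chunk, int64(chunk)) // prints something like: 5M 5242880

	// fs.Duration wraps time.Duration, so typed defaults convert directly.
	age := fs.Duration(6 * time.Hour)
	fmt.Println(time.Duration(age)) // 6h0m0s
}
```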
// Register with Fs
|
||||
@@ -80,73 +61,155 @@ func init() {
|
||||
Description: "Cache a remote",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "remote",
|
||||
Help: "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
|
||||
Name: "remote",
|
||||
Help: "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "plex_url",
|
||||
Help: "Optional: The URL of the Plex server",
|
||||
Optional: true,
|
||||
Name: "plex_url",
|
||||
Help: "The URL of the Plex server",
|
||||
}, {
|
||||
Name: "plex_username",
|
||||
Help: "Optional: The username of the Plex user",
|
||||
Optional: true,
|
||||
Name: "plex_username",
|
||||
Help: "The username of the Plex user",
|
||||
}, {
|
||||
Name: "plex_password",
|
||||
Help: "Optional: The password of the Plex user",
|
||||
Help: "The password of the Plex user",
|
||||
IsPassword: true,
|
||||
Optional: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: "The size of a chunk. Lower value good for slow connections but can affect seamless reading. \nDefault: " + DefCacheChunkSize,
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "1m",
|
||||
Help: "1MB",
|
||||
}, {
|
||||
Value: "5M",
|
||||
Help: "5 MB",
|
||||
}, {
|
||||
Value: "10M",
|
||||
Help: "10 MB",
|
||||
},
|
||||
},
|
||||
Optional: true,
|
||||
Name: "plex_token",
|
||||
Help: "The plex token for authentication - auto set normally",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "info_age",
|
||||
Help: "How much time should object info (file size, file hashes etc) be stored in cache. Use a very high value if you don't plan on changing the source FS from outside the cache. \nAccepted units are: \"s\", \"m\", \"h\".\nDefault: " + DefCacheInfoAge,
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "1h",
|
||||
Help: "1 hour",
|
||||
}, {
|
||||
Value: "24h",
|
||||
Help: "24 hours",
|
||||
}, {
|
||||
Value: "48h",
|
||||
Help: "48 hours",
|
||||
},
|
||||
},
|
||||
Optional: true,
|
||||
Name: "chunk_size",
|
||||
Help: "The size of a chunk. Lower value good for slow connections but can affect seamless reading.",
|
||||
Default: DefCacheChunkSize,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "1m",
|
||||
Help: "1MB",
|
||||
}, {
|
||||
Value: "5M",
|
||||
Help: "5 MB",
|
||||
}, {
|
||||
Value: "10M",
|
||||
Help: "10 MB",
|
||||
}},
|
||||
}, {
|
||||
Name: "chunk_total_size",
|
||||
Help: "The maximum size of stored chunks. When the storage grows beyond this size, the oldest chunks will be deleted. \nDefault: " + DefCacheTotalChunkSize,
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "500M",
|
||||
Help: "500 MB",
|
||||
}, {
|
||||
Value: "1G",
|
||||
Help: "1 GB",
|
||||
}, {
|
||||
Value: "10G",
|
||||
Help: "10 GB",
|
||||
},
|
||||
},
|
||||
Optional: true,
|
||||
Name: "info_age",
|
||||
Help: "How much time should object info (file size, file hashes etc) be stored in cache.\nUse a very high value if you don't plan on changing the source FS from outside the cache.\nAccepted units are: \"s\", \"m\", \"h\".",
|
||||
Default: DefCacheInfoAge,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "1h",
|
||||
Help: "1 hour",
|
||||
}, {
|
||||
Value: "24h",
|
||||
Help: "24 hours",
|
||||
}, {
|
||||
Value: "48h",
|
||||
Help: "48 hours",
|
||||
}},
|
||||
}, {
|
||||
Name: "chunk_total_size",
|
||||
Help: "The maximum size of stored chunks. When the storage grows beyond this size, the oldest chunks will be deleted.",
|
||||
Default: DefCacheTotalChunkSize,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "500M",
|
||||
Help: "500 MB",
|
||||
}, {
|
||||
Value: "1G",
|
||||
Help: "1 GB",
|
||||
}, {
|
||||
Value: "10G",
|
||||
Help: "10 GB",
|
||||
}},
|
||||
}, {
|
||||
Name: "db_path",
|
||||
Default: filepath.Join(config.CacheDir, "cache-backend"),
|
||||
Help: "Directory to cache DB",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_path",
|
||||
Default: filepath.Join(config.CacheDir, "cache-backend"),
|
||||
Help: "Directory to cache chunk files",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "db_purge",
|
||||
Default: false,
|
||||
Help: "Purge the cache DB before",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_clean_interval",
|
||||
Default: DefCacheChunkCleanInterval,
|
||||
Help: "Interval at which chunk cleanup runs",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "read_retries",
|
||||
Default: DefCacheReadRetries,
|
||||
Help: "How many times to retry a read from a cache storage",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "workers",
|
||||
Default: DefCacheTotalWorkers,
|
||||
Help: "How many workers should run in parallel to download chunks",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_no_memory",
|
||||
Default: DefCacheChunkNoMemory,
|
||||
Help: "Disable the in-memory cache for storing chunks during streaming",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "rps",
|
||||
Default: int(DefCacheRps),
|
||||
Help: "Limits the number of requests per second to the source FS. -1 disables the rate limiter",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "writes",
|
||||
Default: DefCacheWrites,
|
||||
Help: "Will cache file data on writes through the FS",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "tmp_upload_path",
|
||||
Default: "",
|
||||
Help: "Directory to keep temporary files until they are uploaded to the cloud storage",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "tmp_wait_time",
|
||||
Default: DefCacheTmpWaitTime,
|
||||
Help: "How long should files be stored in local cache before being uploaded",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "db_wait_time",
|
||||
Default: DefCacheDbWaitTime,
|
||||
Help: "How long to wait for the DB to be available - 0 is unlimited",
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Remote string `config:"remote"`
|
||||
PlexURL string `config:"plex_url"`
|
||||
PlexUsername string `config:"plex_username"`
|
||||
PlexPassword string `config:"plex_password"`
|
||||
PlexToken string `config:"plex_token"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
InfoAge fs.Duration `config:"info_age"`
|
||||
ChunkTotalSize fs.SizeSuffix `config:"chunk_total_size"`
|
||||
DbPath string `config:"db_path"`
|
||||
ChunkPath string `config:"chunk_path"`
|
||||
DbPurge bool `config:"db_purge"`
|
||||
ChunkCleanInterval fs.Duration `config:"chunk_clean_interval"`
|
||||
ReadRetries int `config:"read_retries"`
|
||||
TotalWorkers int `config:"workers"`
|
||||
ChunkNoMemory bool `config:"chunk_no_memory"`
|
||||
Rps int `config:"rps"`
|
||||
StoreWrites bool `config:"writes"`
|
||||
TempWritePath string `config:"tmp_upload_path"`
|
||||
TempWaitTime fs.Duration `config:"tmp_wait_time"`
|
||||
DbWaitTime fs.Duration `config:"db_wait_time"`
|
||||
}
|
||||
|
||||
// Fs represents a wrapped fs.Fs
|
||||
type Fs struct {
|
||||
fs.Fs
|
||||
@@ -154,21 +217,10 @@ type Fs struct {
|
||||
|
||||
name string
|
||||
root string
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
cache *Persistent
|
||||
|
||||
fileAge time.Duration
|
||||
chunkSize int64
|
||||
chunkTotalSize int64
|
||||
chunkCleanInterval time.Duration
|
||||
readRetries int
|
||||
totalWorkers int
|
||||
totalMaxWorkers int
|
||||
chunkMemory bool
|
||||
cacheWrites bool
|
||||
tempWritePath string
|
||||
tempWriteWait time.Duration
|
||||
tempFs fs.Fs
|
||||
tempFs fs.Fs
|
||||
|
||||
lastChunkCleanup time.Time
|
||||
cleanupMu sync.Mutex
|
||||
@@ -188,9 +240,19 @@ func parseRootPath(path string) (string, error) {
|
||||
}
|
||||
|
||||
// NewFs constructs a Fs from the path, container:path
|
||||
func NewFs(name, rootPath string) (fs.Fs, error) {
|
||||
remote := config.FileGet(name, "remote")
|
||||
if strings.HasPrefix(remote, name+":") {
|
||||
func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
|
||||
return nil, errors.Errorf("don't set cache-total-chunk-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
|
||||
opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
|
||||
}
|
||||
|
||||
if strings.HasPrefix(opt.Remote, name+":") {
|
||||
return nil, errors.New("can't point cache remote at itself - check the value of the remote setting")
|
||||
}
|
||||
|
||||
@@ -199,7 +261,7 @@ func NewFs(name, rootPath string) (fs.Fs, error) {
|
||||
return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
|
||||
}
|
||||
|
||||
remotePath := path.Join(remote, rpath)
|
||||
remotePath := path.Join(opt.Remote, rpath)
|
||||
wrappedFs, wrapErr := fs.NewFs(remotePath)
|
||||
if wrapErr != nil && wrapErr != fs.ErrorIsFile {
|
||||
return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
|
||||
@@ -210,97 +272,46 @@ func NewFs(name, rootPath string) (fs.Fs, error) {
|
||||
fsErr = fs.ErrorIsFile
|
||||
rpath = cleanPath(path.Dir(rpath))
|
||||
}
|
||||
plexURL := config.FileGet(name, "plex_url")
|
||||
plexToken := config.FileGet(name, "plex_token")
|
||||
var chunkSize fs.SizeSuffix
|
||||
chunkSizeString := config.FileGet(name, "chunk_size", DefCacheChunkSize)
|
||||
if *cacheChunkSize != DefCacheChunkSize {
|
||||
chunkSizeString = *cacheChunkSize
|
||||
}
|
||||
err = chunkSize.Set(chunkSizeString)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to understand chunk size %v", chunkSizeString)
|
||||
}
|
||||
var chunkTotalSize fs.SizeSuffix
|
||||
chunkTotalSizeString := config.FileGet(name, "chunk_total_size", DefCacheTotalChunkSize)
|
||||
if *cacheTotalChunkSize != DefCacheTotalChunkSize {
|
||||
chunkTotalSizeString = *cacheTotalChunkSize
|
||||
}
|
||||
err = chunkTotalSize.Set(chunkTotalSizeString)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to understand chunk total size %v", chunkTotalSizeString)
|
||||
}
|
||||
chunkCleanIntervalStr := *cacheChunkCleanInterval
|
||||
chunkCleanInterval, err := time.ParseDuration(chunkCleanIntervalStr)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to understand duration %v", chunkCleanIntervalStr)
|
||||
}
|
||||
infoAge := config.FileGet(name, "info_age", DefCacheInfoAge)
|
||||
if *cacheInfoAge != DefCacheInfoAge {
|
||||
infoAge = *cacheInfoAge
|
||||
}
|
||||
infoDuration, err := time.ParseDuration(infoAge)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to understand duration %v", infoAge)
|
||||
}
|
||||
waitTime, err := time.ParseDuration(*cacheTempWaitTime)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to understand duration %v", *cacheTempWaitTime)
|
||||
}
|
||||
// configure cache backend
|
||||
if *cacheDbPurge {
|
||||
if opt.DbPurge {
|
||||
fs.Debugf(name, "Purging the DB")
|
||||
}
|
||||
f := &Fs{
|
||||
Fs: wrappedFs,
|
||||
name: name,
|
||||
root: rpath,
|
||||
fileAge: infoDuration,
|
||||
chunkSize: int64(chunkSize),
|
||||
chunkTotalSize: int64(chunkTotalSize),
|
||||
chunkCleanInterval: chunkCleanInterval,
|
||||
readRetries: *cacheReadRetries,
|
||||
totalWorkers: *cacheTotalWorkers,
|
||||
totalMaxWorkers: *cacheTotalWorkers,
|
||||
chunkMemory: !*cacheChunkNoMemory,
|
||||
cacheWrites: *cacheStoreWrites,
|
||||
lastChunkCleanup: time.Now().Truncate(time.Hour * 24 * 30),
|
||||
tempWritePath: *cacheTempWritePath,
|
||||
tempWriteWait: waitTime,
|
||||
cleanupChan: make(chan bool, 1),
|
||||
notifiedRemotes: make(map[string]bool),
|
||||
Fs: wrappedFs,
|
||||
name: name,
|
||||
root: rpath,
|
||||
opt: *opt,
|
||||
lastChunkCleanup: time.Now().Truncate(time.Hour * 24 * 30),
|
||||
cleanupChan: make(chan bool, 1),
|
||||
notifiedRemotes: make(map[string]bool),
|
||||
}
|
||||
if f.chunkTotalSize < (f.chunkSize * int64(f.totalWorkers)) {
|
||||
return nil, errors.Errorf("don't set cache-total-chunk-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
|
||||
f.chunkTotalSize, f.chunkSize, f.totalWorkers)
|
||||
}
|
||||
f.rateLimiter = rate.NewLimiter(rate.Limit(float64(*cacheRps)), f.totalWorkers)
|
||||
f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
|
||||
|
||||
f.plexConnector = &plexConnector{}
|
||||
if plexURL != "" {
|
||||
if plexToken != "" {
|
||||
f.plexConnector, err = newPlexConnectorWithToken(f, plexURL, plexToken)
|
||||
if opt.PlexURL != "" {
|
||||
if opt.PlexToken != "" {
|
||||
f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", plexURL)
|
||||
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
|
||||
}
|
||||
} else {
|
||||
plexUsername := config.FileGet(name, "plex_username")
|
||||
plexPassword := config.FileGet(name, "plex_password")
|
||||
if plexPassword != "" && plexUsername != "" {
|
||||
decPass, err := obscure.Reveal(plexPassword)
|
||||
if opt.PlexPassword != "" && opt.PlexUsername != "" {
|
||||
decPass, err := obscure.Reveal(opt.PlexPassword)
|
||||
if err != nil {
|
||||
decPass = plexPassword
|
||||
decPass = opt.PlexPassword
|
||||
}
|
||||
f.plexConnector, err = newPlexConnector(f, plexURL, plexUsername, decPass)
|
||||
f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, func(token string) {
|
||||
m.Set("plex_token", token)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", plexURL)
|
||||
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dbPath := *cacheDbPath
|
||||
chunkPath := *cacheChunkPath
|
||||
dbPath := f.opt.DbPath
|
||||
chunkPath := f.opt.ChunkPath
|
||||
// if the dbPath is non default but the chunk path is default, we overwrite the last to follow the same one as dbPath
|
||||
if dbPath != filepath.Join(config.CacheDir, "cache-backend") &&
|
||||
chunkPath == filepath.Join(config.CacheDir, "cache-backend") {
|
||||
@@ -326,7 +337,8 @@ func NewFs(name, rootPath string) (fs.Fs, error) {
|
||||
fs.Infof(name, "Cache DB path: %v", dbPath)
|
||||
fs.Infof(name, "Cache chunk path: %v", chunkPath)
|
||||
f.cache, err = GetPersistent(dbPath, chunkPath, &Features{
|
||||
PurgeDb: *cacheDbPurge,
|
||||
PurgeDb: opt.DbPurge,
|
||||
DbWaitTime: time.Duration(opt.DbWaitTime),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to start cache db")
|
||||
@@ -335,7 +347,7 @@ func NewFs(name, rootPath string) (fs.Fs, error) {
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, syscall.SIGHUP)
|
||||
atexit.Register(func() {
|
||||
if plexURL != "" {
|
||||
if opt.PlexURL != "" {
|
||||
f.plexConnector.closeWebsocket()
|
||||
}
|
||||
f.StopBackgroundRunners()
|
||||
@@ -350,35 +362,35 @@ func NewFs(name, rootPath string) (fs.Fs, error) {
|
||||
}
|
||||
}()
|
||||
|
||||
fs.Infof(name, "Chunk Memory: %v", f.chunkMemory)
|
||||
fs.Infof(name, "Chunk Size: %v", fs.SizeSuffix(f.chunkSize))
|
||||
fs.Infof(name, "Chunk Total Size: %v", fs.SizeSuffix(f.chunkTotalSize))
|
||||
fs.Infof(name, "Chunk Clean Interval: %v", f.chunkCleanInterval.String())
|
||||
fs.Infof(name, "Workers: %v", f.totalWorkers)
|
||||
fs.Infof(name, "File Age: %v", f.fileAge.String())
|
||||
if f.cacheWrites {
|
||||
fs.Infof(name, "Chunk Memory: %v", !f.opt.ChunkNoMemory)
|
||||
fs.Infof(name, "Chunk Size: %v", f.opt.ChunkSize)
|
||||
fs.Infof(name, "Chunk Total Size: %v", f.opt.ChunkTotalSize)
|
||||
fs.Infof(name, "Chunk Clean Interval: %v", f.opt.ChunkCleanInterval)
|
||||
fs.Infof(name, "Workers: %v", f.opt.TotalWorkers)
|
||||
fs.Infof(name, "File Age: %v", f.opt.InfoAge)
|
||||
if !f.opt.StoreWrites {
|
||||
fs.Infof(name, "Cache Writes: enabled")
|
||||
}
|
||||
|
||||
if f.tempWritePath != "" {
|
||||
err = os.MkdirAll(f.tempWritePath, os.ModePerm)
|
||||
if f.opt.TempWritePath != "" {
|
||||
err = os.MkdirAll(f.opt.TempWritePath, os.ModePerm)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create cache directory %v", f.tempWritePath)
|
||||
return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
|
||||
}
|
||||
f.tempWritePath = filepath.ToSlash(f.tempWritePath)
|
||||
f.tempFs, err = fs.NewFs(f.tempWritePath)
|
||||
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
|
||||
f.tempFs, err = fs.NewFs(f.opt.TempWritePath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
|
||||
}
|
||||
fs.Infof(name, "Upload Temp Rest Time: %v", f.tempWriteWait.String())
|
||||
fs.Infof(name, "Upload Temp FS: %v", f.tempWritePath)
|
||||
fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime)
|
||||
fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath)
|
||||
f.backgroundRunner, _ = initBackgroundUploader(f)
|
||||
go f.backgroundRunner.run()
|
||||
}
|
||||
|
||||
go func() {
|
||||
for {
|
||||
time.Sleep(f.chunkCleanInterval)
|
||||
time.Sleep(time.Duration(f.opt.ChunkCleanInterval))
|
||||
select {
|
||||
case <-f.cleanupChan:
|
||||
fs.Infof(f, "stopping cleanup")
|
||||
@@ -391,7 +403,7 @@ func NewFs(name, rootPath string) (fs.Fs, error) {
|
||||
}()
|
||||
|
||||
if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
|
||||
doChangeNotify(f.receiveChangeNotify, f.chunkCleanInterval)
|
||||
doChangeNotify(f.receiveChangeNotify, time.Duration(f.opt.ChunkCleanInterval))
|
||||
}
|
||||
|
||||
f.features = (&fs.Features{
|
||||
@@ -400,7 +412,7 @@ func NewFs(name, rootPath string) (fs.Fs, error) {
|
||||
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
|
||||
// override only those features that use a temp fs and it doesn't support them
|
||||
//f.features.ChangeNotify = f.ChangeNotify
|
||||
if f.tempWritePath != "" {
|
||||
if f.opt.TempWritePath != "" {
|
||||
if f.tempFs.Features().Copy == nil {
|
||||
f.features.Copy = nil
|
||||
}
|
||||
@@ -563,7 +575,7 @@ func (f *Fs) receiveChangeNotify(forgetPath string, entryType fs.EntryType) {
|
||||
// notifyChangeUpstreamIfNeeded will check if the wrapped remote doesn't notify on changes
|
||||
// or if we use a temp fs
|
||||
func (f *Fs) notifyChangeUpstreamIfNeeded(remote string, entryType fs.EntryType) {
|
||||
if f.Fs.Features().ChangeNotify == nil || f.tempWritePath != "" {
|
||||
if f.Fs.Features().ChangeNotify == nil || f.opt.TempWritePath != "" {
|
||||
f.notifyChangeUpstream(remote, entryType)
|
||||
}
|
||||
}
|
||||
@@ -613,17 +625,17 @@ func (f *Fs) String() string {
|
||||
|
||||
// ChunkSize returns the configured chunk size
|
||||
func (f *Fs) ChunkSize() int64 {
|
||||
return f.chunkSize
|
||||
return int64(f.opt.ChunkSize)
|
||||
}
|
||||
|
||||
// InfoAge returns the configured file age
|
||||
func (f *Fs) InfoAge() time.Duration {
|
||||
return f.fileAge
|
||||
return time.Duration(f.opt.InfoAge)
|
||||
}
|
||||
|
||||
// TempUploadWaitTime returns the configured temp file upload wait time
|
||||
func (f *Fs) TempUploadWaitTime() time.Duration {
|
||||
return f.tempWriteWait
|
||||
return time.Duration(f.opt.TempWaitTime)
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote.
|
||||
@@ -636,16 +648,16 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
err = f.cache.GetObject(co)
|
||||
if err != nil {
|
||||
fs.Debugf(remote, "find: error: %v", err)
|
||||
} else if time.Now().After(co.CacheTs.Add(f.fileAge)) {
|
||||
} else if time.Now().After(co.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
|
||||
fs.Debugf(co, "find: cold object: %+v", co)
|
||||
} else {
|
||||
fs.Debugf(co, "find: warm object: %v, expiring on: %v", co, co.CacheTs.Add(f.fileAge))
|
||||
fs.Debugf(co, "find: warm object: %v, expiring on: %v", co, co.CacheTs.Add(time.Duration(f.opt.InfoAge)))
|
||||
return co, nil
|
||||
}
|
||||
|
||||
// search for entry in source or temp fs
|
||||
var obj fs.Object
|
||||
if f.tempWritePath != "" {
|
||||
if f.opt.TempWritePath != "" {
|
||||
obj, err = f.tempFs.NewObject(remote)
|
||||
// not found in temp fs
|
||||
if err != nil {
|
||||
@@ -679,13 +691,13 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
entries, err = f.cache.GetDirEntries(cd)
|
||||
if err != nil {
|
||||
fs.Debugf(dir, "list: error: %v", err)
|
||||
} else if time.Now().After(cd.CacheTs.Add(f.fileAge)) {
|
||||
} else if time.Now().After(cd.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
|
||||
fs.Debugf(dir, "list: cold listing: %v", cd.CacheTs)
|
||||
} else if len(entries) == 0 {
|
||||
// TODO: read empty dirs from source?
|
||||
fs.Debugf(dir, "list: empty listing")
|
||||
} else {
|
||||
fs.Debugf(dir, "list: warm %v from cache for: %v, expiring on: %v", len(entries), cd.abs(), cd.CacheTs.Add(f.fileAge))
|
||||
fs.Debugf(dir, "list: warm %v from cache for: %v, expiring on: %v", len(entries), cd.abs(), cd.CacheTs.Add(time.Duration(f.opt.InfoAge)))
|
||||
fs.Debugf(dir, "list: cached entries: %v", entries)
|
||||
return entries, nil
|
||||
}
|
||||
@@ -693,7 +705,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
|
||||
// we first search any temporary files stored locally
|
||||
var cachedEntries fs.DirEntries
|
||||
if f.tempWritePath != "" {
|
||||
if f.opt.TempWritePath != "" {
|
||||
queuedEntries, err := f.cache.searchPendingUploadFromDir(cd.abs())
|
||||
if err != nil {
|
||||
fs.Errorf(dir, "list: error getting pending uploads: %v", err)
|
||||
@@ -744,7 +756,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
case fs.Directory:
|
||||
cdd := DirectoryFromOriginal(f, o)
|
||||
// check if the dir isn't expired and add it in cache if it isn't
|
||||
if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(f.fileAge)) {
|
||||
if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
|
||||
batchDirectories = append(batchDirectories, cdd)
|
||||
}
|
||||
cachedEntries = append(cachedEntries, cdd)
|
||||
@@ -867,7 +879,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
func (f *Fs) Rmdir(dir string) error {
|
||||
fs.Debugf(f, "rmdir '%s'", dir)
|
||||
|
||||
if f.tempWritePath != "" {
|
||||
if f.opt.TempWritePath != "" {
|
||||
// pause background uploads
|
||||
f.backgroundRunner.pause()
|
||||
defer f.backgroundRunner.play()
|
||||
@@ -952,7 +964,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
|
||||
if f.tempWritePath != "" {
|
||||
if f.opt.TempWritePath != "" {
|
||||
// pause background uploads
|
||||
f.backgroundRunner.pause()
|
||||
defer f.backgroundRunner.play()
|
||||
@@ -1079,7 +1091,7 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
|
||||
go func() {
|
||||
var offset int64
|
||||
for {
|
||||
chunk := make([]byte, f.chunkSize)
|
||||
chunk := make([]byte, f.opt.ChunkSize)
|
||||
readSize, err := io.ReadFull(pr, chunk)
|
||||
// we ignore 3 failures which are ok:
|
||||
// 1. EOF - original reading finished and we got a full buffer too
|
||||
@@ -1127,7 +1139,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
|
||||
var obj fs.Object
|
||||
|
||||
// queue for upload and store in temp fs if configured
|
||||
if f.tempWritePath != "" {
|
||||
if f.opt.TempWritePath != "" {
|
||||
// we need to clear the caches before a put through temp fs
|
||||
parentCd := NewDirectory(f, cleanPath(path.Dir(src.Remote())))
|
||||
_ = f.cache.ExpireDir(parentCd)
|
||||
@@ -1146,7 +1158,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
|
||||
}
|
||||
fs.Infof(obj, "put: queued for upload")
|
||||
// if cache writes is enabled write it first through cache
|
||||
} else if f.cacheWrites {
|
||||
} else if f.opt.StoreWrites {
|
||||
f.cacheReader(in, src, func(inn io.Reader) {
|
||||
obj, err = put(inn, src, options...)
|
||||
})
|
||||
@@ -1243,7 +1255,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
|
||||
if srcObj.isTempFile() {
|
||||
// we check if the feature is stil active
|
||||
if f.tempWritePath == "" {
|
||||
if f.opt.TempWritePath == "" {
|
||||
fs.Errorf(srcObj, "can't copy - this is a local cached file but this feature is turned off this run")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
@@ -1319,7 +1331,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
// if this is a temp object then we perform the changes locally
|
||||
if srcObj.isTempFile() {
|
||||
// we check if the feature is stil active
|
||||
if f.tempWritePath == "" {
|
||||
if f.opt.TempWritePath == "" {
|
||||
fs.Errorf(srcObj, "can't move - this is a local cached file but this feature is turned off this run")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
@@ -1460,8 +1472,8 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
|
||||
f.cleanupMu.Lock()
|
||||
defer f.cleanupMu.Unlock()
|
||||
|
||||
if ignoreLastTs || time.Now().After(f.lastChunkCleanup.Add(f.chunkCleanInterval)) {
|
||||
f.cache.CleanChunksBySize(f.chunkTotalSize)
|
||||
if ignoreLastTs || time.Now().After(f.lastChunkCleanup.Add(time.Duration(f.opt.ChunkCleanInterval))) {
|
||||
f.cache.CleanChunksBySize(int64(f.opt.ChunkTotalSize))
|
||||
f.lastChunkCleanup = time.Now()
|
||||
}
|
||||
}
|
||||
@@ -1470,7 +1482,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
|
||||
// can be triggered from a terminate signal or from testing between runs
|
||||
func (f *Fs) StopBackgroundRunners() {
|
||||
f.cleanupChan <- false
|
||||
if f.tempWritePath != "" && f.backgroundRunner != nil && f.backgroundRunner.isRunning() {
|
||||
if f.opt.TempWritePath != "" && f.backgroundRunner != nil && f.backgroundRunner.isRunning() {
|
||||
f.backgroundRunner.close()
|
||||
}
|
||||
f.cache.Close()
|
||||
@@ -1528,7 +1540,7 @@ func (f *Fs) DirCacheFlush() {
|
||||
// GetBackgroundUploadChannel returns a channel that can be listened to for remote activities that happen
|
||||
// in the background
|
||||
func (f *Fs) GetBackgroundUploadChannel() chan BackgroundUploadState {
|
||||
if f.tempWritePath != "" {
|
||||
if f.opt.TempWritePath != "" {
|
||||
return f.backgroundRunner.notifyCh
|
||||
}
|
||||
return nil
|
||||
|
||||
backend/cache/cache_internal_test.go (82 lines changed, vendored)
@@ -33,13 +33,13 @@ import (
|
||||
"github.com/ncw/rclone/backend/local"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/object"
|
||||
"github.com/ncw/rclone/fs/rc"
|
||||
"github.com/ncw/rclone/fs/rc/rcflags"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/ncw/rclone/vfs"
|
||||
"github.com/ncw/rclone/vfs/vfsflags"
|
||||
flag "github.com/spf13/pflag"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -140,7 +140,7 @@ func TestInternalVfsCache(t *testing.T) {
|
||||
|
||||
vfsflags.Opt.CacheMode = vfs.CacheModeWrites
|
||||
id := "tiuufo"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"cache-writes": "true", "cache-info-age": "1h"})
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir("test")
|
||||
@@ -699,7 +699,7 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
|
||||
rc.Start(&rcflags.Opt)
|
||||
|
||||
id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"rc": "true"})
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
if !runInstance.useMount {
|
||||
@@ -774,7 +774,7 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
|
||||
|
||||
func TestInternalCacheWrites(t *testing.T) {
|
||||
id := "ticw"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"cache-writes": "true"})
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
cfs, err := runInstance.getCacheFs(rootFs)
|
||||
@@ -793,7 +793,7 @@ func TestInternalCacheWrites(t *testing.T) {
|
||||
|
||||
func TestInternalMaxChunkSizeRespected(t *testing.T) {
|
||||
id := fmt.Sprintf("timcsr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"cache-workers": "1"})
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
cfs, err := runInstance.getCacheFs(rootFs)
|
||||
@@ -868,7 +868,7 @@ func TestInternalBug2117(t *testing.T) {
|
||||
|
||||
id := fmt.Sprintf("tib2117%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
|
||||
map[string]string{"cache-info-age": "72h", "cache-chunk-clean-interval": "15m"})
|
||||
map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
if runInstance.rootIsCrypt {
|
||||
@@ -918,10 +918,7 @@ func TestInternalBug2117(t *testing.T) {
|
||||
// run holds the remotes for a test run
|
||||
type run struct {
|
||||
okDiff time.Duration
|
||||
allCfgMap map[string]string
|
||||
allFlagMap map[string]string
|
||||
runDefaultCfgMap map[string]string
|
||||
runDefaultFlagMap map[string]string
|
||||
runDefaultCfgMap configmap.Simple
|
||||
mntDir string
|
||||
tmpUploadDir string
|
||||
useMount bool
|
||||
@@ -945,38 +942,16 @@ func newRun() *run {
|
||||
isMounted: false,
|
||||
}
|
||||
|
||||
r.allCfgMap = map[string]string{
|
||||
"plex_url": "",
|
||||
"plex_username": "",
|
||||
"plex_password": "",
|
||||
"chunk_size": cache.DefCacheChunkSize,
|
||||
"info_age": cache.DefCacheInfoAge,
|
||||
"chunk_total_size": cache.DefCacheTotalChunkSize,
|
||||
// Read in all the defaults for all the options
|
||||
fsInfo, err := fs.Find("cache")
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Couldn't find cache remote: %v", err))
|
||||
}
|
||||
r.allFlagMap = map[string]string{
|
||||
"cache-db-path": filepath.Join(config.CacheDir, "cache-backend"),
|
||||
"cache-chunk-path": filepath.Join(config.CacheDir, "cache-backend"),
|
||||
"cache-db-purge": "true",
|
||||
"cache-chunk-size": cache.DefCacheChunkSize,
|
||||
"cache-total-chunk-size": cache.DefCacheTotalChunkSize,
|
||||
"cache-chunk-clean-interval": cache.DefCacheChunkCleanInterval,
|
||||
"cache-info-age": cache.DefCacheInfoAge,
|
||||
"cache-read-retries": strconv.Itoa(cache.DefCacheReadRetries),
|
||||
"cache-workers": strconv.Itoa(cache.DefCacheTotalWorkers),
|
||||
"cache-chunk-no-memory": "false",
|
||||
"cache-rps": strconv.Itoa(cache.DefCacheRps),
|
||||
"cache-writes": "false",
|
||||
"cache-tmp-upload-path": "",
|
||||
"cache-tmp-wait-time": cache.DefCacheTmpWaitTime,
|
||||
}
|
||||
r.runDefaultCfgMap = make(map[string]string)
|
||||
for key, value := range r.allCfgMap {
|
||||
r.runDefaultCfgMap[key] = value
|
||||
}
|
||||
r.runDefaultFlagMap = make(map[string]string)
|
||||
for key, value := range r.allFlagMap {
|
||||
r.runDefaultFlagMap[key] = value
|
||||
r.runDefaultCfgMap = configmap.Simple{}
|
||||
for _, option := range fsInfo.Options {
|
||||
r.runDefaultCfgMap.Set(option.Name, fmt.Sprint(option.Default))
|
||||
}
|
||||
|
||||
if mountDir == "" {
|
||||
if runtime.GOOS != "windows" {
|
||||
r.mntDir, err = ioutil.TempDir("", "rclonecache-mount")
|
||||
@@ -1086,28 +1061,22 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
||||
boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
|
||||
require.NoError(t, err)
|
||||
|
||||
for k, v := range r.runDefaultCfgMap {
|
||||
if c, ok := cfg[k]; ok {
|
||||
config.FileSet(cacheRemote, k, c)
|
||||
} else {
|
||||
config.FileSet(cacheRemote, k, v)
|
||||
}
|
||||
}
|
||||
for k, v := range r.runDefaultFlagMap {
|
||||
if c, ok := flags[k]; ok {
|
||||
_ = flag.Set(k, c)
|
||||
} else {
|
||||
_ = flag.Set(k, v)
|
||||
}
|
||||
}
|
||||
fs.Config.LowLevelRetries = 1
|
||||
|
||||
m := configmap.Simple{}
|
||||
for k, v := range r.runDefaultCfgMap {
|
||||
m.Set(k, v)
|
||||
}
|
||||
for k, v := range flags {
|
||||
m.Set(k, v)
|
||||
}
|
||||
|
||||
// Instantiate root
|
||||
if purge {
|
||||
boltDb.PurgeTempUploads()
|
||||
_ = os.RemoveAll(path.Join(runInstance.tmpUploadDir, id))
|
||||
}
|
||||
f, err := fs.NewFs(remote + ":" + id)
|
||||
f, err := cache.NewFs(remote, id, m)
|
||||
require.NoError(t, err)
|
||||
cfs, err := r.getCacheFs(f)
|
||||
require.NoError(t, err)
|
||||
@@ -1157,9 +1126,6 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
|
||||
}
|
||||
r.tempFiles = nil
|
||||
debug.FreeOSMemory()
|
||||
for k, v := range r.runDefaultFlagMap {
|
||||
_ = flag.Set(k, v)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
|
||||
|
||||
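The harness rework above replaces the hand-maintained flag/config default maps with the cache backend's registered option defaults. A minimal sketch of that pattern, built directly from the calls shown in the diff (the remote name, root and override keys here are illustrative only):

```go
package cache_test

import (
	"fmt"

	"github.com/ncw/rclone/backend/cache"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config/configmap"
)

// newSketchCacheFs mirrors the harness change above: seed a configmap.Simple
// from the cache backend's declared option defaults, apply per-test
// overrides, then construct the Fs directly without touching the config file.
func newSketchCacheFs(overrides map[string]string) (fs.Fs, error) {
	fsInfo, err := fs.Find("cache")
	if err != nil {
		return nil, err
	}
	m := configmap.Simple{}
	for _, option := range fsInfo.Options {
		m.Set(option.Name, fmt.Sprint(option.Default))
	}
	for k, v := range overrides {
		m.Set(k, v)
	}
	return cache.NewFs("TestCache", "test-root", m)
}
```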
14 backend/cache/cache_upload_test.go vendored
@@ -22,7 +22,7 @@ func TestInternalUploadTempDirCreated(t *testing.T) {
|
||||
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id)})
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
|
||||
@@ -63,7 +63,7 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "0s"})
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
||||
@@ -73,7 +73,7 @@ func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1m"})
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
||||
@@ -83,7 +83,7 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
|
||||
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "3s"})
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir("one")
|
||||
@@ -163,7 +163,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1s"})
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir("test")
|
||||
@@ -213,7 +213,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
|
||||
id := "tiutfo"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
boltDb.PurgeTempUploads()
|
||||
@@ -343,7 +343,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
|
||||
id := "tiuufo"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
boltDb.PurgeTempUploads()
|
||||
|
||||
455 backend/cache/cache_upload_test.go.orig vendored Normal file
@@ -0,0 +1,455 @@
|
||||
// +build !plan9
|
||||
|
||||
package cache_test
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"fmt"
|
||||
|
||||
"github.com/ncw/rclone/backend/cache"
|
||||
_ "github.com/ncw/rclone/backend/drive"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestInternalUploadTempDirCreated(t *testing.T) {
|
||||
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id)})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltDb *cache.Persistent) {
|
||||
// create some rand test data
|
||||
testSize := int64(524288000)
|
||||
testReader := runInstance.randomReader(t, testSize)
|
||||
bu := runInstance.listenForBackgroundUpload(t, rootFs, "one")
|
||||
runInstance.writeRemoteReader(t, rootFs, "one", testReader)
|
||||
// validate that it exists in temp fs
|
||||
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
if runInstance.rootIsCrypt {
|
||||
require.Equal(t, int64(524416032), ti.Size())
|
||||
} else {
|
||||
require.Equal(t, testSize, ti.Size())
|
||||
}
|
||||
de1, err := runInstance.list(t, rootFs, "")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
|
||||
runInstance.completeBackgroundUpload(t, "one", bu)
|
||||
// check if it was removed from temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
|
||||
// check if it can be read
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "one", 0, int64(1024), false)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, data2, 1024)
|
||||
}
|
||||
|
||||
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "0s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
||||
}
|
||||
|
||||
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1m"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
||||
}
|
||||
|
||||
func TestInternalUploadMoveExistingFile(t *testing.T) {
|
||||
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "3s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir("one")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("one/test")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("second")
|
||||
require.NoError(t, err)
|
||||
|
||||
// create some rand test data
|
||||
testSize := int64(10485760)
|
||||
testReader := runInstance.randomReader(t, testSize)
|
||||
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
|
||||
|
||||
de1, err := runInstance.list(t, rootFs, "one/test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
|
||||
time.Sleep(time.Second * 5)
|
||||
//_ = os.Remove(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
|
||||
//require.NoError(t, err)
|
||||
|
||||
err = runInstance.dirMove(t, rootFs, "one/test", "second/test")
|
||||
require.NoError(t, err)
|
||||
|
||||
// check if it can be read
|
||||
de1, err = runInstance.list(t, rootFs, "second/test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
}
|
||||
|
||||
func TestInternalUploadTempPathCleaned(t *testing.T) {
|
||||
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir("one")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("one/test")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("second")
|
||||
require.NoError(t, err)
|
||||
|
||||
// create some rand test data
|
||||
testSize := int64(1048576)
|
||||
testReader := runInstance.randomReader(t, testSize)
|
||||
testReader2 := runInstance.randomReader(t, testSize)
|
||||
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
|
||||
runInstance.writeObjectReader(t, rootFs, "second/data.bin", testReader2)
|
||||
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second")))
|
||||
require.False(t, os.IsNotExist(err))
|
||||
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, "second/data.bin")
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/data.bin")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
|
||||
de1, err := runInstance.list(t, rootFs, "one/test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
|
||||
// check if it can be read
|
||||
de1, err = runInstance.list(t, rootFs, "second")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
}
|
||||
|
||||
func TestInternalUploadQueueMoreFiles(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir("test")
|
||||
require.NoError(t, err)
|
||||
minSize := 5242880
|
||||
maxSize := 10485760
|
||||
totalFiles := 10
|
||||
rand.Seed(time.Now().Unix())
|
||||
|
||||
lastFile := ""
|
||||
for i := 0; i < totalFiles; i++ {
|
||||
size := int64(rand.Intn(maxSize-minSize) + minSize)
|
||||
testReader := runInstance.randomReader(t, size)
|
||||
remote := "test/" + strconv.Itoa(i) + ".bin"
|
||||
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
|
||||
|
||||
// validate that it exists in temp fs
|
||||
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, remote)))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, size, runInstance.cleanSize(t, ti.Size()))
|
||||
|
||||
if runInstance.wrappedIsExternal && i < totalFiles-1 {
|
||||
time.Sleep(time.Second * 3)
|
||||
}
|
||||
lastFile = remote
|
||||
}
|
||||
|
||||
// check if cache lists all files, likely temp upload didn't finish yet
|
||||
de1, err := runInstance.list(t, rootFs, "test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, totalFiles)
|
||||
|
||||
// wait for background uploader to do its thing
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, lastFile)
|
||||
|
||||
// retry until we have no more temp files and fail if they don't go down to 0
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
|
||||
// check if cache lists all files
|
||||
de1, err = runInstance.list(t, rootFs, "test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, totalFiles)
|
||||
}
|
||||
|
||||
func TestInternalUploadTempFileOperations(t *testing.T) {
|
||||
id := "tiutfo"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
boltDb.PurgeTempUploads()
|
||||
|
||||
// create some rand test data
|
||||
runInstance.mkdir(t, rootFs, "test")
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// check if it can be read
|
||||
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data1)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
// test DirMove - allowed
|
||||
err = runInstance.dirMove(t, rootFs, "test", "second")
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("second/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.Error(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
|
||||
require.Error(t, err)
|
||||
var started bool
|
||||
started, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "second/one")))
|
||||
require.NoError(t, err)
|
||||
require.False(t, started)
|
||||
runInstance.mkdir(t, rootFs, "test")
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
}
|
||||
|
||||
// test Rmdir - allowed
|
||||
err = runInstance.rm(t, rootFs, "test")
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "directory not empty")
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
|
||||
require.False(t, started)
|
||||
require.NoError(t, err)
|
||||
|
||||
// test Move/Rename -- allowed
|
||||
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
// try to read from it
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/second")
|
||||
require.NoError(t, err)
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data2)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.Error(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
|
||||
require.NoError(t, err)
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
}
|
||||
|
||||
// test Copy -- allowed
|
||||
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/third")
|
||||
require.NoError(t, err)
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data2)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// test Remove -- allowed
|
||||
err = runInstance.rm(t, rootFs, "test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.Error(t, err)
|
||||
// validate that it doesn't exist in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.Error(t, err)
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// test Update -- allowed
|
||||
firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
require.NoError(t, err)
|
||||
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
|
||||
require.NoError(t, err)
|
||||
obj2, err := rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
|
||||
require.Equal(t, "one content updated", string(data2))
|
||||
tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
if runInstance.rootIsCrypt {
|
||||
require.Equal(t, int64(67), tmpInfo.Size())
|
||||
} else {
|
||||
require.Equal(t, int64(len(data2)), tmpInfo.Size())
|
||||
}
|
||||
|
||||
// test SetModTime -- allowed
|
||||
secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, secondModTime, firstModTime)
|
||||
require.NotEqual(t, time.Time{}, firstModTime)
|
||||
require.NotEqual(t, time.Time{}, secondModTime)
|
||||
}
|
||||
|
||||
func TestInternalUploadUploadingFileOperations(t *testing.T) {
|
||||
id := "tiuufo"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
boltDb.PurgeTempUploads()
|
||||
|
||||
// create some rand test data
|
||||
runInstance.mkdir(t, rootFs, "test")
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// check if it can be read
|
||||
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data1)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
err = boltDb.SetPendingUploadToStarted(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
// test DirMove
|
||||
err = runInstance.dirMove(t, rootFs, "test", "second")
|
||||
if err != errNotSupported {
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
// test Rmdir
|
||||
err = runInstance.rm(t, rootFs, "test")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it doesn't exist in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
// test Move/Rename
|
||||
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
|
||||
if err != errNotSupported {
|
||||
require.Error(t, err)
|
||||
// try to read from it
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/second")
|
||||
require.Error(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
// test Copy -- allowed
|
||||
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/third")
|
||||
require.NoError(t, err)
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data2)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// test Remove
|
||||
err = runInstance.rm(t, rootFs, "test/one")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it doesn't exist in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// test Update - this seems to work. Why? FIXME
|
||||
//firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
//require.NoError(t, err)
|
||||
//err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated", func() {
|
||||
// data2 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len("one content updated")), true)
|
||||
// require.Equal(t, "one content", string(data2))
|
||||
//
|
||||
// tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
// require.NoError(t, err)
|
||||
// if runInstance.rootIsCrypt {
|
||||
// require.Equal(t, int64(67), tmpInfo.Size())
|
||||
// } else {
|
||||
// require.Equal(t, int64(len(data2)), tmpInfo.Size())
|
||||
// }
|
||||
//})
|
||||
//require.Error(t, err)
|
||||
|
||||
// test SetModTime -- seems to work cause of previous
|
||||
//secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
//require.NoError(t, err)
|
||||
//require.Equal(t, secondModTime, firstModTime)
|
||||
//require.NotEqual(t, time.Time{}, firstModTime)
|
||||
//require.NotEqual(t, time.Time{}, secondModTime)
|
||||
}
|
||||
12 backend/cache/cache_upload_test.go.rej vendored Normal file
@@ -0,0 +1,12 @@
|
||||
--- cache_upload_test.go
|
||||
+++ cache_upload_test.go
|
||||
@@ -1500,9 +1469,6 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
|
||||
}
|
||||
r.tempFiles = nil
|
||||
debug.FreeOSMemory()
|
||||
- for k, v := range r.runDefaultFlagMap {
|
||||
- _ = flag.Set(k, v)
|
||||
- }
|
||||
}
|
||||
|
||||
func (r *run) randomBytes(t *testing.T, size int64) []byte {
|
||||
28 backend/cache/handle.go vendored
@@ -65,14 +65,14 @@ func NewObjectHandle(o *Object, cfs *Fs) *Handle {
|
||||
offset: 0,
|
||||
preloadOffset: -1, // -1 to trigger the first preload
|
||||
|
||||
UseMemory: cfs.chunkMemory,
|
||||
UseMemory: !cfs.opt.ChunkNoMemory,
|
||||
reading: false,
|
||||
}
|
||||
r.seenOffsets = make(map[int64]bool)
|
||||
r.memory = NewMemory(-1)
|
||||
|
||||
// create a larger buffer to queue up requests
|
||||
r.preloadQueue = make(chan int64, r.cfs.totalWorkers*10)
|
||||
r.preloadQueue = make(chan int64, r.cfs.opt.TotalWorkers*10)
|
||||
r.confirmReading = make(chan bool)
|
||||
r.startReadWorkers()
|
||||
return r
|
||||
@@ -98,7 +98,7 @@ func (r *Handle) startReadWorkers() {
|
||||
if r.hasAtLeastOneWorker() {
|
||||
return
|
||||
}
|
||||
totalWorkers := r.cacheFs().totalWorkers
|
||||
totalWorkers := r.cacheFs().opt.TotalWorkers
|
||||
|
||||
if r.cacheFs().plexConnector.isConfigured() {
|
||||
if !r.cacheFs().plexConnector.isConnected() {
|
||||
@@ -156,7 +156,7 @@ func (r *Handle) confirmExternalReading() {
|
||||
return
|
||||
}
|
||||
fs.Infof(r, "confirmed reading by external reader")
|
||||
r.scaleWorkers(r.cacheFs().totalMaxWorkers)
|
||||
r.scaleWorkers(r.cacheFs().opt.TotalWorkers)
|
||||
}
|
||||
|
||||
// queueOffset will send an offset to the workers if it's different from the last one
|
||||
@@ -179,7 +179,7 @@ func (r *Handle) queueOffset(offset int64) {
|
||||
}
|
||||
|
||||
for i := 0; i < len(r.workers); i++ {
|
||||
o := r.preloadOffset + r.cacheFs().chunkSize*int64(i)
|
||||
o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
|
||||
if o < 0 || o >= r.cachedObject.Size() {
|
||||
continue
|
||||
}
|
||||
@@ -211,7 +211,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
|
||||
var err error
|
||||
|
||||
// we calculate the modulus of the requested offset with the size of a chunk
|
||||
offset := chunkStart % r.cacheFs().chunkSize
|
||||
offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)
|
||||
|
||||
// we align the start offset of the first chunk to a likely chunk in the storage
|
||||
chunkStart = chunkStart - offset
|
||||
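As a quick worked example of the modulus-and-align step described in the comments above (the chunk size and offsets are made-up values, not anything from this diff):

```go
package main

import "fmt"

func main() {
	// Same arithmetic as the hunk above: a 5 MiB chunk size and a read
	// request that starts at byte 12,000,000.
	chunkSize := int64(5 * 1024 * 1024) // 5242880
	chunkStart := int64(12000000)
	offset := chunkStart % chunkSize // 1514240 bytes into the chunk
	chunkStart -= offset             // 10485760, aligned to a stored chunk boundary
	fmt.Println(chunkStart, offset)  // 10485760 1514240
}
```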
@@ -228,7 +228,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
|
||||
if !found {
|
||||
// we're gonna give the workers a chance to pickup the chunk
|
||||
// and retry a couple of times
|
||||
for i := 0; i < r.cacheFs().readRetries*8; i++ {
|
||||
for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
|
||||
data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
|
||||
if err == nil {
|
||||
found = true
|
||||
@@ -255,7 +255,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
|
||||
if offset > 0 {
|
||||
if offset > int64(len(data)) {
|
||||
fs.Errorf(r, "unexpected conditions during reading. current position: %v, current chunk position: %v, current chunk size: %v, offset: %v, chunk size: %v, file size: %v",
|
||||
r.offset, chunkStart, len(data), offset, r.cacheFs().chunkSize, r.cachedObject.Size())
|
||||
r.offset, chunkStart, len(data), offset, r.cacheFs().opt.ChunkSize, r.cachedObject.Size())
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
data = data[int(offset):]
|
||||
@@ -338,9 +338,9 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
|
||||
err = errors.Errorf("cache: unimplemented seek whence %v", whence)
|
||||
}
|
||||
|
||||
chunkStart := r.offset - (r.offset % r.cacheFs().chunkSize)
|
||||
if chunkStart >= r.cacheFs().chunkSize {
|
||||
chunkStart = chunkStart - r.cacheFs().chunkSize
|
||||
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
|
||||
if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
|
||||
chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
|
||||
}
|
||||
r.queueOffset(chunkStart)
|
||||
|
||||
@@ -451,7 +451,7 @@ func (w *worker) run() {
|
||||
}
|
||||
}
|
||||
|
||||
chunkEnd := chunkStart + w.r.cacheFs().chunkSize
|
||||
chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
|
||||
// TODO: Remove this comment if it proves to be reliable for #1896
|
||||
//if chunkEnd > w.r.cachedObject.Size() {
|
||||
// chunkEnd = w.r.cachedObject.Size()
|
||||
@@ -466,7 +466,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
|
||||
var data []byte
|
||||
|
||||
// stop retries
|
||||
if retry >= w.r.cacheFs().readRetries {
|
||||
if retry >= w.r.cacheFs().opt.ReadRetries {
|
||||
return
|
||||
}
|
||||
// back-off between retries
|
||||
@@ -612,7 +612,7 @@ func (b *backgroundWriter) run() {
|
||||
return
|
||||
}
|
||||
|
||||
absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), b.fs.tempWriteWait)
|
||||
absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), time.Duration(b.fs.opt.TempWaitTime))
|
||||
if err != nil || absPath == "" || !b.fs.isRootInPath(absPath) {
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
|
||||
10 backend/cache/object.go vendored
@@ -44,7 +44,7 @@ func NewObject(f *Fs, remote string) *Object {
|
||||
|
||||
cacheType := objectInCache
|
||||
parentFs := f.UnWrap()
|
||||
if f.tempWritePath != "" {
|
||||
if f.opt.TempWritePath != "" {
|
||||
_, err := f.cache.SearchPendingUpload(fullRemote)
|
||||
if err == nil { // queued for upload
|
||||
cacheType = objectPendingUpload
|
||||
@@ -75,7 +75,7 @@ func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
|
||||
|
||||
cacheType := objectInCache
|
||||
parentFs := f.UnWrap()
|
||||
if f.tempWritePath != "" {
|
||||
if f.opt.TempWritePath != "" {
|
||||
_, err := f.cache.SearchPendingUpload(fullRemote)
|
||||
if err == nil { // queued for upload
|
||||
cacheType = objectPendingUpload
|
||||
@@ -153,7 +153,7 @@ func (o *Object) Storable() bool {
|
||||
// 2. is not pending a notification from the wrapped fs
|
||||
func (o *Object) refresh() error {
|
||||
isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
|
||||
isExpired := time.Now().After(o.CacheTs.Add(o.CacheFs.fileAge))
|
||||
isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
|
||||
if !isExpired && !isNotified {
|
||||
return nil
|
||||
}
|
||||
@@ -237,7 +237,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
return err
|
||||
}
|
||||
// pause background uploads if active
|
||||
if o.CacheFs.tempWritePath != "" {
|
||||
if o.CacheFs.opt.TempWritePath != "" {
|
||||
o.CacheFs.backgroundRunner.pause()
|
||||
defer o.CacheFs.backgroundRunner.play()
|
||||
// don't allow started uploads
|
||||
@@ -274,7 +274,7 @@ func (o *Object) Remove() error {
|
||||
return err
|
||||
}
|
||||
// pause background uploads if active
|
||||
if o.CacheFs.tempWritePath != "" {
|
||||
if o.CacheFs.opt.TempWritePath != "" {
|
||||
o.CacheFs.backgroundRunner.pause()
|
||||
defer o.CacheFs.backgroundRunner.play()
|
||||
// don't allow started uploads
|
||||
|
||||
8 backend/cache/plex.go vendored
@@ -16,7 +16,6 @@ import (
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/patrickmn/go-cache"
|
||||
"golang.org/x/net/websocket"
|
||||
)
|
||||
@@ -60,10 +59,11 @@ type plexConnector struct {
|
||||
running bool
|
||||
runningMu sync.Mutex
|
||||
stateCache *cache.Cache
|
||||
saveToken func(string)
|
||||
}
|
||||
|
||||
// newPlexConnector connects to a Plex server and generates a token
|
||||
func newPlexConnector(f *Fs, plexURL, username, password string) (*plexConnector, error) {
|
||||
func newPlexConnector(f *Fs, plexURL, username, password string, saveToken func(string)) (*plexConnector, error) {
|
||||
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -76,6 +76,7 @@ func newPlexConnector(f *Fs, plexURL, username, password string) (*plexConnector
|
||||
password: password,
|
||||
token: "",
|
||||
stateCache: cache.New(time.Hour, time.Minute),
|
||||
saveToken: saveToken,
|
||||
}
|
||||
|
||||
return pc, nil
|
||||
@@ -209,8 +210,7 @@ func (p *plexConnector) authenticate() error {
|
||||
}
|
||||
p.token = token
|
||||
if p.token != "" {
|
||||
config.FileSet(p.f.Name(), "plex_token", p.token)
|
||||
config.SaveConfig()
|
||||
p.saveToken(p.token)
|
||||
fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String())
|
||||
}
|
||||
p.listenWebsocket()
|
||||
|
||||
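With the plex.go change above, persisting the token becomes the caller's job via the new saveToken callback instead of a direct config.FileSet. A sketch of how the cache backend could wire that callback; the helper name is hypothetical, the package is assumed to import github.com/ncw/rclone/fs/config/configmap, and routing the token through m.Set("plex_token", ...) is an assumption consistent with the configmap pattern used elsewhere in this change:

```go
// Assumed to live inside the cache backend package, next to the real
// newPlexConnector shown in the diff above.
func newPlexForCache(f *Fs, m configmap.Mapper, plexURL, user, pass string) (*plexConnector, error) {
	return newPlexConnector(f, plexURL, user, pass, func(token string) {
		// Persist the freshly negotiated token back into this remote's config.
		m.Set("plex_token", token)
	})
}
```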
9 backend/cache/storage_persistent.go vendored
@@ -34,7 +34,8 @@ const (
|
||||
|
||||
// Features flags for this storage type
|
||||
type Features struct {
|
||||
PurgeDb bool // purge the db before starting
|
||||
PurgeDb bool // purge the db before starting
|
||||
DbWaitTime time.Duration // time to wait for DB to be available
|
||||
}
|
||||
|
||||
var boltMap = make(map[string]*Persistent)
|
||||
@@ -122,7 +123,7 @@ func (b *Persistent) connect() error {
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
|
||||
}
|
||||
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: *cacheDbWaitTime})
|
||||
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
|
||||
}
|
||||
@@ -342,7 +343,7 @@ func (b *Persistent) RemoveDir(fp string) error {
|
||||
// ExpireDir will flush a CachedDirectory and all its objects from the objects
|
||||
// chunks will remain as they are
|
||||
func (b *Persistent) ExpireDir(cd *Directory) error {
|
||||
t := time.Now().Add(cd.CacheFs.fileAge * -1)
|
||||
t := time.Now().Add(time.Duration(-cd.CacheFs.opt.InfoAge))
|
||||
cd.CacheTs = &t
|
||||
|
||||
// expire all parents
|
||||
@@ -429,7 +430,7 @@ func (b *Persistent) RemoveObject(fp string) error {
|
||||
|
||||
// ExpireObject will flush an Object and all its data if desired
|
||||
func (b *Persistent) ExpireObject(co *Object, withData bool) error {
|
||||
co.CacheTs = time.Now().Add(co.CacheFs.fileAge * -1)
|
||||
co.CacheTs = time.Now().Add(time.Duration(-co.CacheFs.opt.InfoAge))
|
||||
err := b.AddObject(co)
|
||||
if withData {
|
||||
_ = os.RemoveAll(path.Join(b.dataPath, co.abs()))
|
||||
|
||||
backend/crypt/crypt.go
@@ -5,24 +5,18 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
// Flags
|
||||
cryptShowMapping = flags.BoolP("crypt-show-mapping", "", false, "For all files listed show how the names encrypt.")
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
@@ -30,11 +24,13 @@ func init() {
|
||||
Description: "Encrypt/Decrypt a remote",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "remote",
|
||||
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
|
||||
Name: "remote",
|
||||
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "filename_encryption",
|
||||
Help: "How to encrypt the filenames.",
|
||||
Name: "filename_encryption",
|
||||
Help: "How to encrypt the filenames.",
|
||||
Default: "standard",
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "off",
|
||||
@@ -48,8 +44,9 @@ func init() {
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "directory_name_encryption",
|
||||
Help: "Option to either encrypt directory names or leave them intact.",
|
||||
Name: "directory_name_encryption",
|
||||
Help: "Option to either encrypt directory names or leave them intact.",
|
||||
Default: true,
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "true",
|
||||
@@ -68,50 +65,67 @@ func init() {
|
||||
Name: "password2",
|
||||
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
|
||||
IsPassword: true,
|
||||
Optional: true,
|
||||
}, {
|
||||
Name: "show_mapping",
|
||||
Help: "For all files listed show how the names encrypt.",
|
||||
Default: false,
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// NewCipher constructs a Cipher for the given config name
|
||||
func NewCipher(name string) (Cipher, error) {
|
||||
mode, err := NewNameEncryptionMode(config.FileGet(name, "filename_encryption", "standard"))
|
||||
// newCipherForConfig constructs a Cipher for the given config name
|
||||
func newCipherForConfig(opt *Options) (Cipher, error) {
|
||||
mode, err := NewNameEncryptionMode(opt.FilenameEncryption)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dirNameEncrypt, err := strconv.ParseBool(config.FileGet(name, "directory_name_encryption", "true"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
password := config.FileGet(name, "password", "")
|
||||
if password == "" {
|
||||
if opt.Password == "" {
|
||||
return nil, errors.New("password not set in config file")
|
||||
}
|
||||
password, err = obscure.Reveal(password)
|
||||
password, err := obscure.Reveal(opt.Password)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to decrypt password")
|
||||
}
|
||||
salt := config.FileGet(name, "password2", "")
|
||||
if salt != "" {
|
||||
salt, err = obscure.Reveal(salt)
|
||||
var salt string
|
||||
if opt.Password2 != "" {
|
||||
salt, err = obscure.Reveal(opt.Password2)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to decrypt password2")
|
||||
}
|
||||
}
|
||||
cipher, err := newCipher(mode, password, salt, dirNameEncrypt)
|
||||
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to make cipher")
|
||||
}
|
||||
return cipher, nil
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
func NewFs(name, rpath string) (fs.Fs, error) {
|
||||
cipher, err := NewCipher(name)
|
||||
// NewCipher constructs a Cipher for the given config
|
||||
func NewCipher(m configmap.Mapper) (Cipher, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
remote := config.FileGet(name, "remote")
|
||||
return newCipherForConfig(opt)
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cipher, err := newCipherForConfig(opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
remote := opt.Remote
|
||||
if strings.HasPrefix(remote, name+":") {
|
||||
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
|
||||
}
|
||||
@@ -130,6 +144,7 @@ func NewFs(name, rpath string) (fs.Fs, error) {
|
||||
Fs: wrappedFs,
|
||||
name: name,
|
||||
root: rpath,
|
||||
opt: *opt,
|
||||
cipher: cipher,
|
||||
}
|
||||
// the features here are ones we could support, and they are
|
||||
@@ -161,11 +176,22 @@ func NewFs(name, rpath string) (fs.Fs, error) {
|
||||
return f, err
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Remote string `config:"remote"`
|
||||
FilenameEncryption string `config:"filename_encryption"`
|
||||
DirectoryNameEncryption bool `config:"directory_name_encryption"`
|
||||
Password string `config:"password"`
|
||||
Password2 string `config:"password2"`
|
||||
ShowMapping bool `config:"show_mapping"`
|
||||
}
|
||||
|
||||
// Fs represents a wrapped fs.Fs
|
||||
type Fs struct {
|
||||
fs.Fs
|
||||
name string
|
||||
root string
|
||||
opt Options
|
||||
features *fs.Features // optional features
|
||||
cipher Cipher
|
||||
}
|
||||
@@ -198,7 +224,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
|
||||
fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
|
||||
return
|
||||
}
|
||||
if *cryptShowMapping {
|
||||
if f.opt.ShowMapping {
|
||||
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
|
||||
}
|
||||
*entries = append(*entries, f.newObject(obj))
|
||||
@@ -212,7 +238,7 @@ func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
|
||||
fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
|
||||
return
|
||||
}
|
||||
if *cryptShowMapping {
|
||||
if f.opt.ShowMapping {
|
||||
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
|
||||
}
|
||||
*entries = append(*entries, f.newDir(dir))
|
||||
|
||||
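The crypt refactor above makes the cipher constructible from an in-memory config map rather than the config file. A sketch of the new NewCipher entry point; obscure.MustObscure and Cipher.EncryptFileName are assumed from the existing API rather than shown in this diff, and Options keys missing from the map are assumed to fall back to their zero values:

```go
package main

import (
	"fmt"
	"log"

	"github.com/ncw/rclone/backend/crypt"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/obscure"
)

func main() {
	// Build the crypt options in memory. The stored password must be in
	// rclone's obscured form, hence obscure.MustObscure rather than the
	// plain string.
	m := configmap.Simple{}
	m.Set("filename_encryption", "standard")
	m.Set("directory_name_encryption", "true")
	m.Set("password", obscure.MustObscure("correct horse battery staple"))

	cipher, err := crypt.NewCipher(m)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cipher.EncryptFileName("hello.txt"))
}
```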
backend/drive/drive.go
@@ -23,7 +23,8 @@ import (
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
@@ -49,27 +50,13 @@ const (
|
||||
defaultExtensions = "docx,xlsx,pptx,svg"
|
||||
scopePrefix = "https://www.googleapis.com/auth/"
|
||||
defaultScope = "drive"
|
||||
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
|
||||
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
|
||||
defaultChunkSize = fs.SizeSuffix(8 * 1024 * 1024)
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
// Flags
|
||||
driveAuthOwnerOnly = flags.BoolP("drive-auth-owner-only", "", false, "Only consider files owned by the authenticated user.")
|
||||
driveUseTrash = flags.BoolP("drive-use-trash", "", true, "Send files to the trash instead of deleting permanently.")
|
||||
driveSkipGdocs = flags.BoolP("drive-skip-gdocs", "", false, "Skip google documents in all listings.")
|
||||
driveSharedWithMe = flags.BoolP("drive-shared-with-me", "", false, "Only show files that are shared with me")
|
||||
driveTrashedOnly = flags.BoolP("drive-trashed-only", "", false, "Only show files that are in the trash")
|
||||
driveExtensions = flags.StringP("drive-formats", "", defaultExtensions, "Comma separated list of preferred formats for downloading Google docs.")
|
||||
driveUseCreatedDate = flags.BoolP("drive-use-created-date", "", false, "Use created date instead of modified date.")
|
||||
driveListChunk = flags.Int64P("drive-list-chunk", "", 1000, "Size of listing chunk 100-1000. 0 to disable.")
|
||||
driveImpersonate = flags.StringP("drive-impersonate", "", "", "Impersonate this user when using a service account.")
|
||||
driveAlternateExport = flags.BoolP("drive-alternate-export", "", false, "Use alternate export URLs for google documents export.")
|
||||
driveAcknowledgeAbuse = flags.BoolP("drive-acknowledge-abuse", "", false, "Set to allow files which return cannotDownloadAbusiveFile to be downloaded.")
|
||||
driveKeepRevisionForever = flags.BoolP("drive-keep-revision-forever", "", false, "Keep new head revision forever.")
|
||||
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
|
||||
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
|
||||
chunkSize = fs.SizeSuffix(8 * 1024 * 1024)
|
||||
driveUploadCutoff = chunkSize
|
||||
// Description of how to auth for this app
|
||||
driveConfig = &oauth2.Config{
|
||||
Scopes: []string{scopePrefix + "drive"},
|
||||
@@ -112,38 +99,43 @@ func init() {
|
||||
Name: "drive",
|
||||
Description: "Google Drive",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string) {
|
||||
var err error
|
||||
Config: func(name string, m configmap.Mapper) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
|
||||
return
|
||||
}
|
||||
// Fill in the scopes
|
||||
scope := config.FileGet(name, "scope")
|
||||
if scope == "" {
|
||||
scope = defaultScope
|
||||
if opt.Scope == "" {
|
||||
opt.Scope = defaultScope
|
||||
}
|
||||
driveConfig.Scopes = nil
|
||||
for _, scope := range strings.Split(scope, ",") {
|
||||
for _, scope := range strings.Split(opt.Scope, ",") {
|
||||
driveConfig.Scopes = append(driveConfig.Scopes, scopePrefix+strings.TrimSpace(scope))
|
||||
// Set the root_folder_id if using drive.appfolder
|
||||
if scope == "drive.appfolder" {
|
||||
config.FileSet(name, "root_folder_id", "appDataFolder")
|
||||
m.Set("root_folder_id", "appDataFolder")
|
||||
}
|
||||
}
|
||||
if config.FileGet(name, "service_account_file") == "" {
|
||||
err = oauthutil.Config("drive", name, driveConfig)
|
||||
if opt.ServiceAccountFile == "" {
|
||||
err = oauthutil.Config("drive", name, m, driveConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
}
|
||||
}
|
||||
err = configTeamDrive(name)
|
||||
err = configTeamDrive(opt, m, name)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure team drive: %v", err)
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: config.ConfigClientID,
|
||||
Help: "Google Application Client Id - leave blank normally.",
|
||||
Help: "Google Application Client Id\nLeave blank normally.",
|
||||
}, {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Google Application Client Secret - leave blank normally.",
|
||||
Help: "Google Application Client Secret\nLeave blank normally.",
|
||||
}, {
|
||||
Name: "scope",
|
||||
Help: "Scope that rclone should use when requesting access from drive.",
|
||||
@@ -165,14 +157,97 @@ func init() {
|
||||
}},
|
||||
}, {
|
||||
Name: "root_folder_id",
|
||||
Help: "ID of the root folder - leave blank normally. Fill in to access \"Computers\" folders. (see docs).",
|
||||
Help: "ID of the root folder\nLeave blank normally.\nFill in to access \"Computers\" folders. (see docs).",
|
||||
}, {
|
||||
Name: "service_account_file",
|
||||
Help: "Service Account Credentials JSON file path - leave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
}, {
|
||||
Name: "service_account_credentials",
|
||||
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "team_drive",
|
||||
Help: "ID of the Team Drive",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "auth_owner_only",
|
||||
Default: false,
|
||||
Help: "Only consider files owned by the authenticated user.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_trash",
|
||||
Default: true,
|
||||
Help: "Send files to the trash instead of deleting permanently.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "skip_gdocs",
|
||||
Default: false,
|
||||
Help: "Skip google documents in all listings.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "shared_with_me",
|
||||
Default: false,
|
||||
Help: "Only show files that are shared with me",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "trashed_only",
|
||||
Default: false,
|
||||
Help: "Only show files that are in the trash",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "formats",
|
||||
Default: defaultExtensions,
|
||||
Help: "Comma separated list of preferred formats for downloading Google docs.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_created_date",
|
||||
Default: false,
|
||||
Help: "Use created date instead of modified date.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "list_chunk",
|
||||
Default: 1000,
|
||||
Help: "Size of listing chunk 100-1000. 0 to disable.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "impersonate",
|
||||
Default: "",
|
||||
Help: "Impersonate this user when using a service account.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "alternate_export",
|
||||
Default: false,
|
||||
Help: "Use alternate export URLs for google documents export.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Default: defaultChunkSize,
|
||||
Help: "Cutoff for switching to chunked upload",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Default: defaultChunkSize,
|
||||
Help: "Upload chunk size. Must a power of 2 >= 256k.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "acknowledge_abuse",
|
||||
Default: false,
|
||||
Help: "Set to allow files which return cannotDownloadAbusiveFile to be downloaded.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "keep_revision_forever",
|
||||
Default: false,
|
||||
Help: "Keep new head revision forever.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "untrash",
|
||||
Default: false,
|
||||
Help: "Untrash any trashed files - use with --drive-trashed-only.",
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
flags.VarP(&driveUploadCutoff, "drive-upload-cutoff", "", "Cutoff for switching to chunked upload")
|
||||
flags.VarP(&chunkSize, "drive-chunk-size", "", "Upload chunk size. Must a power of 2 >= 256k.")
|
||||
|
||||
// Invert mimeTypeToExtension
|
||||
extensionToMimeType = make(map[string]string, len(mimeTypeToExtension))
|
||||
@@ -181,10 +256,35 @@ func init() {
|
||||
}
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Scope string `config:"scope"`
|
||||
RootFolderID string `config:"root_folder_id"`
|
||||
ServiceAccountFile string `config:"service_account_file"`
|
||||
ServiceAccountCredentials string `config:"service_account_credentials"`
|
||||
TeamDriveID string `config:"team_drive"`
|
||||
AuthOwnerOnly bool `config:"auth_owner_only"`
|
||||
UseTrash bool `config:"use_trash"`
|
||||
SkipGdocs bool `config:"skip_gdocs"`
|
||||
SharedWithMe bool `config:"shared_with_me"`
|
||||
TrashedOnly bool `config:"trashed_only"`
|
||||
Extensions string `config:"formats"`
|
||||
UseCreatedDate bool `config:"use_created_date"`
|
||||
ListChunk int64 `config:"list_chunk"`
|
||||
Impersonate string `config:"impersonate"`
|
||||
AlternateExport bool `config:"alternate_export"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
AcknowledgeAbuse bool `config:"acknowledge_abuse"`
|
||||
KeepRevisionForever bool `config:"keep_revision_forever"`
|
||||
Untrash bool `config:"untrash"`
|
||||
}
|
||||
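The Options struct above is decoded with configstruct.Set, which is the pattern this change applies across backends. A reduced, hypothetical two-field illustration of that decode step:

```go
package example

import (
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
)

// Options is a two-field stand-in for the real drive Options struct above.
type Options struct {
	UseTrash  bool          `config:"use_trash"`
	ChunkSize fs.SizeSuffix `config:"chunk_size"`
}

// parseOptions shows the decode step: values come out of the configmap.Mapper
// and are converted to the field types (bool, fs.SizeSuffix, ...) by
// configstruct.Set according to the `config:` tags.
func parseOptions(m configmap.Mapper) (*Options, error) {
	opt := new(Options)
	if err := configstruct.Set(m, opt); err != nil {
		return nil, err
	}
	return opt, nil
}
```

With this, m.Set("chunk_size", "16M") before parseOptions(m) should yield a 16 MiB ChunkSize, assuming fs.SizeSuffix keeps its usual binary-suffix parsing; keys absent from the mapper are expected to be backed by the registered option defaults.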
|
||||
// Fs represents a remote drive server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
svc *drive.Service // the connection to the drive server
|
||||
client *http.Client // authorized client
|
||||
@@ -192,7 +292,6 @@ type Fs struct {
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
pacer *pacer.Pacer // To pace the API calls
|
||||
extensions []string // preferred extensions to download docs
|
||||
teamDriveID string // team drive ID, may be ""
|
||||
isTeamDrive bool // true if this is a team drive
|
||||
}
|
||||
|
||||
@@ -274,8 +373,8 @@ type listFn func(*drive.File) bool
|
||||
func (f *Fs) list(dirID string, title string, directoriesOnly bool, filesOnly bool, includeAll bool, fn listFn) (found bool, err error) {
|
||||
var query []string
|
||||
if !includeAll {
|
||||
q := "trashed=" + strconv.FormatBool(*driveTrashedOnly)
|
||||
if *driveTrashedOnly {
|
||||
q := "trashed=" + strconv.FormatBool(f.opt.TrashedOnly)
|
||||
if f.opt.TrashedOnly {
|
||||
q = fmt.Sprintf("(mimeType='%s' or %s)", driveFolderType, q)
|
||||
}
|
||||
query = append(query, q)
|
||||
@@ -283,10 +382,10 @@ func (f *Fs) list(dirID string, title string, directoriesOnly bool, filesOnly bo
|
||||
// Search with sharedWithMe will always return things listed in "Shared With Me" (without any parents)
|
||||
// We must not filter with parent when we try list "ROOT" with drive-shared-with-me
|
||||
// If we need to list file inside those shared folders, we must search it without sharedWithMe
|
||||
if *driveSharedWithMe && dirID == f.rootFolderID {
|
||||
if f.opt.SharedWithMe && dirID == f.rootFolderID {
|
||||
query = append(query, "sharedWithMe=true")
|
||||
}
|
||||
if dirID != "" && !(*driveSharedWithMe && dirID == f.rootFolderID) {
|
||||
if dirID != "" && !(f.opt.SharedWithMe && dirID == f.rootFolderID) {
|
||||
query = append(query, fmt.Sprintf("'%s' in parents", dirID))
|
||||
}
|
||||
if title != "" {
|
||||
@@ -308,11 +407,11 @@ func (f *Fs) list(dirID string, title string, directoriesOnly bool, filesOnly bo
|
||||
list.Q(strings.Join(query, " and "))
|
||||
// fmt.Printf("list Query = %q\n", query)
|
||||
}
|
||||
if *driveListChunk > 0 {
|
||||
list.PageSize(*driveListChunk)
|
||||
if f.opt.ListChunk > 0 {
|
||||
list.PageSize(f.opt.ListChunk)
|
||||
}
|
||||
if f.isTeamDrive {
|
||||
list.TeamDriveId(f.teamDriveID)
|
||||
list.TeamDriveId(f.opt.TeamDriveID)
|
||||
list.SupportsTeamDrives(true)
|
||||
list.IncludeTeamDriveItems(true)
|
||||
list.Corpora("teamDrive")
|
||||
@@ -324,7 +423,7 @@ func (f *Fs) list(dirID string, title string, directoriesOnly bool, filesOnly bo
|
||||
|
||||
var fields = partialFields
|
||||
|
||||
if *driveAuthOwnerOnly {
|
||||
if f.opt.AuthOwnerOnly {
|
||||
fields += ",owners"
|
||||
}
|
||||
|
||||
@@ -395,17 +494,16 @@ func (f *Fs) parseExtensions(extensions string) error {
|
||||
}
|
||||
|
||||
// Figure out if the user wants to use a team drive
|
||||
func configTeamDrive(name string) error {
|
||||
teamDrive := config.FileGet(name, "team_drive")
|
||||
if teamDrive == "" {
|
||||
func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
|
||||
if opt.TeamDriveID == "" {
|
||||
fmt.Printf("Configure this as a team drive?\n")
|
||||
} else {
|
||||
fmt.Printf("Change current team drive ID %q?\n", teamDrive)
|
||||
fmt.Printf("Change current team drive ID %q?\n", opt.TeamDriveID)
|
||||
}
|
||||
if !config.ConfirmWithDefault(false) {
|
||||
return nil
|
||||
}
|
||||
client, err := createOAuthClient(name)
|
||||
client, err := createOAuthClient(opt, name, m)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "config team drive failed to create oauth client")
|
||||
}
|
||||
@@ -440,7 +538,8 @@ func configTeamDrive(name string) error {
|
||||
} else {
|
||||
driveID = config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true)
|
||||
}
|
||||
config.FileSet(name, "team_drive", driveID)
|
||||
m.Set("team_drive", driveID)
|
||||
opt.TeamDriveID = driveID
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -449,39 +548,37 @@ func newPacer() *pacer.Pacer {
|
||||
return pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer)
|
||||
}
|
||||
|
||||
func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
|
||||
func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
|
||||
conf, err := google.JWTConfigFromJSON(credentialsData, driveConfig.Scopes...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error processing credentials")
|
||||
}
|
||||
if *driveImpersonate != "" {
|
||||
conf.Subject = *driveImpersonate
|
||||
if opt.Impersonate != "" {
|
||||
conf.Subject = opt.Impersonate
|
||||
}
|
||||
ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
|
||||
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
|
||||
}
|
||||
|
||||
func createOAuthClient(name string) (*http.Client, error) {
|
||||
func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Client, error) {
|
||||
var oAuthClient *http.Client
|
||||
var err error
|
||||
|
||||
// try loading service account credentials from env variable, then from a file
|
||||
serviceAccountCreds := []byte(config.FileGet(name, "service_account_credentials"))
|
||||
serviceAccountPath := config.FileGet(name, "service_account_file")
|
||||
if len(serviceAccountCreds) == 0 && serviceAccountPath != "" {
|
||||
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(serviceAccountPath))
|
||||
if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
|
||||
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error opening service account credentials file")
|
||||
}
|
||||
serviceAccountCreds = loadedCreds
|
||||
opt.ServiceAccountCredentials = string(loadedCreds)
|
||||
}
|
||||
if len(serviceAccountCreds) > 0 {
|
||||
oAuthClient, err = getServiceAccountClient(serviceAccountCreds)
|
||||
if opt.ServiceAccountCredentials != "" {
|
||||
oAuthClient, err = getServiceAccountClient(opt, []byte(opt.ServiceAccountCredentials))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create oauth client from service account")
|
||||
}
|
||||
} else {
|
||||
oAuthClient, _, err = oauthutil.NewClient(name, driveConfig)
|
||||
oAuthClient, _, err = oauthutil.NewClient(name, m, driveConfig)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create oauth client")
|
||||
}
|
||||
@@ -491,15 +588,21 @@ func createOAuthClient(name string) (*http.Client, error) {
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
func NewFs(name, path string) (fs.Fs, error) {
if !isPowerOfTwo(int64(chunkSize)) {
return nil, errors.Errorf("drive: chunk size %v isn't a power of two", chunkSize)
func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
if chunkSize < 256*1024 {
return nil, errors.Errorf("drive: chunk size can't be less than 256k - was %v", chunkSize)
if !isPowerOfTwo(int64(opt.ChunkSize)) {
return nil, errors.Errorf("drive: chunk size %v isn't a power of two", opt.ChunkSize)
}
if opt.ChunkSize < 256*1024 {
return nil, errors.Errorf("drive: chunk size can't be less than 256k - was %v", opt.ChunkSize)
}

oAuthClient, err := createOAuthClient(name)
oAuthClient, err := createOAuthClient(opt, name, m)
if err != nil {
return nil, errors.Wrap(err, "drive: failed when making oauth client")
}
@@ -512,10 +615,10 @@ func NewFs(name, path string) (fs.Fs, error) {
f := &Fs{
name: name,
root: root,
opt: *opt,
pacer: newPacer(),
}
f.teamDriveID = config.FileGet(name, "team_drive")
f.isTeamDrive = f.teamDriveID != ""
f.isTeamDrive = opt.TeamDriveID != ""
f.features = (&fs.Features{
DuplicateFiles: true,
ReadMimeType: true,
@@ -532,20 +635,20 @@ func NewFs(name, path string) (fs.Fs, error) {

// set root folder for a team drive or query the user root folder
if f.isTeamDrive {
f.rootFolderID = f.teamDriveID
f.rootFolderID = f.opt.TeamDriveID
} else {
f.rootFolderID = "root"
}

// override root folder if set in the config
if rootID := config.FileGet(name, "root_folder_id"); rootID != "" {
f.rootFolderID = rootID
if opt.RootFolderID != "" {
f.rootFolderID = opt.RootFolderID
}

f.dirCache = dircache.New(root, f.rootFolderID, f)

// Parse extensions
err = f.parseExtensions(*driveExtensions)
err = f.parseExtensions(opt.Extensions)
if err != nil {
return nil, err
}
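The constructor pattern just shown (parse the configmap.Mapper into a typed Options struct before building the Fs) repeats across every backend in this change. A minimal sketch of that prologue, assuming only the configstruct.Set call and the `config:"..."` struct tags that appear in the diff; the option names here are invented for illustration:

// Sketch only - not part of the diff above.
package sketch

import (
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
)

// Options mirrors the backend's registered options via `config` tags.
type Options struct {
	ChunkSize fs.SizeSuffix `config:"chunk_size"` // hypothetical option
	UseTrash  bool          `config:"use_trash"`  // hypothetical option
}

// parseOptions shows the common prologue: copy the remote's config section
// into a typed Options value, returning any conversion error.
func parseOptions(m configmap.Mapper) (*Options, error) {
	opt := new(Options)
	if err := configstruct.Set(m, opt); err != nil {
		return nil, err
	}
	return opt, nil
}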
@@ -722,6 +825,23 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
var iErr error
_, err = f.list(directoryID, "", false, false, false, func(item *drive.File) bool {
remote := path.Join(dir, item.Name)

// Untrash all trashed files if required
if f.opt.Untrash && item.Trashed {
fs.Infof(remote, "Untrashing")
err = f.pacer.Call(func() (bool, error) {
info := drive.File{
Trashed: false,
ForceSendFields: []string{"Trashed"},
}
_, err = f.svc.Files.Update(item.Id, &info).Fields("").SupportsTeamDrives(f.isTeamDrive).Do()
return shouldRetry(err)
})
if err != nil {
fs.Errorf(remote, "Untrashing failed: %v", err)
}
}

switch {
case item.MimeType == driveFolderType:
// cache the directory ID for later lookups
@@ -729,7 +849,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
|
||||
d := fs.NewDir(remote, when).SetID(item.Id)
|
||||
entries = append(entries, d)
|
||||
case *driveAuthOwnerOnly && !isAuthOwned(item):
|
||||
case f.opt.AuthOwnerOnly && !isAuthOwned(item):
|
||||
// ignore object
|
||||
case item.Md5Checksum != "" || item.Size > 0:
|
||||
// If item has MD5 sum or a length it is a file stored on drive
|
||||
@@ -739,7 +859,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
return true
|
||||
}
|
||||
entries = append(entries, o)
|
||||
case *driveSkipGdocs:
|
||||
case f.opt.SkipGdocs:
|
||||
fs.Debugf(remote, "Skipping google document type %q", item.MimeType)
|
||||
default:
|
||||
exportMimeTypes, isDocument := f.exportFormats()[item.MimeType]
|
||||
@@ -760,7 +880,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
}
|
||||
obj := o.(*Object)
|
||||
obj.url = fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, item.Id, url.QueryEscape(exportMimeType))
|
||||
if *driveAlternateExport {
|
||||
if f.opt.AlternateExport {
|
||||
switch item.MimeType {
|
||||
case "application/vnd.google-apps.drawing":
|
||||
obj.url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", item.Id, extension)
|
||||
@@ -854,11 +974,11 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt
|
||||
}
|
||||
|
||||
var info *drive.File
|
||||
if size == 0 || size < int64(driveUploadCutoff) {
|
||||
if size == 0 || size < int64(f.opt.UploadCutoff) {
|
||||
// Make the API request to upload metadata and file data.
|
||||
// Don't retry, return a retry error instead
|
||||
err = f.pacer.CallNoRetry(func() (bool, error) {
|
||||
info, err = f.svc.Files.Create(createInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).KeepRevisionForever(*driveKeepRevisionForever).Do()
|
||||
info, err = f.svc.Files.Create(createInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).KeepRevisionForever(f.opt.KeepRevisionForever).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -972,7 +1092,7 @@ func (f *Fs) Rmdir(dir string) error {
|
||||
// trash the directory if it had trashed files
|
||||
// in or the user wants to trash, otherwise
|
||||
// delete it.
|
||||
err = f.rmdir(directoryID, trashedFiles || *driveUseTrash)
|
||||
err = f.rmdir(directoryID, trashedFiles || f.opt.UseTrash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1015,7 +1135,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
|
||||
var info *drive.File
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
info, err = o.fs.svc.Files.Copy(srcObj.id, createInfo).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).KeepRevisionForever(*driveKeepRevisionForever).Do()
|
||||
info, err = o.fs.svc.Files.Copy(srcObj.id, createInfo).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).KeepRevisionForever(f.opt.KeepRevisionForever).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1040,7 +1160,7 @@ func (f *Fs) Purge() error {
|
||||
return err
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
if *driveUseTrash {
|
||||
if f.opt.UseTrash {
|
||||
info := drive.File{
|
||||
Trashed: true,
|
||||
}
|
||||
@@ -1316,11 +1436,11 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), pollInter
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
changesCall := f.svc.Changes.List(pageToken).Fields("nextPageToken,newStartPageToken,changes(fileId,file(name,parents,mimeType))")
|
||||
if *driveListChunk > 0 {
|
||||
changesCall.PageSize(*driveListChunk)
|
||||
if f.opt.ListChunk > 0 {
|
||||
changesCall.PageSize(f.opt.ListChunk)
|
||||
}
|
||||
if f.isTeamDrive {
|
||||
changesCall.TeamDriveId(f.teamDriveID)
|
||||
changesCall.TeamDriveId(f.opt.TeamDriveID)
|
||||
changesCall.SupportsTeamDrives(true)
|
||||
changesCall.IncludeTeamDriveItems(true)
|
||||
}
|
||||
@@ -1444,7 +1564,7 @@ func (o *Object) setMetaData(info *drive.File) {
|
||||
o.url = fmt.Sprintf("%sfiles/%s?alt=media", o.fs.svc.BasePath, info.Id)
|
||||
o.md5sum = strings.ToLower(info.Md5Checksum)
|
||||
o.bytes = info.Size
|
||||
if *driveUseCreatedDate {
|
||||
if o.fs.opt.UseCreatedDate {
|
||||
o.modifiedDate = info.CreatedTime
|
||||
} else {
|
||||
o.modifiedDate = info.ModifiedTime
|
||||
@@ -1617,7 +1737,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
_, res, err := o.httpResponse("GET", options)
|
||||
if err != nil {
|
||||
if isGoogleError(err, "cannotDownloadAbusiveFile") {
|
||||
if *driveAcknowledgeAbuse {
|
||||
if o.fs.opt.AcknowledgeAbuse {
|
||||
// Retry acknowledging abuse
|
||||
if strings.ContainsRune(o.url, '?') {
|
||||
o.url += "&"
|
||||
@@ -1663,10 +1783,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
// Make the API request to upload metadata and file data.
|
||||
var err error
|
||||
var info *drive.File
|
||||
if size == 0 || size < int64(driveUploadCutoff) {
|
||||
if size == 0 || size < int64(o.fs.opt.UploadCutoff) {
|
||||
// Don't retry, return a retry error instead
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
info, err = o.fs.svc.Files.Update(o.id, updateInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(o.fs.isTeamDrive).KeepRevisionForever(*driveKeepRevisionForever).Do()
|
||||
info, err = o.fs.svc.Files.Update(o.id, updateInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(o.fs.isTeamDrive).KeepRevisionForever(o.fs.opt.KeepRevisionForever).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -1690,7 +1810,7 @@ func (o *Object) Remove() error {
|
||||
}
|
||||
var err error
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
if *driveUseTrash {
|
||||
if o.fs.opt.UseTrash {
|
||||
info := drive.File{
|
||||
Trashed: true,
|
||||
}
|
||||
|
||||
@@ -58,7 +58,7 @@ func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string,
|
||||
if f.isTeamDrive {
|
||||
params.Set("supportsTeamDrives", "true")
|
||||
}
|
||||
if *driveKeepRevisionForever {
|
||||
if f.opt.KeepRevisionForever {
|
||||
params.Set("keepRevisionForever", "true")
|
||||
}
|
||||
urls := "https://www.googleapis.com/upload/drive/v3/files"
|
||||
@@ -197,11 +197,11 @@ func (rx *resumableUpload) Upload() (*drive.File, error) {
|
||||
start := int64(0)
|
||||
var StatusCode int
|
||||
var err error
|
||||
buf := make([]byte, int(chunkSize))
|
||||
buf := make([]byte, int(rx.f.opt.ChunkSize))
|
||||
for start < rx.ContentLength {
|
||||
reqSize := rx.ContentLength - start
|
||||
if reqSize >= int64(chunkSize) {
|
||||
reqSize = int64(chunkSize)
|
||||
if reqSize >= int64(rx.f.opt.ChunkSize) {
|
||||
reqSize = int64(rx.f.opt.ChunkSize)
|
||||
}
|
||||
chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
|
||||
|
||||
|
||||
@@ -37,7 +37,8 @@ import (
|
||||
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
@@ -55,24 +56,6 @@ const (
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
)
|
||||
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
dropboxConfig = &oauth2.Config{
|
||||
Scopes: []string{},
|
||||
// Endpoint: oauth2.Endpoint{
|
||||
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
|
||||
// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
|
||||
// },
|
||||
Endpoint: dropbox.OAuthEndpoint(""),
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
}
|
||||
// A regexp matching path names for files Dropbox ignores
|
||||
// See https://www.dropbox.com/en/help/145 - Ignored files
|
||||
ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
|
||||
// Upload chunk size - setting too small makes uploads slow.
|
||||
// Chunks are buffered into memory for retries.
|
||||
//
|
||||
@@ -96,8 +79,26 @@ var (
|
||||
// Choose 48MB which is 91% of Maximum speed. rclone by
|
||||
// default does 4 transfers so this should use 4*48MB = 192MB
|
||||
// by default.
|
||||
uploadChunkSize = fs.SizeSuffix(48 * 1024 * 1024)
|
||||
maxUploadChunkSize = fs.SizeSuffix(150 * 1024 * 1024)
|
||||
defaultChunkSize = 48 * 1024 * 1024
|
||||
maxChunkSize = 150 * 1024 * 1024
|
||||
)
|
||||
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
dropboxConfig = &oauth2.Config{
|
||||
Scopes: []string{},
|
||||
// Endpoint: oauth2.Endpoint{
|
||||
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
|
||||
// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
|
||||
// },
|
||||
Endpoint: dropbox.OAuthEndpoint(""),
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
}
|
||||
// A regexp matching path names for files Dropbox ignores
|
||||
// See https://www.dropbox.com/en/help/145 - Ignored files
|
||||
ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
|
||||
)
|
||||
|
||||
// Register with Fs
@@ -106,27 +107,37 @@ func init() {
Name: "dropbox",
Description: "Dropbox",
NewFs: NewFs,
Config: func(name string) {
err := oauthutil.ConfigNoOffline("dropbox", name, dropboxConfig)
Config: func(name string, m configmap.Mapper) {
err := oauthutil.ConfigNoOffline("dropbox", name, m, dropboxConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: config.ConfigClientID,
Help: "Dropbox App Client Id - leave blank normally.",
Help: "Dropbox App Client Id\nLeave blank normally.",
}, {
Name: config.ConfigClientSecret,
Help: "Dropbox App Client Secret - leave blank normally.",
Help: "Dropbox App Client Secret\nLeave blank normally.",
}, {
Name: "chunk_size",
Help: fmt.Sprintf("Upload chunk size. Max %v.", fs.SizeSuffix(maxChunkSize)),
Default: fs.SizeSuffix(defaultChunkSize),
Advanced: true,
}},
})
flags.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize))
}

// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
}

// Fs represents a remote dropbox server
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv files.Client // the connection to the dropbox server
sharing sharing.Client // as above, but for generating sharing links
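For illustration, here is how the new chunk_size option might be supplied programmatically when constructing the backend. The remote name, token value, and size are made up, and a real remote would normally get these from the saved config file rather than an in-memory map; this is a hedged sketch, not the documented way to use the backend:

// Sketch only: constructing the Dropbox backend with an explicit chunk_size.
package main

import (
	"log"

	"github.com/ncw/rclone/backend/dropbox"
	"github.com/ncw/rclone/fs/config/configmap"
)

func main() {
	m := configmap.Simple{
		"token":      `{"access_token":"XXX"}`, // placeholder, not a real token
		"chunk_size": "16M",                    // parsed into Options.ChunkSize
	}
	f, err := dropbox.NewFs("dropbox-example", "", m)
	if err != nil {
		log.Fatal(err)
	}
	_ = f
}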
@@ -185,15 +196,22 @@ func shouldRetry(err error) (bool, error) {
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
if uploadChunkSize > maxUploadChunkSize {
|
||||
return nil, errors.Errorf("chunk size too big, must be < %v", maxUploadChunkSize)
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.ChunkSize > maxChunkSize {
|
||||
return nil, errors.Errorf("chunk size too big, must be < %v", maxChunkSize)
|
||||
}
|
||||
|
||||
// Convert the old token if it exists. The old token was just
|
||||
// just a string, the new one is a JSON blob
|
||||
oldToken := strings.TrimSpace(config.FileGet(name, config.ConfigToken))
|
||||
if oldToken != "" && oldToken[0] != '{' {
|
||||
oldToken, ok := m.Get(config.ConfigToken)
|
||||
oldToken = strings.TrimSpace(oldToken)
|
||||
if ok && oldToken != "" && oldToken[0] != '{' {
|
||||
fs.Infof(name, "Converting token to new format")
|
||||
newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
|
||||
err := config.SetValueAndSave(name, config.ConfigToken, newToken)
|
||||
@@ -202,13 +220,14 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
}
|
||||
}
|
||||
|
||||
oAuthClient, _, err := oauthutil.NewClient(name, dropboxConfig)
|
||||
oAuthClient, _, err := oauthutil.NewClient(name, m, dropboxConfig)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to configure dropbox")
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
}
|
||||
config := dropbox.Config{
|
||||
@@ -911,7 +930,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
|
||||
// avoidable request to the Dropbox API that does not carry payload.
|
||||
func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
|
||||
chunkSize := int64(uploadChunkSize)
|
||||
chunkSize := int64(o.fs.opt.ChunkSize)
|
||||
chunks := 0
|
||||
if size != -1 {
|
||||
chunks = int(size/chunkSize) + 1
|
||||
@@ -1026,7 +1045,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
size := src.Size()
|
||||
var err error
|
||||
var entry *files.FileMetadata
|
||||
if size > int64(uploadChunkSize) || size == -1 {
|
||||
if size > int64(o.fs.opt.ChunkSize) || size == -1 {
|
||||
entry, err = o.uploadChunked(in, commitInfo, size)
|
||||
} else {
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
|
||||
@@ -4,16 +4,15 @@ package ftp
|
||||
import (
|
||||
"io"
|
||||
"net/textproto"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/jlaffaye/ftp"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
@@ -30,33 +29,40 @@ func init() {
|
||||
{
|
||||
Name: "host",
|
||||
Help: "FTP host to connect to",
|
||||
Optional: false,
|
||||
Required: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "ftp.example.com",
|
||||
Help: "Connect to ftp.example.com",
|
||||
}},
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
|
||||
Optional: true,
|
||||
Name: "user",
|
||||
Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
|
||||
}, {
|
||||
Name: "port",
|
||||
Help: "FTP port, leave blank to use default (21) ",
|
||||
Optional: true,
|
||||
Name: "port",
|
||||
Help: "FTP port, leave blank to use default (21)",
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "FTP password",
|
||||
IsPassword: true,
|
||||
Optional: false,
|
||||
Required: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Host string `config:"host"`
|
||||
User string `config:"user"`
|
||||
Pass string `config:"pass"`
|
||||
Port string `config:"port"`
|
||||
}
|
||||
|
||||
// Fs represents a remote FTP server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
url string
|
||||
user string
|
||||
@@ -161,51 +167,33 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (ff fs.Fs, err error) {
|
||||
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
||||
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
|
||||
// FIXME Convert the old scheme used for the first beta - remove after release
|
||||
if ftpURL := config.FileGet(name, "url"); ftpURL != "" {
|
||||
fs.Infof(name, "Converting old configuration")
|
||||
u, err := url.Parse(ftpURL)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Failed to parse old url %q", ftpURL)
|
||||
}
|
||||
parts := strings.Split(u.Host, ":")
|
||||
config.FileSet(name, "host", parts[0])
|
||||
if len(parts) > 1 {
|
||||
config.FileSet(name, "port", parts[1])
|
||||
}
|
||||
config.FileSet(name, "host", u.Host)
|
||||
config.FileSet(name, "user", config.FileGet(name, "username"))
|
||||
config.FileSet(name, "pass", config.FileGet(name, "password"))
|
||||
config.FileDeleteKey(name, "username")
|
||||
config.FileDeleteKey(name, "password")
|
||||
config.FileDeleteKey(name, "url")
|
||||
config.SaveConfig()
|
||||
if u.Path != "" && u.Path != "/" {
|
||||
fs.Errorf(name, "Path %q in FTP URL no longer supported - put it on the end of the remote %s:%s", u.Path, name, u.Path)
|
||||
}
|
||||
// Parse config into Options struct
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
host := config.FileGet(name, "host")
user := config.FileGet(name, "user")
pass := config.FileGet(name, "pass")
port := config.FileGet(name, "port")
pass, err = obscure.Reveal(pass)
pass, err := obscure.Reveal(opt.Pass)
if err != nil {
return nil, errors.Wrap(err, "NewFS decrypt password")
}
user := opt.User
if user == "" {
user = os.Getenv("USER")
}
port := opt.Port
if port == "" {
port = "21"
}

dialAddr := host + ":" + port
dialAddr := opt.Host + ":" + port
u := "ftp://" + path.Join(dialAddr+"/", root)
f := &Fs{
name: name,
root: root,
opt: *opt,
url: u,
user: user,
pass: pass,
@@ -480,6 +468,8 @@ func (f *Fs) mkdir(abspath string) error {
|
||||
switch errX.Code {
|
||||
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
|
||||
err = nil
|
||||
case 521: // dir already exists: error number according to RFC 959: issue #2363
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
return err
|
||||
|
||||
@@ -29,7 +29,8 @@ import (
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
@@ -55,8 +56,6 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
gcsLocation = flags.StringP("gcs-location", "", "", "Default location for buckets (us|eu|asia|us-central1|us-east1|us-east4|us-west1|asia-east1|asia-noetheast1|asia-southeast1|australia-southeast1|europe-west1|europe-west2).")
|
||||
gcsStorageClass = flags.StringP("gcs-storage-class", "", "", "Default storage class for buckets (MULTI_REGIONAL|REGIONAL|STANDARD|NEARLINE|COLDLINE|DURABLE_REDUCED_AVAILABILITY).")
|
||||
// Description of how to auth for this app
|
||||
storageConfig = &oauth2.Config{
|
||||
Scopes: []string{storage.DevstorageFullControlScope},
|
||||
@@ -71,29 +70,36 @@ var (
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "google cloud storage",
|
||||
Prefix: "gcs",
|
||||
Description: "Google Cloud Storage (this is not Google Drive)",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string) {
|
||||
if config.FileGet(name, "service_account_file") != "" {
|
||||
Config: func(name string, m configmap.Mapper) {
|
||||
saFile, _ := m.Get("service_account_file")
|
||||
saCreds, _ := m.Get("service_account_credentials")
|
||||
if saFile != "" || saCreds != "" {
|
||||
return
|
||||
}
|
||||
err := oauthutil.Config("google cloud storage", name, storageConfig)
|
||||
err := oauthutil.Config("google cloud storage", name, m, storageConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: config.ConfigClientID,
|
||||
Help: "Google Application Client Id - leave blank normally.",
|
||||
Help: "Google Application Client Id\nLeave blank normally.",
|
||||
}, {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Google Application Client Secret - leave blank normally.",
|
||||
Help: "Google Application Client Secret\nLeave blank normally.",
|
||||
}, {
|
||||
Name: "project_number",
|
||||
Help: "Project number optional - needed only for list/create/delete buckets - see your developer console.",
|
||||
Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
|
||||
}, {
|
||||
Name: "service_account_file",
|
||||
Help: "Service Account Credentials JSON file path - needed only if you want use SA instead of interactive login.",
|
||||
Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
}, {
|
||||
Name: "service_account_credentials",
|
||||
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Hide: fs.OptionHideBoth,
|
||||
}, {
|
||||
Name: "object_acl",
|
||||
Help: "Access Control List for new objects.",
|
||||
@@ -207,22 +213,29 @@ func init() {
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
ProjectNumber string `config:"project_number"`
|
||||
ServiceAccountFile string `config:"service_account_file"`
|
||||
ServiceAccountCredentials string `config:"service_account_credentials"`
|
||||
ObjectACL string `config:"object_acl"`
|
||||
BucketACL string `config:"bucket_acl"`
|
||||
Location string `config:"location"`
|
||||
StorageClass string `config:"storage_class"`
|
||||
}
|
||||
|
||||
// Fs represents a remote storage server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
features *fs.Features // optional features
|
||||
svc *storage.Service // the connection to the storage server
|
||||
client *http.Client // authorized client
|
||||
bucket string // the bucket we are working on
|
||||
bucketOKMu sync.Mutex // mutex to protect bucket OK
|
||||
bucketOK bool // true if we have created the bucket
|
||||
projectNumber string // used for finding buckets
|
||||
objectACL string // used when creating new objects
|
||||
bucketACL string // used when creating new buckets
|
||||
location string // location of new buckets
|
||||
storageClass string // storage class of new buckets
|
||||
pacer *pacer.Pacer // To pace the API calls
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
svc *storage.Service // the connection to the storage server
|
||||
client *http.Client // authorized client
|
||||
bucket string // the bucket we are working on
|
||||
bucketOKMu sync.Mutex // mutex to protect bucket OK
|
||||
bucketOK bool // true if we have created the bucket
|
||||
pacer *pacer.Pacer // To pace the API calls
|
||||
}
|
||||
|
||||
// Object describes a storage object
|
||||
@@ -315,27 +328,37 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
var oAuthClient *http.Client
|
||||
var err error
|
||||
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.ObjectACL == "" {
|
||||
opt.ObjectACL = "private"
|
||||
}
|
||||
if opt.BucketACL == "" {
|
||||
opt.BucketACL = "private"
|
||||
}
|
||||
|
||||
// try loading service account credentials from env variable, then from a file
|
||||
serviceAccountCreds := []byte(config.FileGet(name, "service_account_credentials"))
|
||||
serviceAccountPath := config.FileGet(name, "service_account_file")
|
||||
if len(serviceAccountCreds) == 0 && serviceAccountPath != "" {
|
||||
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(serviceAccountPath))
|
||||
if opt.ServiceAccountCredentials != "" && opt.ServiceAccountFile != "" {
|
||||
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error opening service account credentials file")
|
||||
}
|
||||
serviceAccountCreds = loadedCreds
|
||||
opt.ServiceAccountCredentials = string(loadedCreds)
|
||||
}
|
||||
if len(serviceAccountCreds) > 0 {
|
||||
oAuthClient, err = getServiceAccountClient(serviceAccountCreds)
|
||||
if opt.ServiceAccountCredentials != "" {
|
||||
oAuthClient, err = getServiceAccountClient([]byte(opt.ServiceAccountCredentials))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
|
||||
}
|
||||
} else {
|
||||
oAuthClient, _, err = oauthutil.NewClient(name, storageConfig)
|
||||
oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
|
||||
}
|
||||
@@ -347,33 +370,17 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
bucket: bucket,
|
||||
root: directory,
|
||||
projectNumber: config.FileGet(name, "project_number"),
|
||||
objectACL: config.FileGet(name, "object_acl"),
|
||||
bucketACL: config.FileGet(name, "bucket_acl"),
|
||||
location: config.FileGet(name, "location"),
|
||||
storageClass: config.FileGet(name, "storage_class"),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
|
||||
name: name,
|
||||
bucket: bucket,
|
||||
root: directory,
|
||||
opt: *opt,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
BucketBased: true,
|
||||
}).Fill(f)
|
||||
if f.objectACL == "" {
|
||||
f.objectACL = "private"
|
||||
}
|
||||
if f.bucketACL == "" {
|
||||
f.bucketACL = "private"
|
||||
}
|
||||
if *gcsLocation != "" {
|
||||
f.location = *gcsLocation
|
||||
}
|
||||
if *gcsStorageClass != "" {
|
||||
f.storageClass = *gcsStorageClass
|
||||
}
|
||||
|
||||
// Create a new authorized Drive client.
|
||||
f.client = oAuthClient
|
||||
@@ -480,7 +487,7 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
|
||||
remote := object.Name[rootLength:]
|
||||
// is this a directory marker?
|
||||
if (strings.HasSuffix(remote, "/") || remote == "") && object.Size == 0 {
|
||||
if recurse {
|
||||
if recurse && remote != "" {
|
||||
// add a directory in if --fast-list since will have no prefixes
|
||||
err = fn(remote[:len(remote)-1], object, true)
|
||||
if err != nil {
|
||||
@@ -550,10 +557,10 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
|
||||
if dir != "" {
|
||||
return nil, fs.ErrorListBucketRequired
|
||||
}
|
||||
if f.projectNumber == "" {
|
||||
if f.opt.ProjectNumber == "" {
|
||||
return nil, errors.New("can't list buckets without project number")
|
||||
}
|
||||
listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks)
|
||||
listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
|
||||
for {
|
||||
var buckets *storage.Buckets
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
@@ -672,17 +679,17 @@ func (f *Fs) Mkdir(dir string) (err error) {
|
||||
return errors.Wrap(err, "failed to get bucket")
|
||||
}
|
||||
|
||||
if f.projectNumber == "" {
|
||||
if f.opt.ProjectNumber == "" {
|
||||
return errors.New("can't make bucket without project number")
|
||||
}
|
||||
|
||||
bucket := storage.Bucket{
|
||||
Name: f.bucket,
|
||||
Location: f.location,
|
||||
StorageClass: f.storageClass,
|
||||
Location: f.opt.Location,
|
||||
StorageClass: f.opt.StorageClass,
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, err = f.svc.Buckets.Insert(f.projectNumber, &bucket).PredefinedAcl(f.bucketACL).Do()
|
||||
_, err = f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket).PredefinedAcl(f.opt.BucketACL).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
@@ -948,7 +955,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
}
|
||||
var newObject *storage.Object
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.objectACL).Do()
|
||||
newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.opt.ObjectACL).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -14,7 +14,8 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/lib/rest"
|
||||
@@ -35,7 +36,7 @@ func init() {
|
||||
Options: []fs.Option{{
|
||||
Name: "url",
|
||||
Help: "URL of http host to connect to",
|
||||
Optional: false,
|
||||
Required: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "https://example.com",
|
||||
Help: "Connect to example.com",
|
||||
@@ -45,11 +46,17 @@ func init() {
|
||||
fs.Register(fsi)
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
type Options struct {
Endpoint string `config:"url"`
}

// Fs stores the interface to the remote HTTP files
type Fs struct {
name string
root string
features *fs.Features // optional features
opt Options // options for this backend
endpoint *url.URL
endpointURL string // endpoint as a string
httpClient *http.Client
@@ -78,14 +85,20 @@ func statusError(res *http.Response, err error) error {

// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string) (fs.Fs, error) {
endpoint := config.FileGet(name, "url")
if !strings.HasSuffix(endpoint, "/") {
endpoint += "/"
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}

if !strings.HasSuffix(opt.Endpoint, "/") {
opt.Endpoint += "/"
}

// Parse the endpoint and stick the root onto it
base, err := url.Parse(endpoint)
base, err := url.Parse(opt.Endpoint)
if err != nil {
return nil, err
}
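The endpoint handling above is plain net/url work: make sure the endpoint ends in a slash so relative joins behave, then parse it and attach the root. A standalone sketch of one way to do that kind of normalisation (the example URL and root are invented, and this is not claimed to be the backend's exact continuation):

// Sketch only: normalising an endpoint and joining a root onto it.
package main

import (
	"fmt"
	"net/url"
	"path"
	"strings"
)

func main() {
	endpoint := "https://example.com/pub" // assumed endpoint
	root := "files/2018"                  // assumed root within the remote
	if !strings.HasSuffix(endpoint, "/") {
		endpoint += "/"
	}
	base, err := url.Parse(endpoint)
	if err != nil {
		panic(err)
	}
	u, err := url.Parse(path.Join(base.Path, root))
	if err != nil {
		panic(err)
	}
	fmt.Println(base.ResolveReference(u)) // https://example.com/pub/files/2018
}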
@@ -130,6 +143,7 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
httpClient: client,
|
||||
endpoint: u,
|
||||
endpointURL: u.String(),
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/ncw/rclone/lib/rest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -29,7 +30,7 @@ var (
|
||||
)
|
||||
|
||||
// prepareServer the test server and return a function to tidy it up afterwards
func prepareServer(t *testing.T) func() {
func prepareServer(t *testing.T) (configmap.Simple, func()) {
// file server for test/files
fileServer := http.FileServer(http.Dir(filesPath))

@@ -41,19 +42,24 @@ func prepareServer(t *testing.T) func() {
// fs.Config.LogLevel = fs.LogLevelDebug
// fs.Config.DumpHeaders = true
// fs.Config.DumpBodies = true
config.FileSet(remoteName, "type", "http")
config.FileSet(remoteName, "url", ts.URL)
// config.FileSet(remoteName, "type", "http")
// config.FileSet(remoteName, "url", ts.URL)

m := configmap.Simple{
"type": "http",
"url": ts.URL,
}

// return a function to tidy up
return ts.Close
return m, ts.Close
}

// prepare the test server and return a function to tidy it up afterwards
func prepare(t *testing.T) (fs.Fs, func()) {
tidy := prepareServer(t)
m, tidy := prepareServer(t)

// Instantiate it
f, err := NewFs(remoteName, "")
f, err := NewFs(remoteName, "", m)
require.NoError(t, err)

return f, tidy
@@ -177,20 +183,20 @@ func TestMimeType(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIsAFileRoot(t *testing.T) {
|
||||
tidy := prepareServer(t)
|
||||
m, tidy := prepareServer(t)
|
||||
defer tidy()
|
||||
|
||||
f, err := NewFs(remoteName, "one%.txt")
|
||||
f, err := NewFs(remoteName, "one%.txt", m)
|
||||
assert.Equal(t, err, fs.ErrorIsFile)
|
||||
|
||||
testListRoot(t, f)
|
||||
}
|
||||
|
||||
func TestIsAFileSubDir(t *testing.T) {
|
||||
tidy := prepareServer(t)
|
||||
m, tidy := prepareServer(t)
|
||||
defer tidy()
|
||||
|
||||
f, err := NewFs(remoteName, "three/underthree.txt")
|
||||
f, err := NewFs(remoteName, "three/underthree.txt", m)
|
||||
assert.Equal(t, err, fs.ErrorIsFile)
|
||||
|
||||
entries, err := f.List("")
|
||||
|
||||
@@ -16,6 +16,8 @@ import (
|
||||
"github.com/ncw/rclone/backend/swift"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/lib/oauthutil"
|
||||
@@ -52,18 +54,18 @@ func init() {
|
||||
Name: "hubic",
|
||||
Description: "Hubic",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string) {
|
||||
err := oauthutil.Config("hubic", name, oauthConfig)
|
||||
Config: func(name string, m configmap.Mapper) {
|
||||
err := oauthutil.Config("hubic", name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: config.ConfigClientID,
|
||||
Help: "Hubic Client Id - leave blank normally.",
|
||||
Help: "Hubic Client Id\nLeave blank normally.",
|
||||
}, {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Hubic Client Secret - leave blank normally.",
|
||||
Help: "Hubic Client Secret\nLeave blank normally.",
|
||||
}},
|
||||
})
|
||||
}
|
||||
@@ -145,8 +147,8 @@ func (f *Fs) getCredentials() (err error) {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
client, _, err := oauthutil.NewClient(name, oauthConfig)
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
client, _, err := oauthutil.NewClient(name, m, oauthConfig)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to configure Hubic")
|
||||
}
|
||||
@@ -167,8 +169,15 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
return nil, errors.Wrap(err, "error authenticating swift connection")
|
||||
}
|
||||
|
||||
// Parse config into swift.Options struct
|
||||
opt := new(swift.Options)
|
||||
err = configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Make inner swift Fs from the connection
|
||||
swiftFs, err := swift.NewFsWithConnection(name, root, c, true)
|
||||
swiftFs, err := swift.NewFsWithConnection(opt, name, root, c, true)
|
||||
if err != nil && err != fs.ErrorIsFile {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -16,19 +16,11 @@ import (
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/appengine/log"
|
||||
)
|
||||
|
||||
var (
|
||||
followSymlinks = flags.BoolP("copy-links", "L", false, "Follow symlinks and copy the pointed to item.")
|
||||
skipSymlinks = flags.BoolP("skip-links", "", false, "Don't warn about skipped symlinks.")
|
||||
noUTFNorm = flags.BoolP("local-no-unicode-normalization", "", false, "Don't apply unicode normalization to paths and filenames")
|
||||
noCheckUpdated = flags.BoolP("local-no-check-updated", "", false, "Don't check to see if the files change during upload")
|
||||
)
|
||||
|
||||
// Constants
|
||||
@@ -41,29 +33,68 @@ func init() {
|
||||
Description: "Local Disk",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "nounc",
|
||||
Help: "Disable UNC (long path names) conversion on Windows",
|
||||
Optional: true,
|
||||
Name: "nounc",
|
||||
Help: "Disable UNC (long path names) conversion on Windows",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "true",
|
||||
Help: "Disables long file names",
|
||||
}},
|
||||
}, {
|
||||
Name: "copy_links",
|
||||
Help: "Follow symlinks and copy the pointed to item.",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "L",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "skip_links",
|
||||
Help: "Don't warn about skipped symlinks.",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_unicode_normalization",
|
||||
Help: "Don't apply unicode normalization to paths and filenames",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_check_updated",
|
||||
Help: "Don't check to see if the files change during upload",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "one_file_system",
|
||||
Help: "Don't cross filesystem boundaries (unix/macOS only).",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "x",
|
||||
Advanced: true,
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
type Options struct {
FollowSymlinks bool `config:"copy_links"`
SkipSymlinks bool `config:"skip_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"`
}

// Fs represents a local filesystem rooted at root
type Fs struct {
name string // the name of the remote
root string // The root directory (OS path)
opt Options // parsed config options
features *fs.Features // optional features
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
wmu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
nounc bool // Skip UNC conversion on Windows
// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
dirNames *mapper // directory name mapping
@@ -84,18 +115,22 @@ type Object struct {
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// NewFs constructs an Fs from the path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
var err error
|
||||
|
||||
if *noUTFNorm {
|
||||
log.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if opt.NoUTFNorm {
|
||||
fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
|
||||
}
|
||||
|
||||
nounc := config.FileGet(name, "nounc")
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
warned: make(map[string]struct{}),
|
||||
nounc: nounc == "true",
|
||||
dev: devUnset,
|
||||
lstat: os.Lstat,
|
||||
dirNames: newMapper(),
|
||||
@@ -105,14 +140,14 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
CaseInsensitive: f.caseInsensitive(),
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(f)
|
||||
if *followSymlinks {
|
||||
if opt.FollowSymlinks {
|
||||
f.lstat = os.Stat
|
||||
}
|
||||
|
||||
// Check to see if this points to a file
|
||||
fi, err := f.lstat(f.root)
|
||||
if err == nil {
|
||||
f.dev = readDevice(fi)
|
||||
f.dev = readDevice(fi, f.opt.OneFileSystem)
|
||||
}
|
||||
if err == nil && fi.Mode().IsRegular() {
|
||||
// It is a file, so use the parent as the root
|
||||
@@ -243,7 +278,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
newRemote := path.Join(remote, name)
|
||||
newPath := filepath.Join(fsDirPath, name)
|
||||
// Follow symlinks if required
|
||||
if *followSymlinks && (mode&os.ModeSymlink) != 0 {
|
||||
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
|
||||
fi, err = os.Stat(newPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -253,7 +288,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
if fi.IsDir() {
|
||||
// Ignore directories which are symlinks. These are junction points under windows which
|
||||
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
|
||||
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi) {
|
||||
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
|
||||
d := fs.NewDir(f.dirNames.Save(newRemote, f.cleanRemote(newRemote)), fi.ModTime())
|
||||
entries = append(entries, d)
|
||||
}
|
||||
@@ -357,7 +392,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.dev = readDevice(fi)
|
||||
f.dev = readDevice(fi, f.opt.OneFileSystem)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -643,7 +678,7 @@ func (o *Object) Storable() bool {
|
||||
}
|
||||
mode := o.mode
|
||||
if mode&os.ModeSymlink != 0 {
|
||||
if !*skipSymlinks {
|
||||
if !o.fs.opt.SkipSymlinks {
|
||||
fs.Logf(o, "Can't follow symlink without -L/--copy-links")
|
||||
}
|
||||
return false
|
||||
@@ -668,7 +703,7 @@ type localOpenFile struct {
|
||||
|
||||
// Read bytes from the object - see io.Reader
|
||||
func (file *localOpenFile) Read(p []byte) (n int, err error) {
|
||||
if !*noCheckUpdated {
|
||||
if !file.o.fs.opt.NoCheckUpdated {
|
||||
// Check if file has the same size and modTime
|
||||
fi, err := file.fd.Stat()
|
||||
if err != nil {
|
||||
@@ -878,7 +913,7 @@ func (f *Fs) cleanPath(s string) string {
|
||||
s = s2
|
||||
}
|
||||
}
|
||||
if !f.nounc {
|
||||
if !f.opt.NoUNC {
|
||||
// Convert to UNC
|
||||
s = uncPath(s)
|
||||
}
|
||||
|
||||
@@ -45,7 +45,7 @@ func TestUpdatingCheck(t *testing.T) {
|
||||
|
||||
fi, err := fd.Stat()
|
||||
require.NoError(t, err)
|
||||
o := &Object{size: fi.Size(), modTime: fi.ModTime()}
|
||||
o := &Object{size: fi.Size(), modTime: fi.ModTime(), fs: &Fs{}}
|
||||
wrappedFd := readers.NewLimitedReadCloser(fd, -1)
|
||||
hash, err := hash.NewMultiHasherTypes(hash.Supported)
|
||||
require.NoError(t, err)
|
||||
@@ -65,11 +65,7 @@ func TestUpdatingCheck(t *testing.T) {
|
||||
require.Errorf(t, err, "can't copy - source file is being updated")
|
||||
|
||||
// turn the checking off and try again
|
||||
|
||||
*noCheckUpdated = true
|
||||
defer func() {
|
||||
*noCheckUpdated = false
|
||||
}()
|
||||
in.o.fs.opt.NoCheckUpdated = true
|
||||
|
||||
r.WriteFile(filePath, "content updated", time.Now())
|
||||
_, err = in.Read(buf)
|
||||
|
||||
@@ -8,6 +8,6 @@ import "os"

// readDevice turns a valid os.FileInfo into a device number,
// returning devUnset if it fails.
func readDevice(fi os.FileInfo) uint64 {
func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
return devUnset
}

@@ -9,17 +9,12 @@ import (
"syscall"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/flags"
)

var (
oneFileSystem = flags.BoolP("one-file-system", "x", false, "Don't cross filesystem boundaries.")
)

// readDevice turns a valid os.FileInfo into a device number,
// returning devUnset if it fails.
func readDevice(fi os.FileInfo) uint64 {
if !*oneFileSystem {
func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
if !oneFileSystem {
return devUnset
}
statT, ok := fi.Sys().(*syscall.Stat_t)
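The unix variant now receives the --one-file-system choice as an argument instead of reading a package-level flag, which is what lets the local backend pass f.opt.OneFileSystem per remote. A sketch of the complete function under that assumption; the body past the type assertion is not shown in the hunk above, so the sentinel value and final return are assumptions:

// Sketch only (unix): the same gating with the tail of the function filled in.
package sketch

import (
	"os"
	"syscall"
)

const devUnset = 0xdeadbeefcafebabe // assumed sentinel for "no device read"

func readDeviceSketch(fi os.FileInfo, oneFileSystem bool) uint64 {
	if !oneFileSystem {
		return devUnset // caller did not ask for --one-file-system behaviour
	}
	statT, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return devUnset // not a unix stat result
	}
	return uint64(statT.Dev) // assumed: report the device holding the file
}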
|
||||
@@ -24,8 +24,8 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
@@ -38,12 +38,12 @@ import (
|
||||
const (
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
eventWaitTime = 500 * time.Millisecond
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
useTrash = true // FIXME make configurable - rclone global
|
||||
)
|
||||
|
||||
var (
|
||||
megaDebug = flags.BoolP("mega-debug", "", false, "If set then output more debug from mega.")
|
||||
megaCacheMu sync.Mutex // mutex for the below
|
||||
megaCache = map[string]*mega.Mega{} // cache logged in Mega's by user
|
||||
)
|
||||
@@ -57,20 +57,33 @@ func init() {
|
||||
Options: []fs.Option{{
|
||||
Name: "user",
|
||||
Help: "User name",
|
||||
Optional: true,
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "Password.",
|
||||
Optional: true,
|
||||
Required: true,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "debug",
|
||||
Help: "If set then output more debug from mega.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
User string `config:"user"`
|
||||
Pass string `config:"pass"`
|
||||
Debug bool `config:"debug"`
|
||||
}
|
||||
|
||||
// Fs represents a remote mega
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed config options
|
||||
features *fs.Features // optional features
|
||||
srv *mega.Mega // the connection to the server
|
||||
pacer *pacer.Pacer // pacer for API calls
|
||||
@@ -144,12 +157,16 @@ func (f *Fs) readMetaDataForPath(remote string) (info *mega.Node, err error) {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
user := config.FileGet(name, "user")
|
||||
pass := config.FileGet(name, "pass")
|
||||
if pass != "" {
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.Pass != "" {
|
||||
var err error
|
||||
pass, err = obscure.Reveal(pass)
|
||||
opt.Pass, err = obscure.Reveal(opt.Pass)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't decrypt password")
|
||||
}
|
||||
@@ -162,30 +179,31 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
// them up between different remotes.
|
||||
megaCacheMu.Lock()
|
||||
defer megaCacheMu.Unlock()
|
||||
srv := megaCache[user]
|
||||
srv := megaCache[opt.User]
|
||||
if srv == nil {
|
||||
srv = mega.New().SetClient(fshttp.NewClient(fs.Config))
|
||||
srv.SetRetries(fs.Config.LowLevelRetries) // let mega do the low level retries
|
||||
srv.SetLogger(func(format string, v ...interface{}) {
|
||||
fs.Infof("*go-mega*", format, v...)
|
||||
})
|
||||
if *megaDebug {
|
||||
if opt.Debug {
|
||||
srv.SetDebugger(func(format string, v ...interface{}) {
|
||||
fs.Debugf("*go-mega*", format, v...)
|
||||
})
|
||||
}
|
||||
|
||||
err := srv.Login(user, pass)
|
||||
err := srv.Login(opt.User, opt.Pass)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't login")
|
||||
}
|
||||
megaCache[user] = srv
|
||||
megaCache[opt.User] = srv
|
||||
}
|
||||
|
||||
root = parsePath(root)
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
srv: srv,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
}
|
||||
@@ -195,7 +213,7 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
}).Fill(f)
|
||||
|
||||
// Find the root node and check if it is a file or not
|
||||
_, err := f.findRoot(false)
|
||||
_, err = f.findRoot(false)
|
||||
switch err {
|
||||
case nil:
|
||||
// root node found and is a directory
|
||||
@@ -570,6 +588,8 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
|
||||
}
|
||||
}
|
||||
|
||||
waitEvent := f.srv.WaitEventsStart()
|
||||
|
||||
err = f.deleteNode(dirNode)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "delete directory node failed")
|
||||
@@ -579,7 +599,8 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
|
||||
if dirNode == rootNode {
|
||||
f.clearRoot()
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond) // FIXME give the callback a chance
|
||||
|
||||
f.srv.WaitEvents(waitEvent, eventWaitTime)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -653,6 +674,8 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
|
||||
}
|
||||
}
|
||||
|
||||
waitEvent := f.srv.WaitEventsStart()
|
||||
|
||||
// rename the object if required
|
||||
if srcLeaf != dstLeaf {
|
||||
//log.Printf("rename %q to %q", srcLeaf, dstLeaf)
|
||||
@@ -665,7 +688,8 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
|
||||
}
|
||||
}
|
||||
|
||||
time.Sleep(100 * time.Millisecond) // FIXME give events a chance...
|
||||
f.srv.WaitEvents(waitEvent, eventWaitTime)
|
||||
|
||||
return nil
|
||||
}
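The pattern introduced above for mega, and repeated for every backend below, is to replace ad-hoc config.FileGet calls with an Options struct filled from a configmap.Mapper by configstruct.Set, keyed on the `config:"..."` struct tags. A minimal, self-contained sketch of that tag-driven mapping follows; it is an illustration, not rclone's configstruct implementation: the mapper type and set helper here are simplified stand-ins, and only string and bool fields are handled.

package main

import (
	"fmt"
	"reflect"
	"strconv"
)

// mapper is a simplified stand-in for rclone's configmap.Mapper.
type mapper map[string]string

func (m mapper) Get(key string) (string, bool) {
	v, ok := m[key]
	return v, ok
}

// set copies values from m into the struct pointed to by opt, matching
// fields by their `config:"..."` tag (string and bool fields only).
func set(m mapper, opt interface{}) error {
	v := reflect.ValueOf(opt).Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		key := t.Field(i).Tag.Get("config")
		raw, ok := m.Get(key)
		if !ok {
			continue
		}
		switch v.Field(i).Kind() {
		case reflect.String:
			v.Field(i).SetString(raw)
		case reflect.Bool:
			b, err := strconv.ParseBool(raw)
			if err != nil {
				return err
			}
			v.Field(i).SetBool(b)
		}
	}
	return nil
}

// Options mirrors the shape of the mega Options struct above.
type Options struct {
	User  string `config:"user"`
	Pass  string `config:"pass"`
	Debug bool   `config:"debug"`
}

func main() {
	opt := new(Options)
	if err := set(mapper{"user": "alice", "debug": "true"}, opt); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", *opt) // {User:alice Pass: Debug:true}
}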
|
||||
|
||||
|
||||
@@ -2,7 +2,10 @@
|
||||
|
||||
package api
|
||||
|
||||
import "time"
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
timeFormat = `"` + time.RFC3339 + `"`
|
||||
@@ -93,6 +96,22 @@ type ItemReference struct {
|
||||
Path string `json:"path"` // Path that used to navigate to the item. Read/Write.
|
||||
}
|
||||
|
||||
// RemoteItemFacet groups data needed to reference a OneDrive remote item
|
||||
type RemoteItemFacet struct {
|
||||
ID string `json:"id"` // The unique identifier of the item within the remote Drive. Read-only.
|
||||
Name string `json:"name"` // The name of the item (filename and extension). Read-write.
|
||||
CreatedBy IdentitySet `json:"createdBy"` // Identity of the user, device, and application which created the item. Read-only.
|
||||
LastModifiedBy IdentitySet `json:"lastModifiedBy"` // Identity of the user, device, and application which last modified the item. Read-only.
|
||||
CreatedDateTime Timestamp `json:"createdDateTime"` // Date and time of item creation. Read-only.
|
||||
LastModifiedDateTime Timestamp `json:"lastModifiedDateTime"` // Date and time the item was last modified. Read-only.
|
||||
Folder *FolderFacet `json:"folder"` // Folder metadata, if the item is a folder. Read-only.
|
||||
File *FileFacet `json:"file"` // File metadata, if the item is a file. Read-only.
|
||||
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
|
||||
ParentReference *ItemReference `json:"parentReference"` // Parent information, if the item has a parent. Read-write.
|
||||
Size int64 `json:"size"` // Size of the item in bytes. Read-only.
|
||||
WebURL string `json:"webUrl"` // URL that displays the resource in the browser. Read-only.
|
||||
}
|
||||
|
||||
// FolderFacet groups folder-related data on OneDrive into a single structure
|
||||
type FolderFacet struct {
|
||||
ChildCount int64 `json:"childCount"` // Number of children contained immediately within this container.
|
||||
@@ -143,6 +162,7 @@ type Item struct {
|
||||
Description string `json:"description"` // Provide a user-visible description of the item. Read-write.
|
||||
Folder *FolderFacet `json:"folder"` // Folder metadata, if the item is a folder. Read-only.
|
||||
File *FileFacet `json:"file"` // File metadata, if the item is a file. Read-only.
|
||||
RemoteItem *RemoteItemFacet `json:"remoteItem"` // Remote Item metadata, if the item is a remote shared item. Read-only.
|
||||
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
|
||||
// Image *ImageFacet `json:"image"` // Image metadata, if the item is an image. Read-only.
|
||||
// Photo *PhotoFacet `json:"photo"` // Photo metadata, if the item is a photo. Read-only.
|
||||
@@ -228,3 +248,112 @@ type AsyncOperationStatus struct {
|
||||
PercentageComplete float64 `json:"percentageComplete"` // A float value between 0 and 100 that indicates the percentage complete.
|
||||
Status string `json:"status"` // A string value that maps to an enumeration of possible values about the status of the job. "notStarted | inProgress | completed | updating | failed | deletePending | deleteFailed | waiting"
|
||||
}
|
||||
|
||||
// GetID returns a normalized ID of the item
|
||||
// If DriveID is known it will be prefixed to the ID with a # separator
|
||||
func (i *Item) GetID() string {
|
||||
if i.IsRemote() && i.RemoteItem.ID != "" {
|
||||
return i.RemoteItem.ParentReference.DriveID + "#" + i.RemoteItem.ID
|
||||
} else if i.ParentReference != nil && strings.Index(i.ID, "#") == -1 {
|
||||
return i.ParentReference.DriveID + "#" + i.ID
|
||||
}
|
||||
return i.ID
|
||||
}
|
||||
|
||||
// GetDriveID returns a normalized DriveID of the item
|
||||
func (i *Item) GetDriveID() string {
|
||||
return i.GetParentReferance().DriveID
|
||||
}
|
||||
|
||||
// GetName returns a normalized Name of the item
|
||||
func (i *Item) GetName() string {
|
||||
if i.IsRemote() && i.RemoteItem.Name != "" {
|
||||
return i.RemoteItem.Name
|
||||
}
|
||||
return i.Name
|
||||
}
|
||||
|
||||
// GetFolder returns a normalized Folder of the item
|
||||
func (i *Item) GetFolder() *FolderFacet {
|
||||
if i.IsRemote() && i.RemoteItem.Folder != nil {
|
||||
return i.RemoteItem.Folder
|
||||
}
|
||||
return i.Folder
|
||||
}
|
||||
|
||||
// GetFile returns a normalized File of the item
|
||||
func (i *Item) GetFile() *FileFacet {
|
||||
if i.IsRemote() && i.RemoteItem.File != nil {
|
||||
return i.RemoteItem.File
|
||||
}
|
||||
return i.File
|
||||
}
|
||||
|
||||
// GetFileSystemInfo returns a normalized FileSystemInfo of the item
|
||||
func (i *Item) GetFileSystemInfo() *FileSystemInfoFacet {
|
||||
if i.IsRemote() && i.RemoteItem.FileSystemInfo != nil {
|
||||
return i.RemoteItem.FileSystemInfo
|
||||
}
|
||||
return i.FileSystemInfo
|
||||
}
|
||||
|
||||
// GetSize returns a normalized Size of the item
|
||||
func (i *Item) GetSize() int64 {
|
||||
if i.IsRemote() && i.RemoteItem.Size != 0 {
|
||||
return i.RemoteItem.Size
|
||||
}
|
||||
return i.Size
|
||||
}
|
||||
|
||||
// GetWebURL returns a normalized WebURL of the item
|
||||
func (i *Item) GetWebURL() string {
|
||||
if i.IsRemote() && i.RemoteItem.WebURL != "" {
|
||||
return i.RemoteItem.WebURL
|
||||
}
|
||||
return i.WebURL
|
||||
}
|
||||
|
||||
// GetCreatedBy returns a normalized CreatedBy of the item
|
||||
func (i *Item) GetCreatedBy() IdentitySet {
|
||||
if i.IsRemote() && i.RemoteItem.CreatedBy != (IdentitySet{}) {
|
||||
return i.RemoteItem.CreatedBy
|
||||
}
|
||||
return i.CreatedBy
|
||||
}
|
||||
|
||||
// GetLastModifiedBy returns a normalized LastModifiedBy of the item
|
||||
func (i *Item) GetLastModifiedBy() IdentitySet {
|
||||
if i.IsRemote() && i.RemoteItem.LastModifiedBy != (IdentitySet{}) {
|
||||
return i.RemoteItem.LastModifiedBy
|
||||
}
|
||||
return i.LastModifiedBy
|
||||
}
|
||||
|
||||
// GetCreatedDateTime returns a normalized CreatedDateTime of the item
|
||||
func (i *Item) GetCreatedDateTime() Timestamp {
|
||||
if i.IsRemote() && i.RemoteItem.CreatedDateTime != (Timestamp{}) {
|
||||
return i.RemoteItem.CreatedDateTime
|
||||
}
|
||||
return i.CreatedDateTime
|
||||
}
|
||||
|
||||
// GetLastModifiedDateTime returns a normalized LastModifiedDateTime of the item
|
||||
func (i *Item) GetLastModifiedDateTime() Timestamp {
|
||||
if i.IsRemote() && i.RemoteItem.LastModifiedDateTime != (Timestamp{}) {
|
||||
return i.RemoteItem.LastModifiedDateTime
|
||||
}
|
||||
return i.LastModifiedDateTime
|
||||
}
|
||||
|
||||
// GetParentReferance returns a normalized ParentReferance of the item
|
||||
func (i *Item) GetParentReferance() *ItemReference {
|
||||
if i.IsRemote() && i.ParentReference == nil {
|
||||
return i.RemoteItem.ParentReference
|
||||
}
|
||||
return i.ParentReference
|
||||
}
|
||||
|
||||
// IsRemote checks if item is a remote item
|
||||
func (i *Item) IsRemote() bool {
|
||||
return i.RemoteItem != nil
|
||||
}
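Every Get* helper added above follows the same fallback rule: if the item is a remote (shared) item and the corresponding RemoteItem field is populated, return that; otherwise return the item's own field. A stripped-down sketch of the pattern, with the structs reduced to the minimum needed for illustration and made-up data:

package main

import "fmt"

type remoteItemFacet struct {
	ID   string
	Name string
}

type item struct {
	ID         string
	Name       string
	RemoteItem *remoteItemFacet
}

// getName prefers the remote facet's name when present, mirroring the
// Item.GetName fallback above.
func (i *item) getName() string {
	if i.RemoteItem != nil && i.RemoteItem.Name != "" {
		return i.RemoteItem.Name
	}
	return i.Name
}

func main() {
	local := &item{ID: "1", Name: "local.txt"}
	shared := &item{ID: "2", Name: "stub", RemoteItem: &remoteItemFacet{ID: "9", Name: "shared.txt"}}
	fmt.Println(local.getName(), shared.getName()) // local.txt shared.txt
}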
|
||||
|
||||
@@ -18,7 +18,8 @@ import (
|
||||
"github.com/ncw/rclone/backend/onedrive/api"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
@@ -73,8 +74,7 @@ var (
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
}
|
||||
oauthBusinessResource = oauth2.SetAuthURLParam("resource", discoveryServiceURL)
|
||||
|
||||
chunkSize = fs.SizeSuffix(10 * 1024 * 1024)
|
||||
sharedURL = "https://api.onedrive.com/v1.0/drives" // root URL for remote shared resources
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -83,7 +83,7 @@ func init() {
|
||||
Name: "onedrive",
|
||||
Description: "Microsoft OneDrive",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string) {
|
||||
Config: func(name string, m configmap.Mapper) {
|
||||
// choose account type
|
||||
fmt.Printf("Choose OneDrive account type?\n")
|
||||
fmt.Printf(" * Say b for a OneDrive business account\n")
|
||||
@@ -92,12 +92,12 @@ func init() {
|
||||
|
||||
if isPersonal {
|
||||
// for personal accounts we don't save a field about the account
|
||||
err := oauthutil.Config("onedrive", name, oauthPersonalConfig)
|
||||
err := oauthutil.Config("onedrive", name, m, oauthPersonalConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
}
|
||||
} else {
|
||||
err := oauthutil.ConfigErrorCheck("onedrive", name, func(req *http.Request) oauthutil.AuthError {
|
||||
err := oauthutil.ConfigErrorCheck("onedrive", name, m, func(req *http.Request) oauthutil.AuthError {
|
||||
var resp oauthutil.AuthError
|
||||
|
||||
resp.Name = req.URL.Query().Get("error")
|
||||
@@ -112,7 +112,7 @@ func init() {
|
||||
}
|
||||
|
||||
// Are we running headless?
|
||||
if config.FileGet(name, config.ConfigAutomatic) != "" {
|
||||
if automatic, _ := m.Get(config.ConfigAutomatic); automatic != "" {
|
||||
// Yes, okay we are done
|
||||
return
|
||||
}
|
||||
@@ -126,7 +126,7 @@ func init() {
|
||||
Services []serviceResource `json:"value"`
|
||||
}
|
||||
|
||||
oAuthClient, _, err := oauthutil.NewClient(name, oauthBusinessConfig)
|
||||
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthBusinessConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure OneDrive: %v", err)
|
||||
return
|
||||
@@ -171,13 +171,13 @@ func init() {
|
||||
foundService = config.Choose("Choose resource URL", resourcesID, resourcesURL, false)
|
||||
}
|
||||
|
||||
config.FileSet(name, configResourceURL, foundService)
|
||||
m.Set(configResourceURL, foundService)
|
||||
oauthBusinessResource = oauth2.SetAuthURLParam("resource", foundService)
|
||||
|
||||
// get the token from the initial config
|
||||
// we need to update the token with a resource
|
||||
// specific token we will query now
|
||||
token, err := oauthutil.GetToken(name)
|
||||
token, err := oauthutil.GetToken(name, m)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Error while getting token: %s", err)
|
||||
return
|
||||
@@ -220,7 +220,7 @@ func init() {
|
||||
token.RefreshToken = jsonToken.RefreshToken
|
||||
|
||||
// finally save them in the config
|
||||
err = oauthutil.PutToken(name, token, true)
|
||||
err = oauthutil.PutToken(name, m, token, true)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Error while setting token: %s", err)
|
||||
}
|
||||
@@ -228,20 +228,30 @@ func init() {
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: config.ConfigClientID,
|
||||
Help: "Microsoft App Client Id - leave blank normally.",
|
||||
Help: "Microsoft App Client Id\nLeave blank normally.",
|
||||
}, {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Microsoft App Client Secret - leave blank normally.",
|
||||
Help: "Microsoft App Client Secret\nLeave blank normally.",
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: "Chunk size to upload files with - must be multiple of 320k.",
|
||||
Default: fs.SizeSuffix(10 * 1024 * 1024),
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
flags.VarP(&chunkSize, "onedrive-chunk-size", "", "Above this size files will be chunked - must be multiple of 320k.")
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
ResourceURL string `config:"resource_url"`
|
||||
}
|
||||
|
||||
// Fs represents a remote one drive
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
srv *rest.Client // the connection to the one drive server
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
@@ -325,6 +335,7 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Respon
|
||||
resp, err = f.srv.CallJSON(&opts, nil, &info)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
|
||||
return info, resp, err
|
||||
}
|
||||
|
||||
@@ -343,26 +354,35 @@ func errorHandler(resp *http.Response) error {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
// get the resource URL from the config file0
|
||||
resourceURL := config.FileGet(name, configResourceURL, "")
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.ChunkSize%(320*1024) != 0 {
|
||||
return nil, errors.Errorf("chunk size %d is not a multiple of 320k", opt.ChunkSize)
|
||||
}
|
||||
// if we have a resource URL it's a business account otherwise a personal one
|
||||
isBusiness := opt.ResourceURL != ""
|
||||
var rootURL string
|
||||
var oauthConfig *oauth2.Config
|
||||
if resourceURL == "" {
|
||||
if !isBusiness {
|
||||
// personal account setup
|
||||
oauthConfig = oauthPersonalConfig
|
||||
rootURL = rootURLPersonal
|
||||
} else {
|
||||
// business account setup
|
||||
oauthConfig = oauthBusinessConfig
|
||||
rootURL = resourceURL + "_api/v2.0/drives/me"
|
||||
rootURL = opt.ResourceURL + "_api/v2.0/drives/me"
|
||||
sharedURL = opt.ResourceURL + "_api/v2.0/drives"
|
||||
|
||||
// update the URL in the AuthOptions
|
||||
oauthBusinessResource = oauth2.SetAuthURLParam("resource", resourceURL)
|
||||
oauthBusinessResource = oauth2.SetAuthURLParam("resource", opt.ResourceURL)
|
||||
}
|
||||
root = parsePath(root)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(name, oauthConfig)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure OneDrive: %v", err)
|
||||
}
|
||||
@@ -370,9 +390,10 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
isBusiness: resourceURL != "",
|
||||
isBusiness: isBusiness,
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
@@ -482,21 +503,18 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er
|
||||
}
|
||||
return "", false, err
|
||||
}
|
||||
if info.Folder == nil {
|
||||
if info.GetFolder() == nil {
|
||||
return "", false, errors.New("found file when looking for folder")
|
||||
}
|
||||
return info.ID, true, nil
|
||||
return info.GetID(), true, nil
|
||||
}
|
||||
|
||||
// CreateDir makes a directory with pathID as parent and name leaf
|
||||
func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
|
||||
// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
|
||||
func (f *Fs) CreateDir(dirID, leaf string) (newID string, err error) {
|
||||
// fs.Debugf(f, "CreateDir(%q, %q)\n", dirID, leaf)
|
||||
var resp *http.Response
|
||||
var info *api.Item
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/items/" + pathID + "/children",
|
||||
}
|
||||
opts := newOptsCall(dirID, "POST", "/children")
|
||||
mkdir := api.CreateItemRequest{
|
||||
Name: replaceReservedChars(leaf),
|
||||
ConflictBehavior: "fail",
|
||||
@@ -509,8 +527,9 @@ func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
|
||||
//fmt.Printf("...Error %v\n", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
//fmt.Printf("...Id %q\n", *info.Id)
|
||||
return info.ID, nil
|
||||
return info.GetID(), nil
|
||||
}
|
||||
|
||||
// list the objects into the function supplied
|
||||
@@ -527,10 +546,8 @@ type listAllFn func(*api.Item) bool
|
||||
func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
|
||||
// Top parameter asks for bigger pages of data
|
||||
// https://dev.onedrive.com/odata/optional-query-parameters.htm
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/items/" + dirID + "/children?top=1000",
|
||||
}
|
||||
opts := newOptsCall(dirID, "GET", "/children?top=1000")
|
||||
|
||||
OUTER:
|
||||
for {
|
||||
var result api.ListChildrenResponse
|
||||
@@ -547,7 +564,7 @@ OUTER:
|
||||
}
|
||||
for i := range result.Value {
|
||||
item := &result.Value[i]
|
||||
isFolder := item.Folder != nil
|
||||
isFolder := item.GetFolder() != nil
|
||||
if isFolder {
|
||||
if filesOnly {
|
||||
continue
|
||||
@@ -560,7 +577,7 @@ OUTER:
|
||||
if item.Deleted != nil {
|
||||
continue
|
||||
}
|
||||
item.Name = restoreReservedChars(item.Name)
|
||||
item.Name = restoreReservedChars(item.GetName())
|
||||
if fn(item) {
|
||||
found = true
|
||||
break OUTER
|
||||
@@ -595,13 +612,15 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
}
|
||||
var iErr error
|
||||
_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
|
||||
remote := path.Join(dir, info.Name)
|
||||
if info.Folder != nil {
|
||||
remote := path.Join(dir, info.GetName())
|
||||
folder := info.GetFolder()
|
||||
if folder != nil {
|
||||
// cache the directory ID for later lookups
|
||||
f.dirCache.Put(remote, info.ID)
|
||||
d := fs.NewDir(remote, time.Time(info.LastModifiedDateTime)).SetID(info.ID)
|
||||
if info.Folder != nil {
|
||||
d.SetItems(info.Folder.ChildCount)
|
||||
id := info.GetID()
|
||||
f.dirCache.Put(remote, id)
|
||||
d := fs.NewDir(remote, time.Time(info.GetLastModifiedDateTime())).SetID(id)
|
||||
if folder != nil {
|
||||
d.SetItems(folder.ChildCount)
|
||||
}
|
||||
entries = append(entries, d)
|
||||
} else {
|
||||
@@ -674,11 +693,9 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
|
||||
// deleteObject removes an object by ID
|
||||
func (f *Fs) deleteObject(id string) error {
|
||||
opts := rest.Opts{
|
||||
Method: "DELETE",
|
||||
Path: "/items/" + id,
|
||||
NoResponse: true,
|
||||
}
|
||||
opts := newOptsCall(id, "DELETE", "")
|
||||
opts.NoResponse = true
|
||||
|
||||
return f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.Call(&opts)
|
||||
return shouldRetry(resp, err)
|
||||
@@ -814,17 +831,17 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
}
|
||||
|
||||
// Copy the object
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/items/" + srcObj.id + "/action.copy",
|
||||
ExtraHeaders: map[string]string{"Prefer": "respond-async"},
|
||||
NoResponse: true,
|
||||
}
|
||||
opts := newOptsCall(srcObj.id, "POST", "/action.copy")
|
||||
opts.ExtraHeaders = map[string]string{"Prefer": "respond-async"}
|
||||
opts.NoResponse = true
|
||||
|
||||
id, _, _ := parseDirID(directoryID)
|
||||
|
||||
replacedLeaf := replaceReservedChars(leaf)
|
||||
copy := api.CopyItemRequest{
|
||||
Name: &replacedLeaf,
|
||||
ParentReference: api.ItemReference{
|
||||
ID: directoryID,
|
||||
ID: id,
|
||||
},
|
||||
}
|
||||
var resp *http.Response
|
||||
@@ -891,14 +908,14 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
}
|
||||
|
||||
// Move the object
|
||||
opts := rest.Opts{
|
||||
Method: "PATCH",
|
||||
Path: "/items/" + srcObj.id,
|
||||
}
|
||||
opts := newOptsCall(srcObj.id, "PATCH", "")
|
||||
|
||||
id, _, _ := parseDirID(directoryID)
|
||||
|
||||
move := api.MoveItemRequest{
|
||||
Name: replaceReservedChars(leaf),
|
||||
ParentReference: &api.ItemReference{
|
||||
ID: directoryID,
|
||||
ID: id,
|
||||
},
|
||||
// We set the mod time too as it gets reset otherwise
|
||||
FileSystemInfo: &api.FileSystemInfoFacet{
|
||||
@@ -1013,35 +1030,37 @@ func (o *Object) Size() int64 {
|
||||
|
||||
// setMetaData sets the metadata from info
|
||||
func (o *Object) setMetaData(info *api.Item) (err error) {
|
||||
if info.Folder != nil {
|
||||
if info.GetFolder() != nil {
|
||||
return errors.Wrapf(fs.ErrorNotAFile, "%q", o.remote)
|
||||
}
|
||||
o.hasMetaData = true
|
||||
o.size = info.Size
|
||||
o.size = info.GetSize()
|
||||
|
||||
// Docs: https://docs.microsoft.com/en-us/onedrive/developer/rest-api/resources/hashes
|
||||
//
|
||||
// We use SHA1 for onedrive personal and QuickXorHash for onedrive for business
|
||||
if info.File != nil {
|
||||
o.mimeType = info.File.MimeType
|
||||
if info.File.Hashes.Sha1Hash != "" {
|
||||
o.sha1 = strings.ToLower(info.File.Hashes.Sha1Hash)
|
||||
file := info.GetFile()
|
||||
if file != nil {
|
||||
o.mimeType = file.MimeType
|
||||
if file.Hashes.Sha1Hash != "" {
|
||||
o.sha1 = strings.ToLower(file.Hashes.Sha1Hash)
|
||||
}
|
||||
if info.File.Hashes.QuickXorHash != "" {
|
||||
h, err := base64.StdEncoding.DecodeString(info.File.Hashes.QuickXorHash)
|
||||
if file.Hashes.QuickXorHash != "" {
|
||||
h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", info.File.Hashes.QuickXorHash, err)
|
||||
fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
|
||||
} else {
|
||||
o.quickxorhash = hex.EncodeToString(h)
|
||||
}
|
||||
}
|
||||
}
|
||||
if info.FileSystemInfo != nil {
|
||||
o.modTime = time.Time(info.FileSystemInfo.LastModifiedDateTime)
|
||||
fileSystemInfo := info.GetFileSystemInfo()
|
||||
if fileSystemInfo != nil {
|
||||
o.modTime = time.Time(fileSystemInfo.LastModifiedDateTime)
|
||||
} else {
|
||||
o.modTime = time.Time(info.LastModifiedDateTime)
|
||||
o.modTime = time.Time(info.GetLastModifiedDateTime())
|
||||
}
|
||||
o.id = info.ID
|
||||
o.id = info.GetID()
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1080,9 +1099,20 @@ func (o *Object) ModTime() time.Time {
|
||||
|
||||
// setModTime sets the modification time of the local fs object
|
||||
func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "PATCH",
|
||||
Path: "/root:/" + rest.URLPathEscape(o.srvPath()),
|
||||
var opts rest.Opts
|
||||
_, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
|
||||
_, drive, rootURL := parseDirID(directoryID)
|
||||
if drive != "" {
|
||||
opts = rest.Opts{
|
||||
Method: "PATCH",
|
||||
RootURL: rootURL,
|
||||
Path: "/" + drive + "/root:/" + rest.URLPathEscape(o.srvPath()),
|
||||
}
|
||||
} else {
|
||||
opts = rest.Opts{
|
||||
Method: "PATCH",
|
||||
Path: "/root:/" + rest.URLPathEscape(o.srvPath()),
|
||||
}
|
||||
}
|
||||
update := api.SetFileSystemInfo{
|
||||
FileSystemInfo: api.FileSystemInfoFacet{
|
||||
@@ -1119,11 +1149,9 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
}
|
||||
fs.FixRangeOption(options, o.size)
|
||||
var resp *http.Response
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/items/" + o.id + "/content",
|
||||
Options: options,
|
||||
}
|
||||
opts := newOptsCall(o.id, "GET", "/content")
|
||||
opts.Options = options
|
||||
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(&opts)
|
||||
return shouldRetry(resp, err)
|
||||
@@ -1141,9 +1169,20 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
|
||||
// createUploadSession creates an upload session for the object
|
||||
func (o *Object) createUploadSession(modTime time.Time) (response *api.CreateUploadResponse, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/upload.createSession",
|
||||
leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
|
||||
id, drive, rootURL := parseDirID(directoryID)
|
||||
var opts rest.Opts
|
||||
if drive != "" {
|
||||
opts = rest.Opts{
|
||||
Method: "POST",
|
||||
RootURL: rootURL,
|
||||
Path: "/" + drive + "/items/" + id + ":/" + rest.URLPathEscape(leaf) + ":/upload.createSession",
|
||||
}
|
||||
} else {
|
||||
opts = rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/upload.createSession",
|
||||
}
|
||||
}
|
||||
createRequest := api.CreateUploadRequest{}
|
||||
createRequest.Item.FileSystemInfo.CreatedDateTime = api.Timestamp(modTime)
|
||||
@@ -1204,10 +1243,6 @@ func (o *Object) cancelUploadSession(url string) (err error) {
|
||||
|
||||
// uploadMultipart uploads a file using multipart upload
|
||||
func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
|
||||
if chunkSize%(320*1024) != 0 {
|
||||
return nil, errors.Errorf("chunk size %d is not a multiple of 320k", chunkSize)
|
||||
}
|
||||
|
||||
// Create upload session
|
||||
fs.Debugf(o, "Starting multipart upload")
|
||||
session, err := o.createUploadSession(modTime)
|
||||
@@ -1231,7 +1266,7 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (i
|
||||
remaining := size
|
||||
position := int64(0)
|
||||
for remaining > 0 {
|
||||
n := int64(chunkSize)
|
||||
n := int64(o.fs.opt.ChunkSize)
|
||||
if remaining < n {
|
||||
n = remaining
|
||||
}
|
||||
@@ -1251,11 +1286,24 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (i
|
||||
// uploadSinglepart uploads a file as a single part
|
||||
func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
|
||||
var resp *http.Response
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
|
||||
ContentLength: &size,
|
||||
Body: in,
|
||||
var opts rest.Opts
|
||||
_, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
|
||||
_, drive, rootURL := parseDirID(directoryID)
|
||||
if drive != "" {
|
||||
opts = rest.Opts{
|
||||
Method: "PUT",
|
||||
RootURL: rootURL,
|
||||
Path: "/" + drive + "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
|
||||
ContentLength: &size,
|
||||
Body: in,
|
||||
}
|
||||
} else {
|
||||
opts = rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
|
||||
ContentLength: &size,
|
||||
Body: in,
|
||||
}
|
||||
}
|
||||
// for go1.8 (see release notes) we must nil the Body if we want a
|
||||
// "Content-Length: 0" header which onedrive requires for all files.
|
||||
@@ -1269,6 +1317,7 @@ func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = o.setMetaData(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -1315,6 +1364,30 @@ func (o *Object) ID() string {
|
||||
return o.id
|
||||
}
|
||||
|
||||
func newOptsCall(id string, method string, route string) (opts rest.Opts) {
|
||||
id, drive, rootURL := parseDirID(id)
|
||||
|
||||
if drive != "" {
|
||||
return rest.Opts{
|
||||
Method: method,
|
||||
RootURL: rootURL,
|
||||
Path: "/" + drive + "/items/" + id + route,
|
||||
}
|
||||
}
|
||||
return rest.Opts{
|
||||
Method: method,
|
||||
Path: "/items/" + id + route,
|
||||
}
|
||||
}
|
||||
|
||||
func parseDirID(ID string) (string, string, string) {
|
||||
if strings.Index(ID, "#") >= 0 {
|
||||
s := strings.Split(ID, "#")
|
||||
return s[1], s[0], sharedURL
|
||||
}
|
||||
return ID, "", ""
|
||||
}
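newOptsCall and parseDirID above route a request either to the default drive ("/items/<id>/...") or, when the ID carries the "driveID#itemID" prefix produced by Item.GetID, to the shared-drives root URL. A small standalone sketch of that round trip; the IDs are invented, and the full URL is concatenated here for readability where the real code keeps RootURL and Path as separate rest.Opts fields.

package main

import (
	"fmt"
	"strings"
)

const sharedURL = "https://api.onedrive.com/v1.0/drives" // assumed root for shared drives

// parseID splits a combined "driveID#itemID" into item ID, drive ID and
// root URL, returning plain IDs unchanged with empty drive and root URL.
func parseID(id string) (itemID, driveID, rootURL string) {
	if i := strings.Index(id, "#"); i >= 0 {
		return id[i+1:], id[:i], sharedURL
	}
	return id, "", ""
}

// urlFor builds the URL that newOptsCall's RootURL and Path add up to.
func urlFor(id, route string) string {
	itemID, driveID, rootURL := parseID(id)
	if driveID != "" {
		return rootURL + "/" + driveID + "/items/" + itemID + route
	}
	return "/items/" + itemID + route
}

func main() {
	fmt.Println(urlFor("item42", "/children"))        // /items/item42/children
	fmt.Println(urlFor("drive7#item42", "/children")) // https://api.onedrive.com/v1.0/drives/drive7/items/item42/children
}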
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
|
||||
@@ -12,7 +12,8 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
@@ -37,23 +38,30 @@ func init() {
|
||||
Description: "OpenDrive",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "username",
|
||||
Help: "Username",
|
||||
Name: "username",
|
||||
Help: "Username",
|
||||
Required: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "Password.",
|
||||
IsPassword: true,
|
||||
Required: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
UserName string `config:"username"`
|
||||
Password string `config:"password"`
|
||||
}
|
||||
|
||||
// Fs represents a remote server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
username string // account name
|
||||
password string // auth key0
|
||||
srv *rest.Client // the connection to the server
|
||||
pacer *pacer.Pacer // To pace and retry the API calls
|
||||
session UserSessionInfo // contains the session data
|
||||
@@ -110,27 +118,31 @@ func (f *Fs) DirCacheFlush() {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
root = parsePath(root)
|
||||
username := config.FileGet(name, "username")
|
||||
if username == "" {
|
||||
if opt.UserName == "" {
|
||||
return nil, errors.New("username not found")
|
||||
}
|
||||
password, err := obscure.Reveal(config.FileGet(name, "password"))
|
||||
opt.Password, err = obscure.Reveal(opt.Password)
|
||||
if err != nil {
|
||||
return nil, errors.New("password coudl not revealed")
|
||||
return nil, errors.New("password could not revealed")
|
||||
}
|
||||
if password == "" {
|
||||
if opt.Password == "" {
|
||||
return nil, errors.New("password not found")
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
username: username,
|
||||
password: password,
|
||||
root: root,
|
||||
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
}
|
||||
|
||||
f.dirCache = dircache.New(root, "0", f)
|
||||
@@ -141,7 +153,7 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
// get sessionID
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
account := Account{Username: username, Password: password}
|
||||
account := Account{Username: opt.UserName, Password: opt.Password}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
|
||||
@@ -23,6 +23,8 @@ import (
|
||||
"github.com/ncw/rclone/backend/pcloud/api"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
@@ -65,26 +67,31 @@ func init() {
|
||||
Name: "pcloud",
|
||||
Description: "Pcloud",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string) {
|
||||
err := oauthutil.Config("pcloud", name, oauthConfig)
|
||||
Config: func(name string, m configmap.Mapper) {
|
||||
err := oauthutil.Config("pcloud", name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: config.ConfigClientID,
|
||||
Help: "Pcloud App Client Id - leave blank normally.",
|
||||
Help: "Pcloud App Client Id\nLeave blank normally.",
|
||||
}, {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Pcloud App Client Secret - leave blank normally.",
|
||||
Help: "Pcloud App Client Secret\nLeave blank normally.",
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
}
|
||||
|
||||
// Fs represents a remote pcloud
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
srv *rest.Client // the connection to the server
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
@@ -229,9 +236,15 @@ func errorHandler(resp *http.Response) error {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
root = parsePath(root)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(name, oauthConfig)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure Pcloud: %v", err)
|
||||
}
|
||||
@@ -239,6 +252,7 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
}
|
||||
|
||||
@@ -17,7 +17,8 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
@@ -34,49 +35,43 @@ func init() {
|
||||
Description: "QingCloud Object Storage",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "env_auth",
|
||||
Help: "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.",
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "false",
|
||||
Help: "Enter QingStor credentials in the next step",
|
||||
}, {
|
||||
Value: "true",
|
||||
Help: "Get QingStor credentials from the environment (env vars or IAM)",
|
||||
},
|
||||
},
|
||||
Name: "env_auth",
|
||||
Help: "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.",
|
||||
Default: false,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "false",
|
||||
Help: "Enter QingStor credentials in the next step",
|
||||
}, {
|
||||
Value: "true",
|
||||
Help: "Get QingStor credentials from the environment (env vars or IAM)",
|
||||
}},
|
||||
}, {
|
||||
Name: "access_key_id",
|
||||
Help: "QingStor Access Key ID - leave blank for anonymous access or runtime credentials.",
|
||||
Help: "QingStor Access Key ID\nLeave blank for anonymous access or runtime credentials.",
|
||||
}, {
|
||||
Name: "secret_access_key",
|
||||
Help: "QingStor Secret Access Key (password) - leave blank for anonymous access or runtime credentials.",
|
||||
Help: "QingStor Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Enter a endpoint URL to connection QingStor API.\nLeave blank will use the default value \"https://qingstor.com:443\"",
|
||||
}, {
|
||||
Name: "zone",
|
||||
Help: "Choose or Enter a zone to connect. Default is \"pek3a\".",
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "pek3a",
|
||||
|
||||
Help: "The Beijing (China) Three Zone\nNeeds location constraint pek3a.",
|
||||
},
|
||||
{
|
||||
Value: "sh1a",
|
||||
|
||||
Help: "The Shanghai (China) First Zone\nNeeds location constraint sh1a.",
|
||||
},
|
||||
{
|
||||
Value: "gd2a",
|
||||
|
||||
Help: "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.",
|
||||
},
|
||||
},
|
||||
Help: "Zone to connect to.\nDefault is \"pek3a\".",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "pek3a",
|
||||
Help: "The Beijing (China) Three Zone\nNeeds location constraint pek3a.",
|
||||
}, {
|
||||
Value: "sh1a",
|
||||
Help: "The Shanghai (China) First Zone\nNeeds location constraint sh1a.",
|
||||
}, {
|
||||
Value: "gd2a",
|
||||
Help: "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.",
|
||||
}},
|
||||
}, {
|
||||
Name: "connection_retries",
|
||||
Help: "Number of connnection retry.\nLeave blank will use the default value \"3\".",
|
||||
Name: "connection_retries",
|
||||
Help: "Number of connnection retries.",
|
||||
Default: 3,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
@@ -95,17 +90,28 @@ func timestampToTime(tp int64) time.Time {
|
||||
return tm.UTC()
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
AccessKeyID string `config:"access_key_id"`
|
||||
SecretAccessKey string `config:"secret_access_key"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
Zone string `config:"zone"`
|
||||
ConnectionRetries int `config:"connection_retries"`
|
||||
}
|
||||
|
||||
// Fs represents a remote qingstor server
|
||||
type Fs struct {
|
||||
name string // The name of the remote
|
||||
root string // The root is a subdir; it is a special object
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
svc *qs.Service // The connection to the qingstor server
|
||||
zone string // The zone we are working on
|
||||
bucket string // The bucket we are working on
|
||||
bucketOKMu sync.Mutex // mutex to protect bucketOK and bucketDeleted
|
||||
bucketOK bool // true if we have created the bucket
|
||||
bucketDeleted bool // true if we have deleted the bucket
|
||||
root string // The root is a subdir, is a special object
|
||||
features *fs.Features // optional features
|
||||
svc *qs.Service // The connection to the qingstor server
|
||||
}
|
||||
|
||||
// Object describes a qingstor object
|
||||
@@ -165,12 +171,12 @@ func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
|
||||
}
|
||||
|
||||
// qsConnection makes a connection to qingstor
|
||||
func qsServiceConnection(name string) (*qs.Service, error) {
|
||||
accessKeyID := config.FileGet(name, "access_key_id")
|
||||
secretAccessKey := config.FileGet(name, "secret_access_key")
|
||||
func qsServiceConnection(opt *Options) (*qs.Service, error) {
|
||||
accessKeyID := opt.AccessKeyID
|
||||
secretAccessKey := opt.SecretAccessKey
|
||||
|
||||
switch {
|
||||
case config.FileGetBool(name, "env_auth", false):
|
||||
case opt.EnvAuth:
|
||||
// No need for empty checks if "env_auth" is true
|
||||
case accessKeyID == "" && secretAccessKey == "":
|
||||
// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
|
||||
@@ -184,7 +190,7 @@ func qsServiceConnection(name string) (*qs.Service, error) {
|
||||
host := "qingstor.com"
|
||||
port := 443
|
||||
|
||||
endpoint := config.FileGet(name, "endpoint", "")
|
||||
endpoint := opt.Endpoint
|
||||
if endpoint != "" {
|
||||
_protocol, _host, _port, err := qsParseEndpoint(endpoint)
|
||||
|
||||
@@ -204,48 +210,49 @@ func qsServiceConnection(name string) (*qs.Service, error) {
|
||||
|
||||
}
|
||||
|
||||
connectionRetries := 3
|
||||
retries := config.FileGet(name, "connection_retries", "")
|
||||
if retries != "" {
|
||||
connectionRetries, _ = strconv.Atoi(retries)
|
||||
}
|
||||
|
||||
cf, err := qsConfig.NewDefault()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cf.AccessKeyID = accessKeyID
|
||||
cf.SecretAccessKey = secretAccessKey
|
||||
cf.Protocol = protocol
|
||||
cf.Host = host
|
||||
cf.Port = port
|
||||
cf.ConnectionRetries = connectionRetries
|
||||
cf.ConnectionRetries = opt.ConnectionRetries
|
||||
cf.Connection = fshttp.NewClient(fs.Config)
|
||||
|
||||
svc, _ := qs.Init(cf)
|
||||
|
||||
return svc, err
|
||||
return qs.Init(cf)
|
||||
}
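qsServiceConnection above and s3Connection further down select a credential mode with the same switch: an explicit env_auth wins, empty keys fall back to anonymous access, and a key supplied without its secret (or the reverse) is rejected. A tiny sketch of just that decision, detached from either SDK:

package main

import (
	"errors"
	"fmt"
)

// credentialMode mirrors the switch used in qsServiceConnection/s3Connection.
func credentialMode(envAuth bool, accessKeyID, secretAccessKey string) (string, error) {
	switch {
	case envAuth:
		return "runtime (env vars / IAM)", nil
	case accessKeyID == "" && secretAccessKey == "":
		return "anonymous", nil
	case accessKeyID == "":
		return "", errors.New("access_key_id not found")
	case secretAccessKey == "":
		return "", errors.New("secret_access_key not found")
	}
	return "static keys", nil
}

func main() {
	cases := []struct {
		env      bool
		key, sec string
	}{
		{true, "", ""},
		{false, "", ""},
		{false, "AKID", ""},
		{false, "AKID", "SECRET"},
	}
	for _, c := range cases {
		mode, err := credentialMode(c.env, c.key, c.sec)
		fmt.Println(mode, err)
	}
}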
|
||||
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bucket, key, err := qsParsePath(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
svc, err := qsServiceConnection(name)
|
||||
svc, err := qsServiceConnection(opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
zone := config.FileGet(name, "zone")
|
||||
if zone == "" {
|
||||
zone = "pek3a"
|
||||
if opt.Zone == "" {
|
||||
opt.Zone = "pek3a"
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
zone: zone,
|
||||
root: key,
|
||||
bucket: bucket,
|
||||
opt: *opt,
|
||||
svc: svc,
|
||||
zone: opt.Zone,
|
||||
bucket: bucket,
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
@@ -258,7 +265,7 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
f.root += "/"
|
||||
}
|
||||
//Check to see if the object exists
|
||||
bucketInit, err := svc.Bucket(bucket, zone)
|
||||
bucketInit, err := svc.Bucket(bucket, opt.Zone)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
backend/s3/s3.go
@@ -37,8 +37,8 @@ import (
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
@@ -82,8 +82,9 @@ func init() {
|
||||
Help: "Any other S3 compatible provider",
|
||||
}},
|
||||
}, {
|
||||
Name: "env_auth",
|
||||
Help: "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). Only applies if access_key_id and secret_access_key is blank.",
|
||||
Name: "env_auth",
|
||||
Help: "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).\nOnly applies if access_key_id and secret_access_key is blank.",
|
||||
Default: false,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "false",
|
||||
Help: "Enter AWS credentials in the next step",
|
||||
@@ -93,10 +94,10 @@ func init() {
|
||||
}},
|
||||
}, {
|
||||
Name: "access_key_id",
|
||||
Help: "AWS Access Key ID - leave blank for anonymous access or runtime credentials.",
|
||||
Help: "AWS Access Key ID.\nLeave blank for anonymous access or runtime credentials.",
|
||||
}, {
|
||||
Name: "secret_access_key",
|
||||
Help: "AWS Secret Access Key (password) - leave blank for anonymous access or runtime credentials.",
|
||||
Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region to connect to.",
|
||||
@@ -146,7 +147,7 @@ func init() {
|
||||
}},
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region to connect to. Leave blank if you are using an S3 clone and you don't have a region.",
|
||||
Help: "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
|
||||
Provider: "!AWS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
@@ -293,7 +294,7 @@ func init() {
|
||||
}},
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must be set to match the Region. Used when creating buckets only.",
|
||||
Help: "Location constraint - must be set to match the Region.\nUsed when creating buckets only.",
|
||||
Provider: "AWS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
@@ -340,7 +341,7 @@ func init() {
|
||||
}},
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must match endpoint when using IBM Cloud Public. For on-prem COS, do not make a selection from this list, hit enter",
|
||||
Help: "Location constraint - must match endpoint when using IBM Cloud Public.\nFor on-prem COS, do not make a selection from this list, hit enter",
|
||||
Provider: "IBMCOS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "us-standard",
|
||||
@@ -441,7 +442,7 @@ func init() {
|
||||
}},
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must be set to match the Region. Leave blank if not sure. Used when creating buckets only.",
|
||||
Help: "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
|
||||
Provider: "!AWS,IBMCOS",
|
||||
}, {
|
||||
Name: "acl",
|
||||
@@ -518,10 +519,28 @@ func init() {
|
||||
Value: "ONEZONE_IA",
|
||||
Help: "One Zone Infrequent Access storage class",
|
||||
}},
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: "Chunk size to use for uploading",
|
||||
Default: fs.SizeSuffix(s3manager.MinUploadPartSize),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "disable_checksum",
|
||||
Help: "Don't store MD5 checksum with object metadata",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "session_token",
|
||||
Help: "An AWS session token",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_concurrency",
|
||||
Help: "Concurrency for multipart uploads.",
|
||||
Default: 2,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
flags.VarP(&s3ChunkSize, "s3-chunk-size", "", "Chunk size to use for uploading")
|
||||
}
|
||||
|
||||
// Constants
|
||||
@@ -534,31 +553,36 @@ const (
|
||||
maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
// Flags
|
||||
s3ACL = flags.StringP("s3-acl", "", "", "Canned ACL used when creating buckets and/or storing objects in S3")
|
||||
s3StorageClass = flags.StringP("s3-storage-class", "", "", "Storage class to use when uploading S3 objects (STANDARD|REDUCED_REDUNDANCY|STANDARD_IA|ONEZONE_IA)")
|
||||
s3ChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize)
|
||||
s3DisableChecksum = flags.BoolP("s3-disable-checksum", "", false, "Don't store MD5 checksum with object metadata")
|
||||
s3UploadConcurrency = flags.IntP("s3-upload-concurrency", "", 2, "Concurrency for multipart uploads")
|
||||
)
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Provider string `config:"provider"`
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
AccessKeyID string `config:"access_key_id"`
|
||||
SecretAccessKey string `config:"secret_access_key"`
|
||||
Region string `config:"region"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
LocationConstraint string `config:"location_constraint"`
|
||||
ACL string `config:"acl"`
|
||||
ServerSideEncryption string `config:"server_side_encryption"`
|
||||
StorageClass string `config:"storage_class"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
DisableChecksum bool `config:"disable_checksum"`
|
||||
SessionToken string `config:"session_token"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
}
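The new chunk_size option defaults to s3manager.MinUploadPartSize and is checked against it in NewFs below; together with S3's cap on the number of parts in a multipart upload it bounds how large an object a given chunk size can carry. A back-of-the-envelope sketch, assuming the usual 5 MiB minimum part size and 10,000-part limit (neither constant is shown in this diff):

package main

import "fmt"

const (
	minUploadPartSize = 5 * 1024 * 1024 // assumed S3 minimum part size (5 MiB)
	maxUploadParts    = 10000           // assumed S3 maximum number of parts
)

func main() {
	chunkSize := int64(minUploadPartSize)
	// Largest object a multipart upload can hold at the default chunk size.
	fmt.Println(chunkSize * maxUploadParts / (1024 * 1024 * 1024)) // 48 (GiB, truncated)

	// Smallest chunk size needed to fit a 5 TiB object (the maxFileSize
	// constant above) into the part limit, similar to the "make parts as
	// small as possible" sizing done in Object.Update.
	const fiveTiB = int64(5) * 1024 * 1024 * 1024 * 1024
	needed := (fiveTiB + maxUploadParts - 1) / maxUploadParts
	fmt.Println(needed / (1024 * 1024)) // 524 (MiB, truncated)
}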
|
||||
|
||||
// Fs represents a remote s3 server
|
||||
type Fs struct {
|
||||
name string // the name of the remote
|
||||
root string // root of the bucket - ignore all objects above this
|
||||
features *fs.Features // optional features
|
||||
c *s3.S3 // the connection to the s3 server
|
||||
ses *session.Session // the s3 session
|
||||
bucket string // the bucket we are working on
|
||||
bucketOKMu sync.Mutex // mutex to protect bucket OK
|
||||
bucketOK bool // true if we have created the bucket
|
||||
bucketDeleted bool // true if we have deleted the bucket
|
||||
acl string // ACL for new buckets / objects
|
||||
locationConstraint string // location constraint of new buckets
|
||||
sse string // the type of server-side encryption
|
||||
storageClass string // storage class
|
||||
name string // the name of the remote
|
||||
root string // root of the bucket - ignore all objects above this
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
c *s3.S3 // the connection to the s3 server
|
||||
ses *session.Session // the s3 session
|
||||
bucket string // the bucket we are working on
|
||||
bucketOKMu sync.Mutex // mutex to protect bucket OK
|
||||
bucketOK bool // true if we have created the bucket
|
||||
bucketDeleted bool // true if we have deleted the bucket
|
||||
}
|
||||
|
||||
// Object describes a s3 object
|
||||
@@ -620,12 +644,12 @@ func s3ParsePath(path string) (bucket, directory string, err error) {
|
||||
}
|
||||
|
||||
// s3Connection makes a connection to s3
|
||||
func s3Connection(name string) (*s3.S3, *session.Session, error) {
|
||||
func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
|
||||
// Make the auth
|
||||
v := credentials.Value{
|
||||
AccessKeyID: config.FileGet(name, "access_key_id"),
|
||||
SecretAccessKey: config.FileGet(name, "secret_access_key"),
|
||||
SessionToken: config.FileGet(name, "session_token"),
|
||||
AccessKeyID: opt.AccessKeyID,
|
||||
SecretAccessKey: opt.SecretAccessKey,
|
||||
SessionToken: opt.SessionToken,
|
||||
}
|
||||
|
||||
lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service
|
||||
@@ -660,7 +684,7 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
|
||||
cred := credentials.NewChainCredentials(providers)
|
||||
|
||||
switch {
|
||||
case config.FileGetBool(name, "env_auth", false):
|
||||
case opt.EnvAuth:
|
||||
// No need for empty checks if "env_auth" is true
|
||||
case v.AccessKeyID == "" && v.SecretAccessKey == "":
|
||||
// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
|
||||
@@ -671,26 +695,24 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
|
||||
return nil, nil, errors.New("secret_access_key not found")
|
||||
}
|
||||
|
||||
endpoint := config.FileGet(name, "endpoint")
|
||||
region := config.FileGet(name, "region")
|
||||
if region == "" && endpoint == "" {
|
||||
endpoint = "https://s3.amazonaws.com/"
|
||||
if opt.Region == "" && opt.Endpoint == "" {
|
||||
opt.Endpoint = "https://s3.amazonaws.com/"
|
||||
}
|
||||
if region == "" {
|
||||
region = "us-east-1"
|
||||
if opt.Region == "" {
|
||||
opt.Region = "us-east-1"
|
||||
}
|
||||
awsConfig := aws.NewConfig().
|
||||
WithRegion(region).
|
||||
WithRegion(opt.Region).
|
||||
WithMaxRetries(maxRetries).
|
||||
WithCredentials(cred).
|
||||
WithEndpoint(endpoint).
|
||||
WithEndpoint(opt.Endpoint).
|
||||
WithHTTPClient(fshttp.NewClient(fs.Config)).
|
||||
WithS3ForcePathStyle(true)
|
||||
// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
|
||||
ses := session.New()
|
||||
c := s3.New(ses, awsConfig)
|
||||
if region == "other-v2-signature" {
|
||||
fs.Debugf(name, "Using v2 auth")
|
||||
if opt.Region == "other-v2-signature" {
|
||||
fs.Debugf(nil, "Using v2 auth")
|
||||
signer := func(req *request.Request) {
|
||||
// Ignore AnonymousCredentials object
|
||||
if req.Config.Credentials == credentials.AnonymousCredentials {
|
||||
@@ -706,40 +728,37 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opt.ChunkSize < fs.SizeSuffix(s3manager.MinUploadPartSize) {
|
||||
return nil, errors.Errorf("s3 chunk size (%v) must be >= %v", opt.ChunkSize, fs.SizeSuffix(s3manager.MinUploadPartSize))
|
||||
}
|
||||
bucket, directory, err := s3ParsePath(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, ses, err := s3Connection(name)
|
||||
c, ses, err := s3Connection(opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f := &Fs{
|
||||
name: name,
|
||||
c: c,
|
||||
bucket: bucket,
|
||||
ses: ses,
|
||||
acl: config.FileGet(name, "acl"),
|
||||
root: directory,
|
||||
locationConstraint: config.FileGet(name, "location_constraint"),
|
||||
sse: config.FileGet(name, "server_side_encryption"),
|
||||
storageClass: config.FileGet(name, "storage_class"),
|
||||
name: name,
|
||||
root: directory,
|
||||
opt: *opt,
|
||||
c: c,
|
||||
bucket: bucket,
|
||||
ses: ses,
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
BucketBased: true,
|
||||
}).Fill(f)
|
||||
if *s3ACL != "" {
|
||||
f.acl = *s3ACL
|
||||
}
|
||||
if *s3StorageClass != "" {
|
||||
f.storageClass = *s3StorageClass
|
||||
}
|
||||
if s3ChunkSize < fs.SizeSuffix(s3manager.MinUploadPartSize) {
|
||||
return nil, errors.Errorf("s3 chunk size must be >= %v", fs.SizeSuffix(s3manager.MinUploadPartSize))
|
||||
}
|
||||
if f.root != "" {
|
||||
f.root += "/"
|
||||
// Check to see if the object exists
|
||||
@@ -864,7 +883,7 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) error {
|
||||
remote := key[rootLength:]
|
||||
// is this a directory marker?
|
||||
if (strings.HasSuffix(remote, "/") || remote == "") && *object.Size == 0 {
|
||||
if recurse {
|
||||
if recurse && remote != "" {
|
||||
// add a directory in if --fast-list since will have no prefixes
|
||||
remote = remote[:len(remote)-1]
|
||||
err = fn(remote, &s3.Object{Key: &remote}, true)
|
||||
@@ -1064,11 +1083,11 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
}
|
||||
req := s3.CreateBucketInput{
|
||||
Bucket: &f.bucket,
|
||||
ACL: &f.acl,
|
||||
ACL: &f.opt.ACL,
|
||||
}
|
||||
if f.locationConstraint != "" {
|
||||
if f.opt.LocationConstraint != "" {
|
||||
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
|
||||
LocationConstraint: &f.locationConstraint,
|
||||
LocationConstraint: &f.opt.LocationConstraint,
|
||||
}
|
||||
}
|
||||
_, err := f.c.CreateBucket(&req)
|
||||
@@ -1297,7 +1316,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
|
||||
directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
|
||||
req := s3.CopyObjectInput{
|
||||
Bucket: &o.fs.bucket,
|
||||
ACL: &o.fs.acl,
|
||||
ACL: &o.fs.opt.ACL,
|
||||
Key: &key,
|
||||
ContentType: &mimeType,
|
||||
CopySource: aws.String(pathEscape(sourceKey)),
|
||||
@@ -1353,10 +1372,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
size := src.Size()
|
||||
|
||||
uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
|
||||
u.Concurrency = *s3UploadConcurrency
|
||||
u.Concurrency = o.fs.opt.UploadConcurrency
|
||||
u.LeavePartsOnError = false
|
||||
u.S3 = o.fs.c
|
||||
u.PartSize = int64(s3ChunkSize)
|
||||
u.PartSize = int64(o.fs.opt.ChunkSize)
|
||||
|
||||
if size == -1 {
|
||||
// Make parts as small as possible while still being able to upload to the
|
||||
@@ -1376,7 +1395,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
metaMtime: aws.String(swift.TimeToFloatString(modTime)),
|
||||
}
|
||||
|
||||
if !*s3DisableChecksum && size > uploader.PartSize {
|
||||
if !o.fs.opt.DisableChecksum && size > uploader.PartSize {
|
||||
hash, err := src.Hash(hash.MD5)
|
||||
|
||||
if err == nil && matchMd5.MatchString(hash) {
|
||||
@@ -1394,18 +1413,18 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
key := o.fs.root + o.remote
|
||||
req := s3manager.UploadInput{
|
||||
Bucket: &o.fs.bucket,
|
||||
ACL: &o.fs.acl,
|
||||
ACL: &o.fs.opt.ACL,
|
||||
Key: &key,
|
||||
Body: in,
|
||||
ContentType: &mimeType,
|
||||
Metadata: metadata,
|
||||
//ContentLength: &size,
|
||||
}
|
||||
if o.fs.sse != "" {
|
||||
req.ServerSideEncryption = &o.fs.sse
|
||||
if o.fs.opt.ServerSideEncryption != "" {
|
||||
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
|
||||
}
|
||||
if o.fs.storageClass != "" {
|
||||
req.StorageClass = &o.fs.storageClass
|
||||
if o.fs.opt.StorageClass != "" {
|
||||
req.StorageClass = &o.fs.opt.StorageClass
|
||||
}
|
||||
_, err = uploader.Upload(&req)
|
||||
if err != nil {
|
||||
|
||||
@@ -20,7 +20,8 @@ import (
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
@@ -38,10 +39,6 @@ const (
|
||||
|
||||
var (
|
||||
currentUser = readCurrentUser()
|
||||
|
||||
// Flags
|
||||
sftpAskPassword = flags.BoolP("sftp-ask-password", "", false, "Allow asking for SFTP password when needed.")
|
||||
sshPathOverride = flags.StringP("ssh-path-override", "", "", "Override path used by SSH connection.")
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -52,32 +49,28 @@ func init() {
|
||||
Options: []fs.Option{{
|
||||
Name: "host",
|
||||
Help: "SSH host to connect to",
|
||||
Optional: false,
|
||||
Required: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "example.com",
|
||||
Help: "Connect to example.com",
|
||||
}},
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "SSH username, leave blank for current username, " + currentUser,
|
||||
Optional: true,
|
||||
Name: "user",
|
||||
Help: "SSH username, leave blank for current username, " + currentUser,
|
||||
}, {
|
||||
Name: "port",
|
||||
Help: "SSH port, leave blank to use default (22)",
|
||||
Optional: true,
|
||||
Name: "port",
|
||||
Help: "SSH port, leave blank to use default (22)",
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "SSH password, leave blank to use ssh-agent.",
|
||||
Optional: true,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "key_file",
|
||||
Help: "Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.",
|
||||
Optional: true,
|
||||
Name: "key_file",
|
||||
Help: "Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.",
|
||||
}, {
|
||||
Name: "use_insecure_cipher",
|
||||
Help: "Enable the user of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.",
|
||||
Optional: true,
|
||||
Name: "use_insecure_cipher",
|
||||
Help: "Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.",
|
||||
Default: false,
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "false",
|
||||
@@ -88,30 +81,56 @@ func init() {
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "disable_hashcheck",
|
||||
Help: "Disable the execution of SSH commands to determine if remote file hashing is available. Leave blank or set to false to enable hashing (recommended), set to true to disable hashing.",
|
||||
Optional: true,
|
||||
Name: "disable_hashcheck",
|
||||
Default: false,
|
||||
Help: "Disable the execution of SSH commands to determine if remote file hashing is available.\nLeave blank or set to false to enable hashing (recommended), set to true to disable hashing.",
|
||||
}, {
|
||||
Name: "ask_password",
|
||||
Default: false,
|
||||
Help: "Allow asking for SFTP password when needed.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "path_override",
|
||||
Default: "",
|
||||
Help: "Override path used by SSH connection.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "set_modtime",
|
||||
Default: true,
|
||||
Help: "Set the modified time on the remote if set.",
|
||||
Advanced: true,
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Host string `config:"host"`
|
||||
User string `config:"user"`
|
||||
Port string `config:"port"`
|
||||
Pass string `config:"pass"`
|
||||
KeyFile string `config:"key_file"`
|
||||
UseInsecureCipher bool `config:"use_insecure_cipher"`
|
||||
DisableHashCheck bool `config:"disable_hashcheck"`
|
||||
AskPassword bool `config:"ask_password"`
|
||||
PathOverride string `config:"path_override"`
|
||||
SetModTime bool `config:"set_modtime"`
|
||||
}
|
||||
|
||||
// Fs stores the interface to the remote SFTP files
|
||||
type Fs struct {
|
||||
name string
|
||||
root string
|
||||
features *fs.Features // optional features
|
||||
config *ssh.ClientConfig
|
||||
host string
|
||||
port string
|
||||
url string
|
||||
mkdirLock *stringLock
|
||||
cachedHashes *hash.Set
|
||||
hashcheckDisabled bool
|
||||
setModtime bool
|
||||
poolMu sync.Mutex
|
||||
pool []*conn
|
||||
connLimit *rate.Limiter // for limiting number of connections per second
|
||||
name string
|
||||
root string
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
config *ssh.ClientConfig
|
||||
url string
|
||||
mkdirLock *stringLock
|
||||
cachedHashes *hash.Set
|
||||
poolMu sync.Mutex
|
||||
pool []*conn
|
||||
connLimit *rate.Limiter // for limiting number of connections per second
|
||||
}
|
||||
|
||||
// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
|
||||
@@ -197,7 +216,7 @@ func (f *Fs) sftpConnection() (c *conn, err error) {
|
||||
c = &conn{
|
||||
err: make(chan error, 1),
|
||||
}
|
||||
c.sshClient, err = Dial("tcp", f.host+":"+f.port, f.config)
|
||||
c.sshClient, err = Dial("tcp", f.opt.Host+":"+f.opt.Port, f.config)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't connect SSH")
|
||||
}
|
||||
@@ -270,35 +289,33 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
|
||||
|
||||
// NewFs creates a new Fs object from the name and root. It connects to
|
||||
// the host specified in the config file.
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
user := config.FileGet(name, "user")
|
||||
host := config.FileGet(name, "host")
|
||||
port := config.FileGet(name, "port")
|
||||
pass := config.FileGet(name, "pass")
|
||||
keyFile := config.FileGet(name, "key_file")
|
||||
insecureCipher := config.FileGetBool(name, "use_insecure_cipher")
|
||||
hashcheckDisabled := config.FileGetBool(name, "disable_hashcheck")
|
||||
setModtime := config.FileGetBool(name, "set_modtime", true)
|
||||
if user == "" {
|
||||
user = currentUser
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if port == "" {
|
||||
port = "22"
|
||||
if opt.User == "" {
|
||||
opt.User = currentUser
|
||||
}
|
||||
if opt.Port == "" {
|
||||
opt.Port = "22"
|
||||
}
|
||||
sshConfig := &ssh.ClientConfig{
|
||||
User: user,
|
||||
User: opt.User,
|
||||
Auth: []ssh.AuthMethod{},
|
||||
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
|
||||
Timeout: fs.Config.ConnectTimeout,
|
||||
}
|
||||
|
||||
if insecureCipher {
|
||||
if opt.UseInsecureCipher {
|
||||
sshConfig.Config.SetDefaults()
|
||||
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc")
|
||||
}
|
||||
|
||||
// Add ssh agent-auth if no password or file specified
|
||||
if pass == "" && keyFile == "" {
|
||||
if opt.Pass == "" && opt.KeyFile == "" {
|
||||
sshAgentClient, _, err := sshagent.New()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't connect to ssh-agent")
|
||||
@@ -311,8 +328,8 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
}
|
||||
|
||||
// Load key file if specified
|
||||
if keyFile != "" {
|
||||
key, err := ioutil.ReadFile(keyFile)
|
||||
if opt.KeyFile != "" {
|
||||
key, err := ioutil.ReadFile(opt.KeyFile)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read private key file")
|
||||
}
|
||||
@@ -324,8 +341,8 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
}
|
||||
|
||||
// Auth from password if specified
|
||||
if pass != "" {
|
||||
clearpass, err := obscure.Reveal(pass)
|
||||
if opt.Pass != "" {
|
||||
clearpass, err := obscure.Reveal(opt.Pass)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -333,23 +350,20 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
}
|
||||
|
||||
// Ask for password if none was defined and we're allowed to
|
||||
if pass == "" && *sftpAskPassword {
|
||||
if opt.Pass == "" && opt.AskPassword {
|
||||
_, _ = fmt.Fprint(os.Stderr, "Enter SFTP password: ")
|
||||
clearpass := config.ReadPassword()
|
||||
sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
config: sshConfig,
|
||||
host: host,
|
||||
port: port,
|
||||
url: "sftp://" + user + "@" + host + ":" + port + "/" + root,
|
||||
hashcheckDisabled: hashcheckDisabled,
|
||||
setModtime: setModtime,
|
||||
mkdirLock: newStringLock(),
|
||||
connLimit: rate.NewLimiter(rate.Limit(connectionsPerSecond), 1),
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
config: sshConfig,
|
||||
url: "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root,
|
||||
mkdirLock: newStringLock(),
|
||||
connLimit: rate.NewLimiter(rate.Limit(connectionsPerSecond), 1),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
@@ -663,7 +677,7 @@ func (f *Fs) Hashes() hash.Set {
|
||||
return *f.cachedHashes
|
||||
}
|
||||
|
||||
if f.hashcheckDisabled {
|
||||
if f.opt.DisableHashCheck {
|
||||
return hash.Set(hash.None)
|
||||
}
|
||||
|
||||
@@ -758,8 +772,8 @@ func (o *Object) Hash(r hash.Type) (string, error) {
|
||||
session.Stdout = &stdout
|
||||
session.Stderr = &stderr
|
||||
escapedPath := shellEscape(o.path())
|
||||
if *sshPathOverride != "" {
|
||||
escapedPath = shellEscape(path.Join(*sshPathOverride, o.remote))
|
||||
if o.fs.opt.PathOverride != "" {
|
||||
escapedPath = shellEscape(path.Join(o.fs.opt.PathOverride, o.remote))
|
||||
}
|
||||
err = session.Run(hashCmd + " " + escapedPath)
|
||||
if err != nil {
|
||||
@@ -852,7 +866,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "SetModTime")
|
||||
}
|
||||
if o.fs.setModtime {
|
||||
if o.fs.opt.SetModTime {
|
||||
err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
|
||||
o.fs.putSftpConnection(&c, err)
|
||||
if err != nil {
|
||||
|
||||
@@ -14,8 +14,8 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
@@ -31,11 +31,6 @@ const (
|
||||
listChunks = 1000 // chunk size to read directory listings
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
chunkSize = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
@@ -43,8 +38,9 @@ func init() {
|
||||
Description: "Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "env_auth",
|
||||
Help: "Get swift credentials from environment variables in standard OpenStack form.",
|
||||
Name: "env_auth",
|
||||
Help: "Get swift credentials from environment variables in standard OpenStack form.",
|
||||
Default: false,
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "false",
|
||||
@@ -107,11 +103,13 @@ func init() {
|
||||
Name: "auth_token",
|
||||
Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)",
|
||||
}, {
|
||||
Name: "auth_version",
|
||||
Help: "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)",
|
||||
Name: "auth_version",
|
||||
Help: "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)",
|
||||
Default: 0,
|
||||
}, {
|
||||
Name: "endpoint_type",
|
||||
Help: "Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE)",
|
||||
Name: "endpoint_type",
|
||||
Help: "Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE)",
|
||||
Default: "public",
|
||||
Examples: []fs.OptionExample{{
|
||||
Help: "Public (default, choose this if not sure)",
|
||||
Value: "public",
|
||||
@@ -122,10 +120,32 @@ func init() {
|
||||
Help: "Admin",
|
||||
Value: "admin",
|
||||
}},
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: "Above this size files will be chunked into a _segments container.",
|
||||
Default: fs.SizeSuffix(5 * 1024 * 1024 * 1024),
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
flags.VarP(&chunkSize, "swift-chunk-size", "", "Above this size files will be chunked into a _segments container.")
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
User string `config:"user"`
|
||||
Key string `config:"key"`
|
||||
Auth string `config:"auth"`
|
||||
UserID string `config:"user_id"`
|
||||
Domain string `config:"domain"`
|
||||
Tenant string `config:"tenant"`
|
||||
TenantID string `config:"tenant_id"`
|
||||
TenantDomain string `config:"tenant_domain"`
|
||||
Region string `config:"region"`
|
||||
StorageURL string `config:"storage_url"`
|
||||
AuthToken string `config:"auth_token"`
|
||||
AuthVersion int `config:"auth_version"`
|
||||
EndpointType string `config:"endpoint_type"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
}
|
||||
|
||||
// Fs represents a remote swift server
|
||||
@@ -133,6 +153,7 @@ type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
features *fs.Features // optional features
|
||||
opt Options // options for this backend
|
||||
c *swift.Connection // the connection to the swift server
|
||||
container string // the container we are working on
|
||||
containerOKMu sync.Mutex // mutex to protect container OK
|
||||
@@ -195,27 +216,27 @@ func parsePath(path string) (container, directory string, err error) {
|
||||
}
|
||||
|
||||
// swiftConnection makes a connection to swift
|
||||
func swiftConnection(name string) (*swift.Connection, error) {
|
||||
func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
|
||||
c := &swift.Connection{
|
||||
// Keep these in the same order as the Config for ease of checking
|
||||
UserName: config.FileGet(name, "user"),
|
||||
ApiKey: config.FileGet(name, "key"),
|
||||
AuthUrl: config.FileGet(name, "auth"),
|
||||
UserId: config.FileGet(name, "user_id"),
|
||||
Domain: config.FileGet(name, "domain"),
|
||||
Tenant: config.FileGet(name, "tenant"),
|
||||
TenantId: config.FileGet(name, "tenant_id"),
|
||||
TenantDomain: config.FileGet(name, "tenant_domain"),
|
||||
Region: config.FileGet(name, "region"),
|
||||
StorageUrl: config.FileGet(name, "storage_url"),
|
||||
AuthToken: config.FileGet(name, "auth_token"),
|
||||
AuthVersion: config.FileGetInt(name, "auth_version", 0),
|
||||
EndpointType: swift.EndpointType(config.FileGet(name, "endpoint_type", "public")),
|
||||
UserName: opt.User,
|
||||
ApiKey: opt.Key,
|
||||
AuthUrl: opt.Auth,
|
||||
UserId: opt.UserID,
|
||||
Domain: opt.Domain,
|
||||
Tenant: opt.Tenant,
|
||||
TenantId: opt.TenantID,
|
||||
TenantDomain: opt.TenantDomain,
|
||||
Region: opt.Region,
|
||||
StorageUrl: opt.StorageURL,
|
||||
AuthToken: opt.AuthToken,
|
||||
AuthVersion: opt.AuthVersion,
|
||||
EndpointType: swift.EndpointType(opt.EndpointType),
|
||||
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
|
||||
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
|
||||
Transport: fshttp.NewTransport(fs.Config),
|
||||
}
|
||||
if config.FileGetBool(name, "env_auth", false) {
|
||||
if opt.EnvAuth {
|
||||
err := c.ApplyEnvironment()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read environment variables")
|
||||
@@ -251,13 +272,14 @@ func swiftConnection(name string) (*swift.Connection, error) {
|
||||
//
|
||||
// if noCheckContainer is set then the Fs won't check the container
|
||||
// exists before creating it.
|
||||
func NewFsWithConnection(name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
|
||||
func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
|
||||
container, directory, err := parsePath(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
c: c,
|
||||
container: container,
|
||||
segmentsContainer: container + "_segments",
|
||||
@@ -288,12 +310,19 @@ func NewFsWithConnection(name, root string, c *swift.Connection, noCheckContaine
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
c, err := swiftConnection(name)
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewFsWithConnection(name, root, c, false)
|
||||
|
||||
c, err := swiftConnection(opt, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewFsWithConnection(opt, name, root, c, false)
|
||||
}
|
||||
|
||||
// Return an Object from a path
|
||||
@@ -871,7 +900,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
|
||||
fs.Debugf(o, "Uploading segments into %q seems done (%v)", o.fs.segmentsContainer, err)
|
||||
break
|
||||
}
|
||||
n := int64(chunkSize)
|
||||
n := int64(o.fs.opt.ChunkSize)
|
||||
if size != -1 {
|
||||
n = min(left, n)
|
||||
headers["Content-Length"] = strconv.FormatInt(n, 10) // set Content-Length as we know it
|
||||
@@ -921,7 +950,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
contentType := fs.MimeType(src)
|
||||
headers := m.ObjectHeaders()
|
||||
uniquePrefix := ""
|
||||
if size > int64(chunkSize) || size == -1 {
|
||||
if size > int64(o.fs.opt.ChunkSize) || size == -1 {
|
||||
uniquePrefix, err = o.updateChunks(in, headers, size, contentType)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -32,6 +32,8 @@ import (
|
||||
"github.com/ncw/rclone/backend/webdav/odrvcookie"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
@@ -56,15 +58,14 @@ func init() {
|
||||
Options: []fs.Option{{
|
||||
Name: "url",
|
||||
Help: "URL of http host to connect to",
|
||||
Optional: false,
|
||||
Required: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "https://example.com",
|
||||
Help: "Connect to example.com",
|
||||
}},
|
||||
}, {
|
||||
Name: "vendor",
|
||||
Help: "Name of the Webdav site/service/software you are using",
|
||||
Optional: false,
|
||||
Name: "vendor",
|
||||
Help: "Name of the Webdav site/service/software you are using",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "nextcloud",
|
||||
Help: "Nextcloud",
|
||||
@@ -79,30 +80,37 @@ func init() {
|
||||
Help: "Other site/service or software",
|
||||
}},
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "User name",
|
||||
Optional: true,
|
||||
Name: "user",
|
||||
Help: "User name",
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "Password.",
|
||||
Optional: true,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "bearer_token",
|
||||
Help: "Bearer token instead of user/pass (eg a Macaroon)",
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
URL string `config:"url"`
|
||||
Vendor string `config:"vendor"`
|
||||
User string `config:"user"`
|
||||
Pass string `config:"pass"`
|
||||
}
|
||||
|
||||
// Fs represents a remote webdav
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
endpoint *url.URL // URL of the host
|
||||
endpointURL string // endpoint as a string
|
||||
srv *rest.Client // the connection to the one drive server
|
||||
pacer *pacer.Pacer // pacer for API calls
|
||||
user string // username
|
||||
pass string // password
|
||||
vendor string // name of the vendor
|
||||
precision time.Duration // mod time precision
|
||||
canStream bool // set if can stream
|
||||
useOCMtime bool // set if can use X-OC-Mtime
|
||||
@@ -182,6 +190,7 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.Prop, err error) {
|
||||
ExtraHeaders: map[string]string{
|
||||
"Depth": "1",
|
||||
},
|
||||
NoRedirect: true,
|
||||
}
|
||||
var result api.Multistatus
|
||||
var resp *http.Response
|
||||
@@ -191,7 +200,13 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.Prop, err error) {
|
||||
})
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
// does not exist
|
||||
if apiErr.StatusCode == http.StatusNotFound {
|
||||
switch apiErr.StatusCode {
|
||||
case http.StatusNotFound:
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
case http.StatusMovedPermanently, http.StatusFound, http.StatusSeeOther:
|
||||
// Some sort of redirect - go doesn't deal with these properly (it resets
|
||||
// the method to GET). However we can assume that if it was redirected the
|
||||
// object was not found.
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
}
|
||||
@@ -253,26 +268,36 @@ func (o *Object) filePath() string {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
endpoint := config.FileGet(name, "url")
|
||||
if !strings.HasSuffix(endpoint, "/") {
|
||||
endpoint += "/"
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rootIsDir := strings.HasSuffix(root, "/")
|
||||
root = strings.Trim(root, "/")
|
||||
|
||||
user := config.FileGet(name, "user")
|
||||
pass := config.FileGet(name, "pass")
|
||||
if pass != "" {
|
||||
bearerToken := config.FileGet(name, "bearer_token")
|
||||
if !strings.HasSuffix(opt.URL, "/") {
|
||||
opt.URL += "/"
|
||||
}
|
||||
if opt.Pass != "" {
|
||||
var err error
|
||||
pass, err = obscure.Reveal(pass)
|
||||
opt.Pass, err = obscure.Reveal(opt.Pass)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't decrypt password")
|
||||
}
|
||||
}
|
||||
vendor := config.FileGet(name, "vendor")
|
||||
if opt.Vendor == "" {
|
||||
opt.Vendor = "other"
|
||||
}
|
||||
root = strings.Trim(root, "/")
|
||||
|
||||
// Parse the endpoint
|
||||
u, err := url.Parse(endpoint)
|
||||
u, err := url.Parse(opt.URL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -280,24 +305,28 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
endpoint: u,
|
||||
endpointURL: u.String(),
|
||||
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(u.String()).SetUserPass(user, pass),
|
||||
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(u.String()),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
user: user,
|
||||
pass: pass,
|
||||
precision: fs.ModTimeNotSupported,
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(f)
|
||||
if user != "" || pass != "" {
|
||||
f.srv.SetUserPass(opt.User, opt.Pass)
|
||||
} else if bearerToken != "" {
|
||||
f.srv.SetHeader("Authorization", "BEARER "+bearerToken)
|
||||
}
|
||||
f.srv.SetErrorHandler(errorHandler)
|
||||
err = f.setQuirks(vendor)
|
||||
err = f.setQuirks(opt.Vendor)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if root != "" {
|
||||
if root != "" && !rootIsDir {
|
||||
// Check to see if the root is actually an existing file
|
||||
remote := path.Base(root)
|
||||
f.root = path.Dir(root)
|
||||
@@ -321,10 +350,6 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
|
||||
// setQuirks adjusts the Fs for the vendor passed in
|
||||
func (f *Fs) setQuirks(vendor string) error {
|
||||
if vendor == "" {
|
||||
vendor = "other"
|
||||
}
|
||||
f.vendor = vendor
|
||||
switch vendor {
|
||||
case "owncloud":
|
||||
f.canStream = true
|
||||
@@ -337,7 +362,7 @@ func (f *Fs) setQuirks(vendor string) error {
|
||||
// To mount sharepoint, two Cookies are required
|
||||
// They have to be set instead of BasicAuth
|
||||
f.srv.RemoveHeader("Authorization") // We don't need this Header if using cookies
|
||||
spCk := odrvcookie.New(f.user, f.pass, f.endpointURL)
|
||||
spCk := odrvcookie.New(f.opt.User, f.opt.Pass, f.endpointURL)
|
||||
spCookies, err := spCk.Cookies()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -542,6 +567,11 @@ func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption
|
||||
// mkParentDir makes the parent of the native path dirPath if
|
||||
// necessary and any directories above that
|
||||
func (f *Fs) mkParentDir(dirPath string) error {
|
||||
// defer log.Trace(dirPath, "")("")
|
||||
// chop off trailing / if it exists
|
||||
if strings.HasSuffix(dirPath, "/") {
|
||||
dirPath = dirPath[:len(dirPath)-1]
|
||||
}
|
||||
parent := path.Dir(dirPath)
|
||||
if parent == "." {
|
||||
parent = ""
|
||||
@@ -551,10 +581,15 @@ func (f *Fs) mkParentDir(dirPath string) error {
|
||||
|
||||
// mkdir makes the directory and parents using native paths
|
||||
func (f *Fs) mkdir(dirPath string) error {
|
||||
// defer log.Trace(dirPath, "")("")
|
||||
// We assume the root is already created
|
||||
if dirPath == "" {
|
||||
return nil
|
||||
}
|
||||
// Collections must end with /
|
||||
if !strings.HasSuffix(dirPath, "/") {
|
||||
dirPath += "/"
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "MKCOL",
|
||||
Path: dirPath,
|
||||
|
||||
@@ -16,6 +16,8 @@ import (
|
||||
yandex "github.com/ncw/rclone/backend/yandex/api"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
@@ -51,29 +53,35 @@ func init() {
|
||||
Name: "yandex",
|
||||
Description: "Yandex Disk",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string) {
|
||||
err := oauthutil.Config("yandex", name, oauthConfig)
|
||||
Config: func(name string, m configmap.Mapper) {
|
||||
err := oauthutil.Config("yandex", name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: config.ConfigClientID,
|
||||
Help: "Yandex Client Id - leave blank normally.",
|
||||
Help: "Yandex Client Id\nLeave blank normally.",
|
||||
}, {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Yandex Client Secret - leave blank normally.",
|
||||
Help: "Yandex Client Secret\nLeave blank normally.",
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Token string `config:"token"`
|
||||
}
|
||||
|
||||
// Fs represents a remote yandex
|
||||
type Fs struct {
|
||||
name string
|
||||
root string //root path
|
||||
root string // root path
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
yd *yandex.Client // client for rest api
|
||||
diskRoot string //root path with "disk:/" container name
|
||||
diskRoot string // root path with "disk:/" container name
|
||||
}
|
||||
|
||||
// Object describes a swift object
|
||||
@@ -109,11 +117,9 @@ func (f *Fs) Features() *fs.Features {
|
||||
}
|
||||
|
||||
// read access token from ConfigFile string
|
||||
func getAccessToken(name string) (*oauth2.Token, error) {
|
||||
// Read the token from the config file
|
||||
tokenConfig := config.FileGet(name, "token")
|
||||
func getAccessToken(opt *Options) (*oauth2.Token, error) {
|
||||
//Get access token from config string
|
||||
decoder := json.NewDecoder(strings.NewReader(tokenConfig))
|
||||
decoder := json.NewDecoder(strings.NewReader(opt.Token))
|
||||
var result *oauth2.Token
|
||||
err := decoder.Decode(&result)
|
||||
if err != nil {
|
||||
@@ -123,9 +129,16 @@ func getAccessToken(name string) (*oauth2.Token, error) {
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
//read access token from config
|
||||
token, err := getAccessToken(name)
|
||||
token, err := getAccessToken(opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -135,6 +148,7 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
yd: yandexDisk,
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"text/template"
|
||||
@@ -66,28 +67,33 @@ var archFlags = map[string][]string{
|
||||
}
|
||||
|
||||
// runEnv - run a shell command with env
|
||||
func runEnv(args, env []string) {
|
||||
func runEnv(args, env []string) error {
|
||||
if *debug {
|
||||
args = append([]string{"echo"}, args...)
|
||||
}
|
||||
cmd := exec.Command(args[0], args[1:]...)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if env != nil {
|
||||
cmd.Env = append(os.Environ(), env...)
|
||||
}
|
||||
if *debug {
|
||||
log.Printf("args = %v, env = %v\n", args, cmd.Env)
|
||||
}
|
||||
err := cmd.Run()
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to run %v: %v", args, err)
|
||||
log.Print("----------------------------")
|
||||
log.Printf("Failed to run %v: %v", args, err)
|
||||
log.Printf("Command output was:\n%s", out)
|
||||
log.Print("----------------------------")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// run a shell command
|
||||
func run(args ...string) {
|
||||
runEnv(args, nil)
|
||||
err := runEnv(args, nil)
|
||||
if err != nil {
|
||||
log.Fatalf("Exiting after error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// chdir or die
|
||||
@@ -160,8 +166,8 @@ func buildDebAndRpm(dir, version, goarch string) []string {
|
||||
return artifacts
|
||||
}
|
||||
|
||||
// build the binary in dir
|
||||
func compileArch(version, goos, goarch, dir string) {
|
||||
// build the binary in dir returning success or failure
|
||||
func compileArch(version, goos, goarch, dir string) bool {
|
||||
log.Printf("Compiling %s/%s", goos, goarch)
|
||||
output := filepath.Join(dir, "rclone")
|
||||
if goos == "windows" {
|
||||
@@ -191,7 +197,11 @@ func compileArch(version, goos, goarch, dir string) {
|
||||
if flags, ok := archFlags[goarch]; ok {
|
||||
env = append(env, flags...)
|
||||
}
|
||||
runEnv(args, env)
|
||||
err = runEnv(args, env)
|
||||
if err != nil {
|
||||
log.Printf("Error compiling %s/%s: %v", goos, goarch, err)
|
||||
return false
|
||||
}
|
||||
if !*compileOnly {
|
||||
artifacts := []string{buildZip(dir)}
|
||||
// build a .deb and .rpm if appropriate
|
||||
@@ -207,6 +217,7 @@ func compileArch(version, goos, goarch, dir string) {
|
||||
run("rm", "-rf", dir)
|
||||
}
|
||||
log.Printf("Done compiling %s/%s", goos, goarch)
|
||||
return true
|
||||
}
|
||||
|
||||
func compile(version string) {
|
||||
@@ -231,6 +242,8 @@ func compile(version string) {
|
||||
log.Fatalf("Bad -exclude regexp: %v", err)
|
||||
}
|
||||
compiled := 0
|
||||
var failuresMu sync.Mutex
|
||||
var failures []string
|
||||
for _, osarch := range osarches {
|
||||
if excludeRe.MatchString(osarch) || !includeRe.MatchString(osarch) {
|
||||
continue
|
||||
@@ -246,13 +259,22 @@ func compile(version string) {
|
||||
}
|
||||
dir := filepath.Join("rclone-" + version + "-" + userGoos + "-" + goarch)
|
||||
run <- func() {
|
||||
compileArch(version, goos, goarch, dir)
|
||||
if !compileArch(version, goos, goarch, dir) {
|
||||
failuresMu.Lock()
|
||||
failures = append(failures, goos+"/"+goarch)
|
||||
failuresMu.Unlock()
|
||||
}
|
||||
}
|
||||
compiled++
|
||||
}
|
||||
close(run)
|
||||
wg.Wait()
|
||||
log.Printf("Compiled %d arches in %v", compiled, time.Since(start))
|
||||
if len(failures) > 0 {
|
||||
sort.Strings(failures)
|
||||
log.Printf("%d compile failures:\n %s\n", len(failures), strings.Join(failures, "\n "))
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
|
||||
@@ -8,8 +8,6 @@ import (
|
||||
|
||||
"github.com/ncw/rclone/backend/cache"
|
||||
"github.com/ncw/rclone/cmd"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@@ -27,17 +25,6 @@ Print cache stats for a remote in JSON format
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
|
||||
_, configName, _, err := fs.ParseRemote(args[0])
|
||||
if err != nil {
|
||||
fs.Errorf("cachestats", "%s", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if !config.FileGetBool(configName, "read_only", false) {
|
||||
config.FileSet(configName, "read_only", "true")
|
||||
defer config.FileDeleteKey(configName, "read_only")
|
||||
}
|
||||
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
var fsCache *cache.Fs
|
||||
|
||||
cmd/cmd.go
@@ -16,6 +16,7 @@ import (
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
@@ -151,12 +152,12 @@ func ShowVersion() {
|
||||
// It returns a string with the file name if points to a file
|
||||
// otherwise "".
|
||||
func NewFsFile(remote string) (fs.Fs, string) {
|
||||
fsInfo, configName, fsPath, err := fs.ParseRemote(remote)
|
||||
_, _, fsPath, err := fs.ParseRemote(remote)
|
||||
if err != nil {
|
||||
fs.CountError(err)
|
||||
log.Fatalf("Failed to create file system for %q: %v", remote, err)
|
||||
}
|
||||
f, err := fsInfo.NewFs(configName, fsPath)
|
||||
f, err := fs.NewFs(remote)
|
||||
switch err {
|
||||
case fs.ErrorIsFile:
|
||||
return f, path.Base(fsPath)
|
||||
@@ -245,7 +246,7 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
|
||||
// If file exists then srcFileName != "", however if the file
|
||||
// doesn't exist then we assume it is a directory...
|
||||
if srcFileName != "" {
|
||||
dstRemote, dstFileName = fspath.RemoteSplit(dstRemote)
|
||||
dstRemote, dstFileName = fspath.Split(dstRemote)
|
||||
if dstRemote == "" {
|
||||
dstRemote = "."
|
||||
}
|
||||
@@ -268,7 +269,7 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
|
||||
|
||||
// NewFsDstFile creates a new dst fs with a destination file name from the arguments
|
||||
func NewFsDstFile(args []string) (fdst fs.Fs, dstFileName string) {
|
||||
dstRemote, dstFileName := fspath.RemoteSplit(args[0])
|
||||
dstRemote, dstFileName := fspath.Split(args[0])
|
||||
if dstRemote == "" {
|
||||
dstRemote = "."
|
||||
}
|
||||
@@ -496,3 +497,51 @@ func resolveExitCode(err error) {
|
||||
os.Exit(exitCodeUsageError)
|
||||
}
|
||||
}
|
||||
|
||||
// AddBackendFlags creates flags for all the backend options
|
||||
func AddBackendFlags() {
|
||||
for _, fsInfo := range fs.Registry {
|
||||
done := map[string]struct{}{}
|
||||
for i := range fsInfo.Options {
|
||||
opt := &fsInfo.Options[i]
|
||||
// Skip if done already (eg with Provider options)
|
||||
if _, doneAlready := done[opt.Name]; doneAlready {
|
||||
continue
|
||||
}
|
||||
done[opt.Name] = struct{}{}
|
||||
// Make a flag from each option
|
||||
name := strings.Replace(opt.Name, "_", "-", -1) // convert snake_case to kebab-case
|
||||
if !opt.NoPrefix {
|
||||
name = fsInfo.Prefix + "-" + name
|
||||
}
|
||||
found := pflag.CommandLine.Lookup(name) != nil
|
||||
if !found {
|
||||
// Take first line of help only
|
||||
help := strings.TrimSpace(opt.Help)
|
||||
if nl := strings.IndexRune(help, '\n'); nl >= 0 {
|
||||
help = help[:nl]
|
||||
}
|
||||
help = strings.TrimSpace(help)
|
||||
flag := pflag.CommandLine.VarPF(opt, name, string(opt.ShortOpt), help)
|
||||
if _, isBool := opt.Default.(bool); isBool {
|
||||
flag.NoOptDefVal = "true"
|
||||
}
|
||||
// Hide on the command line if requested
|
||||
if opt.Hide&fs.OptionHideCommandLine != 0 {
|
||||
flag.Hidden = true
|
||||
}
|
||||
} else {
|
||||
fs.Errorf(nil, "Not adding duplicate flag --%s", name)
|
||||
}
|
||||
//flag.Hidden = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Main runs rclone interpreting flags and commands out of os.Args
|
||||
func Main() {
|
||||
AddBackendFlags()
|
||||
if err := Root.Execute(); err != nil {
|
||||
log.Fatalf("Fatal error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -40,14 +40,14 @@ use it like this
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(2, 11, command, args)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
fsInfo, configName, _, err := fs.ParseRemote(args[0])
|
||||
fsInfo, _, _, config, err := fs.ConfigFs(args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if fsInfo.Name != "crypt" {
|
||||
return errors.New("The remote needs to be of type \"crypt\"")
|
||||
}
|
||||
cipher, err := crypt.NewCipher(configName)
|
||||
cipher, err := crypt.NewCipher(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -102,14 +102,14 @@ can be processed line by line as each item is written one to a line.
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
var cipher crypt.Cipher
|
||||
if showEncrypted {
|
||||
fsInfo, configName, _, err := fs.ParseRemote(args[0])
|
||||
fsInfo, _, _, config, err := fs.ConfigFs(args[0])
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
if fsInfo.Name != "crypt" {
|
||||
log.Fatalf("The remote needs to be of type \"crypt\"")
|
||||
}
|
||||
cipher, err = crypt.NewCipher(configName)
|
||||
cipher, err = crypt.NewCipher(config)
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/ncw/rclone/vfs/vfsflags"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/net/context" // switch to "context" when we stop supporting go1.8
|
||||
|
||||
"golang.org/x/net/webdav"
|
||||
)
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@ Rclone

Rclone is a command line program to sync files and directories to and from:

* {{< provider name="Amazon Drive" home="https://www.amazon.com/clouddrive" config="/amazonclouddrive/" >}}
* {{< provider name="Amazon Drive" home="https://www.amazon.com/clouddrive" config="/amazonclouddrive/" >}} ([See note](/amazonclouddrive/#status))
* {{< provider name="Amazon S3" home="https://aws.amazon.com/s3/" config="/s3/" >}}
* {{< provider name="Backblaze B2" home="https://www.backblaze.com/b2/cloud-storage.html" config="/b2/" >}}
* {{< provider name="Box" home="https://www.box.com/" config="/box/" >}}

@@ -7,9 +7,24 @@ date: "2017-06-10"
<i class="fa fa-amazon"></i> Amazon Drive
-----------------------------------------

Paths are specified as `remote:path`
Amazon Drive, formerly known as Amazon Cloud Drive, is a cloud storage
service run by Amazon for consumers.

Paths may be as deep as required, eg `remote:directory/subdirectory`.
## Status

**Important:** rclone supports Amazon Drive only if you have your own
set of API keys. Unfortunately the [Amazon Drive developer
program](https://developer.amazon.com/amazon-drive) is now closed to
new entries so if you don't already have your own set of keys you will
not be able to use rclone with Amazon Drive.

For the history on why rclone no longer has a set of Amazon Drive API
keys see [the forum](https://forum.rclone.org/t/rclone-has-been-banned-from-amazon-drive/2314).

If you happen to know anyone who works at Amazon then please ask them
to re-instate rclone into the Amazon Drive developer program - thanks!

## Setup

The initial setup for Amazon Drive involves getting a token from
Amazon which you need to do in your browser. `rclone config` walks
@@ -21,10 +36,8 @@ Amazon credentials out of the source code. The proxy runs in Google's
very secure App Engine environment and doesn't store any credentials
which pass through it.

**NB** rclone doesn't not currently have its own Amazon Drive
credentials (see [the
forum](https://forum.rclone.org/t/rclone-has-been-banned-from-amazon-drive/)
for why) so you will either need to have your own `client_id` and
Since rclone doesn't currently have its own Amazon Drive credentials,
you will either need to have your own `client_id` and
`client_secret` with Amazon Drive, or use a third party oauth proxy,
in which case you will need to enter `client_id`, `client_secret`,
`auth_url` and `token_url`.

@@ -169,3 +169,6 @@ Contributors
* Kasper Byrdal Nielsen <byrdal76@gmail.com>
* Benjamin Joseph Dag <bjdag1234@users.noreply.github.com>
* themylogin <themylogin@gmail.com>
* Onno Zweers <onno.zweers@surfsara.nl>
* Jasper Lievisse Adriaanse <jasper@humppa.nl>
* sandeepkru <sandeep.ummadi@gmail.com>

@@ -117,6 +117,36 @@ MD5 hashes are stored with blobs. However blobs that were uploaded in
chunks only have an MD5 if the source remote was capable of MD5
hashes, eg the local disk.

### Authenticating with Azure Blob Storage

Rclone has 3 ways of authenticating with Azure Blob Storage:

#### Account and Key

This is the most straightforward and least flexible way. Just fill in the `account` and `key` lines and leave the rest blank.

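For example, an account/key remote would end up looking something like this in the config file (a minimal sketch; the remote name, account name and key below are placeholders, not real credentials):

```
[azblob]
type = azureblob
account = youraccountname
key = base64encodedkey==
```
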
#### SAS URL

This can be an account level SAS URL or a container level SAS URL.

To use it, leave `account` and `key` blank and fill in `sas_url`.

An account level SAS URL or container level SAS URL can be obtained from the Azure portal or Azure Storage Explorer.
To get a container level SAS URL, right click on a container in the Azure Blob explorer in the Azure portal.

If you use a container level SAS URL, rclone operations are permitted only on that particular container, eg

    rclone ls azureblob:container or rclone ls azureblob:

Since the container name is already part of the SAS URL, you can leave it empty as well.

However these will not work:

    rclone lsd azureblob:
    rclone ls azureblob:othercontainer

This would be useful for temporarily allowing third parties access to a single container or for putting credentials into an untrusted environment.

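For illustration, a SAS URL remote might look something like this in the config file (a minimal sketch; the remote name and URL are placeholders, with the query string being whatever the Azure portal generated for you):

```
[azblobsas]
type = azureblob
sas_url = https://youraccount.blob.core.windows.net/container?sv=...&sig=...
```

With a container level SAS URL like this, `rclone ls azblobsas:` or `rclone ls azblobsas:container` work as described above.
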
### Multipart uploads ###

Rclone supports multipart uploads with Azure Blob storage. Files

@@ -30,53 +30,17 @@ n/s/q> n
|
||||
name> remote
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Amazon Drive
|
||||
\ "amazon cloud drive"
|
||||
2 / Amazon S3 (also Dreamhost, Ceph, Minio)
|
||||
\ "s3"
|
||||
3 / Backblaze B2
|
||||
\ "b2"
|
||||
4 / Box
|
||||
\ "box"
|
||||
5 / Dropbox
|
||||
\ "dropbox"
|
||||
6 / Encrypt/Decrypt a remote
|
||||
\ "crypt"
|
||||
7 / FTP Connection
|
||||
\ "ftp"
|
||||
8 / Google Cloud Storage (this is not Google Drive)
|
||||
\ "google cloud storage"
|
||||
9 / Google Drive
|
||||
\ "drive"
|
||||
10 / Hubic
|
||||
\ "hubic"
|
||||
11 / Local Disk
|
||||
\ "local"
|
||||
12 / Microsoft Azure Blob Storage
|
||||
\ "azureblob"
|
||||
13 / Microsoft OneDrive
|
||||
\ "onedrive"
|
||||
14 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
|
||||
\ "swift"
|
||||
15 / Pcloud
|
||||
\ "pcloud"
|
||||
16 / QingCloud Object Storage
|
||||
\ "qingstor"
|
||||
17 / SSH/SFTP Connection
|
||||
\ "sftp"
|
||||
18 / WebDAV
|
||||
[snip]
|
||||
22 / Webdav
|
||||
\ "webdav"
|
||||
19 / Yandex Disk
|
||||
\ "yandex"
|
||||
20 / http Connection
|
||||
\ "http"
|
||||
[snip]
|
||||
Storage> webdav
|
||||
URL of http host to connect to
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Connect to example.com
|
||||
\ "https://example.com"
|
||||
url> https://example.com/remote.php/webdav/
|
||||
Name of the WebDAV site/service/software you are using
|
||||
Name of the Webdav site/service/software you are using
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Nextcloud
|
||||
\ "nextcloud"
|
||||
@@ -98,13 +62,17 @@ Enter the password:
|
||||
password:
|
||||
Confirm the password:
|
||||
password:
|
||||
Bearer token instead of user/pass (eg a Macaroon)
|
||||
bearer_token>
|
||||
Remote config
|
||||
--------------------
|
||||
[remote]
|
||||
type = webdav
|
||||
url = https://example.com/remote.php/webdav/
|
||||
vendor = nextcloud
|
||||
user = user
|
||||
pass = *** ENCRYPTED ***
|
||||
bearer_token =
|
||||
--------------------
|
||||
y) Yes this is OK
|
||||
e) Edit this remote
|
||||
@@ -133,6 +101,10 @@ Owncloud or Nextcloud rclone will support modified times.

Hashes are not supported.

## Provider notes ##

See below for notes on specific providers.

### Owncloud ###

Click on the settings cog in the bottom right of the page and this
@@ -149,7 +121,7 @@ Owncloud does. This [may be
fixed](https://github.com/nextcloud/nextcloud-snap/issues/365) in the
future.

## Put.io ##
### Put.io ###

put.io can be accessed in a read only way using webdav.

@@ -174,9 +146,9 @@ mount.

For more help see [the put.io webdav docs](http://help.put.io/apps-and-integrations/ftp-and-webdav).

## Sharepoint ##
### Sharepoint ###

Can be used with Sharepoint provided by OneDrive for Business
Rclone can be used with Sharepoint provided by OneDrive for Business
or Office365 Education Accounts.
This feature is only needed for a few of these Accounts,
mostly Office365 Education ones. These accounts are sometimes not
@@ -213,4 +185,27 @@ url = https://[YOUR-DOMAIN]-my.sharepoint.com/personal/[YOUR-EMAIL]/Documents
vendor = other
user = YourEmailAddress
pass = encryptedpassword
```

### dCache ###

dCache is a storage system with WebDAV doors that, besides basic and X.509,
support authentication with [Macaroons](https://www.dcache.org/manuals/workshop-2017-05-29-Umea/000-Final/anupam_macaroons_v02.pdf) (bearer tokens).

Configure as normal using the `other` type. Don't enter a username or
password; instead, enter your Macaroon as the `bearer_token`.

The config will end up looking something like this.

```
[dcache]
type = webdav
url = https://dcache...
vendor = other
user =
pass =
bearer_token = your-macaroon
```

There is a [script](https://github.com/onnozweers/dcache-scripts/blob/master/get-share-link) that
obtains a Macaroon from a dCache WebDAV endpoint, and creates an rclone config file.

fs/config.go
@@ -2,6 +2,7 @@ package fs
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
@@ -14,7 +15,15 @@ var (
|
||||
//
|
||||
// This is a function pointer to decouple the config
|
||||
// implementation from the fs
|
||||
ConfigFileGet = func(section, key string, defaultVal ...string) string { return "" }
|
||||
ConfigFileGet = func(section, key string) (string, bool) { return "", false }
|
||||
|
||||
// Set a value into the config file
|
||||
//
|
||||
// This is a function pointer to decouple the config
|
||||
// implementation from the fs
|
||||
ConfigFileSet = func(section, key, value string) {
|
||||
Errorf(nil, "No config handler to set %q = %q in section %q of the config file", key, value, section)
|
||||
}
|
||||
|
||||
// CountError counts an error. If any errors have been
|
||||
// counted then it will exit with a non zero error code.
|
||||
@@ -103,3 +112,16 @@ func NewConfig() *ConfigInfo {
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// ConfigToEnv converts a config section and name, eg ("myremote",
|
||||
// "ignore-size") into an environment name
|
||||
// "RCLONE_CONFIG_MYREMOTE_IGNORE_SIZE"
|
||||
func ConfigToEnv(section, name string) string {
|
||||
return "RCLONE_CONFIG_" + strings.ToUpper(strings.Replace(section+"_"+name, "-", "_", -1))
|
||||
}
|
||||
|
||||
// OptionToEnv converts an option name, eg "ignore-size" into an
|
||||
// environment name "RCLONE_IGNORE_SIZE"
|
||||
func OptionToEnv(name string) string {
|
||||
return "RCLONE_" + strings.ToUpper(strings.Replace(name, "-", "_", -1))
|
||||
}
|
||||
|
||||
@@ -27,9 +27,11 @@ import (
|
||||
"github.com/Unknwon/goconfig"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/driveletter"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/fspath"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/crypto/nacl/secretbox"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
@@ -80,8 +82,9 @@ var (
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Set the function pointer up in fs
|
||||
fs.ConfigFileGet = FileGet
|
||||
// Set the function pointers up in fs
|
||||
fs.ConfigFileGet = FileGetFlag
|
||||
fs.ConfigFileSet = FileSet
|
||||
}
|
||||
|
||||
func getConfigData() *goconfig.ConfigFile {
|
||||
@@ -151,11 +154,24 @@ func makeConfigPath() string {
|
||||
return homeconf
|
||||
}
|
||||
|
||||
// Check to see if user supplied a --config variable or environment
|
||||
// variable. We can't use pflag for this because it isn't initialised
|
||||
// yet so we search the command line manually.
|
||||
_, configSupplied := os.LookupEnv("RCLONE_CONFIG")
|
||||
for _, item := range os.Args {
|
||||
if item == "--config" || strings.HasPrefix(item, "--config=") {
|
||||
configSupplied = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Default to ./.rclone.conf (current working directory)
|
||||
fs.Errorf(nil, "Couldn't find home directory or read HOME or XDG_CONFIG_HOME environment variables.")
|
||||
fs.Errorf(nil, "Defaulting to storing config in current directory.")
|
||||
fs.Errorf(nil, "Use --config flag to workaround.")
|
||||
fs.Errorf(nil, "Error was: %v", err)
|
||||
if !configSupplied {
|
||||
fs.Errorf(nil, "Couldn't find home directory or read HOME or XDG_CONFIG_HOME environment variables.")
|
||||
fs.Errorf(nil, "Defaulting to storing config in current directory.")
|
||||
fs.Errorf(nil, "Use --config flag to workaround.")
|
||||
fs.Errorf(nil, "Error was: %v", err)
|
||||
}
|
||||
return hiddenConfigFileName
|
||||
}
|
||||
|
||||
@@ -691,7 +707,8 @@ func RemoteConfig(name string) {
|
||||
fmt.Printf("Remote config\n")
|
||||
f := MustFindByName(name)
|
||||
if f.Config != nil {
|
||||
f.Config(name)
|
||||
m := fs.ConfigMap(f, name)
|
||||
f.Config(name, m)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -731,7 +748,7 @@ func ChooseOption(o *fs.Option, name string) string {
|
||||
fmt.Println(o.Help)
|
||||
if o.IsPassword {
|
||||
actions := []string{"yYes type in my own password", "gGenerate random password"}
|
||||
if o.Optional {
|
||||
if !o.Required {
|
||||
actions = append(actions, "nNo leave this optional password blank")
|
||||
}
|
||||
var password string
|
||||
@@ -765,19 +782,52 @@ func ChooseOption(o *fs.Option, name string) string {
|
||||
}
|
||||
return obscure.MustObscure(password)
|
||||
}
|
||||
if len(o.Examples) > 0 {
|
||||
var values []string
|
||||
var help []string
|
||||
for _, example := range o.Examples {
|
||||
if matchProvider(example.Provider, subProvider) {
|
||||
values = append(values, example.Value)
|
||||
help = append(help, example.Help)
|
||||
}
|
||||
}
|
||||
return Choose(o.Name, values, help, true)
|
||||
what := fmt.Sprintf("%T value", o.Default)
|
||||
switch o.Default.(type) {
|
||||
case bool:
|
||||
what = "boolean value (true or false)"
|
||||
case fs.SizeSuffix:
|
||||
what = "size with suffix k,M,G,T"
|
||||
case fs.Duration:
|
||||
what = "duration s,m,h,d,w,M,y"
|
||||
case int, int8, int16, int32, int64:
|
||||
what = "signed integer"
|
||||
case uint, byte, uint16, uint32, uint64:
|
||||
what = "unsigned integer"
|
||||
}
|
||||
fmt.Printf("%s> ", o.Name)
|
||||
return ReadLine()
|
||||
var in string
|
||||
for {
|
||||
fmt.Printf("Enter a %s. Press Enter for the default (%q).\n", what, fmt.Sprint(o.Default))
|
||||
if len(o.Examples) > 0 {
|
||||
var values []string
|
||||
var help []string
|
||||
for _, example := range o.Examples {
|
||||
if matchProvider(example.Provider, subProvider) {
|
||||
values = append(values, example.Value)
|
||||
help = append(help, example.Help)
|
||||
}
|
||||
}
|
||||
in = Choose(o.Name, values, help, true)
|
||||
} else {
|
||||
fmt.Printf("%s> ", o.Name)
|
||||
in = ReadLine()
|
||||
}
|
||||
if in == "" {
|
||||
if o.Required && fmt.Sprint(o.Default) == "" {
|
||||
fmt.Printf("This value is required and it has no default.\n")
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
newIn, err := configstruct.StringToInterface(o.Default, in)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to parse %q: %v\n", in, err)
|
||||
continue
|
||||
}
|
||||
in = fmt.Sprint(newIn) // canonicalise
|
||||
break
|
||||
}
|
||||
return in
|
||||
}
|
||||
|
||||
// UpdateRemote adds the keyValues passed in to the remote of name.
|
||||
@@ -846,8 +896,9 @@ func JSONListProviders() error {
|
||||
// fsOption returns an Option describing the possible remotes
|
||||
func fsOption() *fs.Option {
|
||||
o := &fs.Option{
|
||||
Name: "Storage",
|
||||
Help: "Type of storage to configure.",
|
||||
Name: "Storage",
|
||||
Help: "Type of storage to configure.",
|
||||
Default: "",
|
||||
}
|
||||
for _, item := range fs.Registry {
|
||||
example := fs.OptionExample{
|
||||
@@ -865,12 +916,12 @@ func NewRemoteName() (name string) {
|
||||
for {
|
||||
fmt.Printf("name> ")
|
||||
name = ReadLine()
|
||||
parts := fs.Matcher.FindStringSubmatch(name + ":")
|
||||
parts := fspath.Matcher.FindStringSubmatch(name + ":")
|
||||
switch {
|
||||
case name == "":
|
||||
fmt.Printf("Can't use empty name.\n")
|
||||
case driveletter.IsDriveLetter(name):
|
||||
fmt.Printf("Can't use %q as it can be confused a drive letter.\n", name)
|
||||
fmt.Printf("Can't use %q as it can be confused with a drive letter.\n", name)
|
||||
case parts == nil:
|
||||
fmt.Printf("Can't use %q as it has invalid characters in it.\n", name)
|
||||
default:
|
||||
@@ -879,17 +930,61 @@ func NewRemoteName() (name string) {
|
||||
}
|
||||
}
|
||||
|
||||
// NewRemote make a new remote from its name
|
||||
func NewRemote(name string) {
|
||||
newType := ChooseOption(fsOption(), name)
|
||||
getConfigData().SetValue(name, "type", newType)
|
||||
ri := fs.MustFind(newType)
|
||||
for _, option := range ri.Options {
|
||||
subProvider := getConfigData().MustValue(name, fs.ConfigProvider, "")
|
||||
if matchProvider(option.Provider, subProvider) {
|
||||
getConfigData().SetValue(name, option.Name, ChooseOption(&option, name))
|
||||
// editOptions edits the options. If new is true then it just allows
|
||||
// entry and doesn't show any old values.
|
||||
func editOptions(ri *fs.RegInfo, name string, new bool) {
|
||||
hasAdvanced := false
|
||||
for _, advanced := range []bool{false, true} {
|
||||
if advanced {
|
||||
if !hasAdvanced {
|
||||
break
|
||||
}
|
||||
fmt.Printf("Edit advanced config? (y/n)\n")
|
||||
if !Confirm() {
|
||||
break
|
||||
}
|
||||
}
|
||||
for _, option := range ri.Options {
|
||||
hasAdvanced = hasAdvanced || option.Advanced
|
||||
if option.Advanced != advanced {
|
||||
continue
|
||||
}
|
||||
subProvider := getConfigData().MustValue(name, fs.ConfigProvider, "")
|
||||
if matchProvider(option.Provider, subProvider) {
|
||||
if !new {
|
||||
fmt.Printf("Value %q = %q\n", option.Name, FileGet(name, option.Name))
|
||||
fmt.Printf("Edit? (y/n)>\n")
|
||||
if !Confirm() {
|
||||
continue
|
||||
}
|
||||
}
|
||||
FileSet(name, option.Name, ChooseOption(&option, name))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NewRemote make a new remote from its name
|
||||
func NewRemote(name string) {
|
||||
var (
|
||||
newType string
|
||||
ri *fs.RegInfo
|
||||
err error
|
||||
)
|
||||
|
||||
// Set the type first
|
||||
for {
|
||||
newType = ChooseOption(fsOption(), name)
|
||||
ri, err = fs.Find(newType)
|
||||
if err != nil {
|
||||
fmt.Printf("Bad remote %q: %v\n", newType, err)
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
getConfigData().SetValue(name, "type", newType)
|
||||
|
||||
editOptions(ri, name, true)
|
||||
RemoteConfig(name)
|
||||
if OkRemote(name) {
|
||||
SaveConfig()
|
||||
@@ -902,25 +997,8 @@ func NewRemote(name string) {
|
||||
func EditRemote(ri *fs.RegInfo, name string) {
|
||||
ShowRemote(name)
|
||||
fmt.Printf("Edit remote\n")
|
||||
subProvider := getConfigData().MustValue(name, fs.ConfigProvider, "")
|
||||
for {
|
||||
for _, option := range ri.Options {
|
||||
key := option.Name
|
||||
value := FileGet(name, key)
|
||||
if !matchProvider(option.Provider, subProvider) {
|
||||
continue
|
||||
}
|
||||
fmt.Printf("Value %q = %q\n", key, value)
|
||||
fmt.Printf("Edit? (y/n)>\n")
|
||||
if Confirm() {
|
||||
newValue := ChooseOption(&option, name)
|
||||
getConfigData().SetValue(name, key, newValue)
|
||||
// Update subProvider if it changed
|
||||
if key == fs.ConfigProvider {
|
||||
subProvider = newValue
|
||||
}
|
||||
}
|
||||
}
|
||||
editOptions(ri, name, false)
|
||||
if OkRemote(name) {
|
||||
break
|
||||
}
|
||||
@@ -1075,8 +1153,8 @@ func Authorize(args []string) {
|
||||
log.Fatalf("Invalid number of arguments: %d", len(args))
|
||||
}
|
||||
newType := args[0]
|
||||
fs := fs.MustFind(newType)
|
||||
if fs.Config == nil {
|
||||
f := fs.MustFind(newType)
|
||||
if f.Config == nil {
|
||||
log.Fatalf("Can't authorize fs %q", newType)
|
||||
}
|
||||
// Name used for temporary fs
|
||||
@@ -1091,14 +1169,15 @@ func Authorize(args []string) {
|
||||
getConfigData().SetValue(name, ConfigClientID, args[1])
|
||||
getConfigData().SetValue(name, ConfigClientSecret, args[2])
|
||||
}
|
||||
fs.Config(name)
|
||||
m := fs.ConfigMap(f, name)
|
||||
f.Config(name, m)
|
||||
}
|
||||
|
||||
// configToEnv converts a config section and name, eg ("myremote",
|
||||
// "ignore-size") into an environment name
|
||||
// "RCLONE_CONFIG_MYREMOTE_IGNORE_SIZE"
|
||||
func configToEnv(section, name string) string {
|
||||
return "RCLONE_CONFIG_" + strings.ToUpper(strings.Replace(section+"_"+name, "-", "_", -1))
|
||||
// FileGetFlag gets the config key under section returning the
|
||||
// value and true if found, or ("", false) otherwise
|
||||
func FileGetFlag(section, key string) (string, bool) {
|
||||
newValue, err := getConfigData().GetValue(section, key)
|
||||
return newValue, err == nil
|
||||
}
|
||||
|
||||
// FileGet gets the config key under section returning the
|
||||
@@ -1106,7 +1185,7 @@ func configToEnv(section, name string) string {
|
||||
//
|
||||
// It looks up defaults in the environment if they are present
|
||||
func FileGet(section, key string, defaultVal ...string) string {
|
||||
envKey := configToEnv(section, key)
|
||||
envKey := fs.ConfigToEnv(section, key)
|
||||
newValue, found := os.LookupEnv(envKey)
|
||||
if found {
|
||||
defaultVal = []string{newValue}
|
||||
@@ -1114,46 +1193,14 @@ func FileGet(section, key string, defaultVal ...string) string {
|
||||
return getConfigData().MustValue(section, key, defaultVal...)
|
||||
}
|
||||
|
||||
// FileGetBool gets the config key under section returning the
|
||||
// default or false if not set.
|
||||
//
|
||||
// It looks up defaults in the environment if they are present
|
||||
func FileGetBool(section, key string, defaultVal ...bool) bool {
|
||||
envKey := configToEnv(section, key)
|
||||
newValue, found := os.LookupEnv(envKey)
|
||||
if found {
|
||||
newBool, err := strconv.ParseBool(newValue)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Couldn't parse %q into bool - ignoring: %v", envKey, err)
|
||||
} else {
|
||||
defaultVal = []bool{newBool}
|
||||
}
|
||||
}
|
||||
return getConfigData().MustBool(section, key, defaultVal...)
|
||||
}
|
||||
|
||||
// FileGetInt gets the config key under section returning the
|
||||
// default or 0 if not set.
|
||||
//
|
||||
// It looks up defaults in the environment if they are present
|
||||
func FileGetInt(section, key string, defaultVal ...int) int {
|
||||
envKey := configToEnv(section, key)
|
||||
newValue, found := os.LookupEnv(envKey)
|
||||
if found {
|
||||
newInt, err := strconv.Atoi(newValue)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Couldn't parse %q into int - ignoring: %v", envKey, err)
|
||||
} else {
|
||||
defaultVal = []int{newInt}
|
||||
}
|
||||
}
|
||||
return getConfigData().MustInt(section, key, defaultVal...)
|
||||
}
|
||||
|
||||
// FileSet sets the key in section to value. It doesn't save
|
||||
// the config file.
|
||||
func FileSet(section, key, value string) {
|
||||
getConfigData().SetValue(section, key, value)
|
||||
if value != "" {
|
||||
getConfigData().SetValue(section, key, value)
|
||||
} else {
|
||||
FileDeleteKey(section, key)
|
||||
}
|
||||
}
|
||||
|
||||
// FileDeleteKey deletes the config key in the config file.
|
||||
|
||||
@@ -16,7 +16,11 @@ import (
|
||||
|
||||
// ReadPassword reads a password without echoing it to the terminal.
|
||||
func ReadPassword() string {
|
||||
line, err := terminal.ReadPassword(int(os.Stdin.Fd()))
|
||||
stdin := int(os.Stdin.Fd())
|
||||
if !terminal.IsTerminal(stdin) {
|
||||
return ReadLine()
|
||||
}
|
||||
line, err := terminal.ReadPassword(stdin)
|
||||
_, _ = fmt.Fprintln(os.Stderr)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to read password: %v", err)
|
||||
|
||||
@@ -52,8 +52,8 @@ func AddFlags(flagSet *pflag.FlagSet) {
|
||||
flags.BoolVarP(flagSet, &fs.Config.InsecureSkipVerify, "no-check-certificate", "", fs.Config.InsecureSkipVerify, "Do not verify the server SSL certificate. Insecure.")
|
||||
flags.BoolVarP(flagSet, &fs.Config.AskPassword, "ask-password", "", fs.Config.AskPassword, "Allow prompt for password for encrypted configuration.")
|
||||
flags.BoolVarP(flagSet, &deleteBefore, "delete-before", "", false, "When synchronizing, delete files on destination before transferring")
|
||||
flags.BoolVarP(flagSet, &deleteDuring, "delete-during", "", false, "When synchronizing, delete files during transfer (default)")
|
||||
flags.BoolVarP(flagSet, &deleteAfter, "delete-after", "", false, "When synchronizing, delete files on destination after transferring")
|
||||
flags.BoolVarP(flagSet, &deleteDuring, "delete-during", "", false, "When synchronizing, delete files during transfer")
|
||||
flags.BoolVarP(flagSet, &deleteAfter, "delete-after", "", false, "When synchronizing, delete files on destination after transferring (default)")
|
||||
flags.IntVar64P(flagSet, &fs.Config.MaxDelete, "max-delete", "", -1, "When synchronizing, limit the number of deletes")
|
||||
flags.BoolVarP(flagSet, &fs.Config.TrackRenames, "track-renames", "", fs.Config.TrackRenames, "When synchronizing, track file renames and do a server side move if possible")
|
||||
flags.IntVarP(flagSet, &fs.Config.LowLevelRetries, "low-level-retries", "", fs.Config.LowLevelRetries, "Number of low level retries to do.")
|
||||
|
||||
86
fs/config/configmap/configmap.go
Normal file
@@ -0,0 +1,86 @@
|
||||
// Package configmap provides an abstraction for reading and writing config
|
||||
package configmap
|
||||
|
||||
// Getter provides an interface to get config items
|
||||
type Getter interface {
|
||||
// Get should get an item with the key passed in and return
|
||||
// the value. If the item is found then it should return true,
|
||||
// otherwise false.
|
||||
Get(key string) (value string, ok bool)
|
||||
}
|
||||
|
||||
// Setter provides an interface to set config items
|
||||
type Setter interface {
|
||||
// Set should set an item into persistent config store.
|
||||
Set(key, value string)
|
||||
}
|
||||
|
||||
// Mapper provides an interface to read and write config
|
||||
type Mapper interface {
|
||||
Getter
|
||||
Setter
|
||||
}
|
||||
|
||||
// Map provides a wrapper around multiple Setter and
|
||||
// Getter interfaces.
|
||||
type Map struct {
|
||||
setters []Setter
|
||||
getters []Getter
|
||||
}
|
||||
|
||||
// New returns an empty Map
|
||||
func New() *Map {
|
||||
return &Map{}
|
||||
}
|
||||
|
||||
// AddGetter appends a getter onto the end of the getters
|
||||
func (c *Map) AddGetter(getter Getter) *Map {
|
||||
c.getters = append(c.getters, getter)
|
||||
return c
|
||||
}
|
||||
|
||||
// AddGetters appends multiple getters onto the end of the getters
|
||||
func (c *Map) AddGetters(getters ...Getter) *Map {
|
||||
c.getters = append(c.getters, getters...)
|
||||
return c
|
||||
}
|
||||
|
||||
// AddSetter appends a setter onto the end of the setters
|
||||
func (c *Map) AddSetter(setter Setter) *Map {
|
||||
c.setters = append(c.setters, setter)
|
||||
return c
|
||||
}
|
||||
|
||||
// Get gets an item with the key passed in and return the value from
|
||||
// the first getter. If the item is found then it returns true,
|
||||
// otherwise false.
|
||||
func (c *Map) Get(key string) (value string, ok bool) {
|
||||
for _, do := range c.getters {
|
||||
value, ok = do.Get(key)
|
||||
if ok {
|
||||
return value, ok
|
||||
}
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
// Set sets an item into all the stored setters.
|
||||
func (c *Map) Set(key, value string) {
|
||||
for _, do := range c.setters {
|
||||
do.Set(key, value)
|
||||
}
|
||||
}
|
||||
|
||||
// Simple is a simple Mapper for testing
|
||||
type Simple map[string]string
|
||||
|
||||
// Get the value
|
||||
func (c Simple) Get(key string) (value string, ok bool) {
|
||||
value, ok = c[key]
|
||||
return value, ok
|
||||
}
|
||||
|
||||
// Set the value
|
||||
func (c Simple) Set(key, value string) {
|
||||
c[key] = value
|
||||
}
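To make the layering behaviour concrete, here is a minimal, hypothetical sketch (keys and values are invented for illustration) of how Map.Get resolves from the first getter that knows a key while Map.Set writes through to every setter:

package main

import (
	"fmt"

	"github.com/ncw/rclone/fs/config/configmap"
)

func main() {
	// Highest priority source first, e.g. values set on the command line.
	flags := configmap.Simple{"chunk_size": "64M"}
	// Lower priority source, e.g. values stored in the config file.
	file := configmap.Simple{"chunk_size": "8M", "token": "XXX"}

	m := configmap.New()
	m.AddGetter(flags).AddGetter(file)

	// Get returns the value from the first getter that has the key.
	fmt.Println(m.Get("chunk_size")) // 64M true
	fmt.Println(m.Get("token"))      // XXX true

	// Set writes through every setter that has been added.
	m.AddSetter(file)
	m.Set("token", "YYY")
	fmt.Println(file["token"]) // YYY
}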
|
||||
91
fs/config/configmap/configmap_test.go
Normal file
@@ -0,0 +1,91 @@
|
||||
package configmap
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var (
|
||||
_ Mapper = (Simple)(nil)
|
||||
_ Getter = (Simple)(nil)
|
||||
_ Setter = (Simple)(nil)
|
||||
)
|
||||
|
||||
func TestConfigMapGet(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
value, found := m.Get("config1")
|
||||
assert.Equal(t, "", value)
|
||||
assert.Equal(t, false, found)
|
||||
|
||||
value, found = m.Get("config2")
|
||||
assert.Equal(t, "", value)
|
||||
assert.Equal(t, false, found)
|
||||
|
||||
m1 := Simple{
|
||||
"config1": "one",
|
||||
}
|
||||
|
||||
m.AddGetter(m1)
|
||||
|
||||
value, found = m.Get("config1")
|
||||
assert.Equal(t, "one", value)
|
||||
assert.Equal(t, true, found)
|
||||
|
||||
value, found = m.Get("config2")
|
||||
assert.Equal(t, "", value)
|
||||
assert.Equal(t, false, found)
|
||||
|
||||
m2 := Simple{
|
||||
"config1": "one2",
|
||||
"config2": "two2",
|
||||
}
|
||||
|
||||
m.AddGetter(m2)
|
||||
|
||||
value, found = m.Get("config1")
|
||||
assert.Equal(t, "one", value)
|
||||
assert.Equal(t, true, found)
|
||||
|
||||
value, found = m.Get("config2")
|
||||
assert.Equal(t, "two2", value)
|
||||
assert.Equal(t, true, found)
|
||||
|
||||
}
|
||||
|
||||
func TestConfigMapSet(t *testing.T) {
|
||||
m := New()
|
||||
|
||||
m1 := Simple{
|
||||
"config1": "one",
|
||||
}
|
||||
m2 := Simple{
|
||||
"config1": "one2",
|
||||
"config2": "two2",
|
||||
}
|
||||
|
||||
m.AddSetter(m1).AddSetter(m2)
|
||||
|
||||
m.Set("config2", "potato")
|
||||
|
||||
assert.Equal(t, Simple{
|
||||
"config1": "one",
|
||||
"config2": "potato",
|
||||
}, m1)
|
||||
assert.Equal(t, Simple{
|
||||
"config1": "one2",
|
||||
"config2": "potato",
|
||||
}, m2)
|
||||
|
||||
m.Set("config1", "beetroot")
|
||||
|
||||
assert.Equal(t, Simple{
|
||||
"config1": "beetroot",
|
||||
"config2": "potato",
|
||||
}, m1)
|
||||
assert.Equal(t, Simple{
|
||||
"config1": "beetroot",
|
||||
"config2": "potato",
|
||||
}, m2)
|
||||
}
|
||||
127
fs/config/configstruct/configstruct.go
Normal file
@@ -0,0 +1,127 @@
|
||||
// Package configstruct parses unstructured maps into structures
|
||||
package configstruct
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var matchUpper = regexp.MustCompile("([A-Z]+)")
|
||||
|
||||
// camelToSnake converts CamelCase to snake_case
|
||||
func camelToSnake(in string) string {
|
||||
out := matchUpper.ReplaceAllString(in, "_$1")
|
||||
out = strings.ToLower(out)
|
||||
out = strings.Trim(out, "_")
|
||||
return out
|
||||
}
|
||||
|
||||
// StringToInterface turns in into an interface{} of the same type as def
|
||||
func StringToInterface(def interface{}, in string) (newValue interface{}, err error) {
|
||||
typ := reflect.TypeOf(def)
|
||||
switch typ.Kind() {
|
||||
case reflect.String:
|
||||
// Pass strings unmodified
|
||||
return in, nil
|
||||
}
|
||||
// Otherwise parse with Sscanln
|
||||
//
|
||||
// This means any types we use here must implement fmt.Scanner
|
||||
o := reflect.New(typ)
|
||||
n, err := fmt.Sscanln(in, o.Interface())
|
||||
if err != nil {
|
||||
return newValue, errors.Wrapf(err, "parsing %q as %T failed", in, def)
|
||||
}
|
||||
if n != 1 {
|
||||
return newValue, errors.New("no items parsed")
|
||||
}
|
||||
return o.Elem().Interface(), nil
|
||||
}
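A short, hedged illustration of the fmt.Scanner requirement above; the fs.SizeSuffix case relies on the Scan method added to that type elsewhere in this change:

package main

import (
	"fmt"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config/configstruct"
)

func main() {
	// Plain kinds such as int are parsed with fmt.Sscanln.
	n, err := configstruct.StringToInterface(int(0), "42")
	fmt.Println(n, err) // 42 <nil>

	// Other types must implement fmt.Scanner - fs.SizeSuffix does.
	size, err := configstruct.StringToInterface(fs.SizeSuffix(0), "18M")
	fmt.Println(size, err) // 18M <nil>
}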
|
||||
|
||||
// Item describes a single entry in the options structure
|
||||
type Item struct {
|
||||
Name string // snake_case
|
||||
Field string // CamelCase
|
||||
Num int // number of the field in the struct
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
// Items parses the opt struct and returns a slice of Item objects.
|
||||
//
|
||||
// opt must be a pointer to a struct. The struct should have entirely
|
||||
// public fields.
|
||||
//
|
||||
// The config_name is looked up in a struct tag called "config" or if
|
||||
// not found is the field name converted from CamelCase to snake_case.
|
||||
func Items(opt interface{}) (items []Item, err error) {
|
||||
def := reflect.ValueOf(opt)
|
||||
if def.Kind() != reflect.Ptr {
|
||||
return nil, errors.New("argument must be a pointer")
|
||||
}
|
||||
def = def.Elem() // indirect the pointer
|
||||
if def.Kind() != reflect.Struct {
|
||||
return nil, errors.New("argument must be a pointer to a struct")
|
||||
}
|
||||
defType := def.Type()
|
||||
for i := 0; i < def.NumField(); i++ {
|
||||
field := defType.Field(i)
|
||||
fieldName := field.Name
|
||||
configName, ok := field.Tag.Lookup("config")
|
||||
if !ok {
|
||||
configName = camelToSnake(fieldName)
|
||||
}
|
||||
defaultItem := Item{
|
||||
Name: configName,
|
||||
Field: fieldName,
|
||||
Num: i,
|
||||
Value: def.Field(i).Interface(),
|
||||
}
|
||||
items = append(items, defaultItem)
|
||||
}
|
||||
return items, nil
|
||||
}
|
||||
|
||||
// Set interprets the field names in defaults and looks up config
|
||||
// values in the config passed in. Any values found in config will be
|
||||
// set in the opt structure.
|
||||
//
|
||||
// opt must be a pointer to a struct. The struct should have entirely
|
||||
// public fields. The field names are converted from CamelCase to
|
||||
// snake_case and looked up in the config supplied or a
|
||||
// `config:"field_name"` is looked up.
|
||||
//
|
||||
// If items are found then they are converted from string to native
|
||||
// types and set in opt.
|
||||
//
|
||||
// All the field types in the struct must implement fmt.Scanner.
|
||||
func Set(config configmap.Getter, opt interface{}) (err error) {
|
||||
defaultItems, err := Items(opt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defStruct := reflect.ValueOf(opt).Elem()
|
||||
for _, defaultItem := range defaultItems {
|
||||
newValue := defaultItem.Value
|
||||
if configValue, ok := config.Get(defaultItem.Name); ok {
|
||||
var newNewValue interface{}
|
||||
newNewValue, err = StringToInterface(newValue, configValue)
|
||||
if err != nil {
|
||||
// Mask errors if setting an empty string as
|
||||
// it isn't valid for all types. This makes
|
||||
// empty string be the equivalent of unset.
|
||||
if configValue != "" {
|
||||
return errors.Wrapf(err, "couldn't parse config item %q = %q as %T", defaultItem.Name, configValue, defaultItem.Value)
|
||||
}
|
||||
} else {
|
||||
newValue = newNewValue
|
||||
}
|
||||
}
|
||||
defStruct.Field(defaultItem.Num).Set(reflect.ValueOf(newValue))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
116
fs/config/configstruct/configstruct_test.go
Normal file
@@ -0,0 +1,116 @@
|
||||
package configstruct_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type conf struct {
|
||||
A string
|
||||
B string
|
||||
}
|
||||
|
||||
type conf2 struct {
|
||||
PotatoPie string `config:"spud_pie"`
|
||||
BeanStew bool
|
||||
RaisinRoll int
|
||||
SausageOnStick int64
|
||||
ForbiddenFruit uint
|
||||
CookingTime fs.Duration
|
||||
TotalWeight fs.SizeSuffix
|
||||
}
|
||||
|
||||
func TestItemsError(t *testing.T) {
|
||||
_, err := configstruct.Items(nil)
|
||||
assert.EqualError(t, err, "argument must be a pointer")
|
||||
_, err = configstruct.Items(new(int))
|
||||
assert.EqualError(t, err, "argument must be a pointer to a struct")
|
||||
}
|
||||
|
||||
func TestItems(t *testing.T) {
|
||||
in := &conf2{
|
||||
PotatoPie: "yum",
|
||||
BeanStew: true,
|
||||
RaisinRoll: 42,
|
||||
SausageOnStick: 101,
|
||||
ForbiddenFruit: 6,
|
||||
CookingTime: fs.Duration(42 * time.Second),
|
||||
TotalWeight: fs.SizeSuffix(17 << 20),
|
||||
}
|
||||
got, err := configstruct.Items(in)
|
||||
require.NoError(t, err)
|
||||
want := []configstruct.Item{
|
||||
{Name: "spud_pie", Field: "PotatoPie", Num: 0, Value: string("yum")},
|
||||
{Name: "bean_stew", Field: "BeanStew", Num: 1, Value: true},
|
||||
{Name: "raisin_roll", Field: "RaisinRoll", Num: 2, Value: int(42)},
|
||||
{Name: "sausage_on_stick", Field: "SausageOnStick", Num: 3, Value: int64(101)},
|
||||
{Name: "forbidden_fruit", Field: "ForbiddenFruit", Num: 4, Value: uint(6)},
|
||||
{Name: "cooking_time", Field: "CookingTime", Num: 5, Value: fs.Duration(42 * time.Second)},
|
||||
{Name: "total_weight", Field: "TotalWeight", Num: 6, Value: fs.SizeSuffix(17 << 20)},
|
||||
}
|
||||
assert.Equal(t, want, got)
|
||||
}
|
||||
|
||||
func TestSetBasics(t *testing.T) {
|
||||
c := &conf{A: "one", B: "two"}
|
||||
err := configstruct.Set(configMap{}, c)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &conf{A: "one", B: "two"}, c)
|
||||
}
|
||||
|
||||
// a simple configmap.Getter for testing
|
||||
type configMap map[string]string
|
||||
|
||||
// Get the value
|
||||
func (c configMap) Get(key string) (value string, ok bool) {
|
||||
value, ok = c[key]
|
||||
return value, ok
|
||||
}
|
||||
|
||||
func TestSetMore(t *testing.T) {
|
||||
c := &conf{A: "one", B: "two"}
|
||||
m := configMap{
|
||||
"a": "ONE",
|
||||
}
|
||||
err := configstruct.Set(m, c)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &conf{A: "ONE", B: "two"}, c)
|
||||
}
|
||||
|
||||
func TestSetFull(t *testing.T) {
|
||||
in := &conf2{
|
||||
PotatoPie: "yum",
|
||||
BeanStew: true,
|
||||
RaisinRoll: 42,
|
||||
SausageOnStick: 101,
|
||||
ForbiddenFruit: 6,
|
||||
CookingTime: fs.Duration(42 * time.Second),
|
||||
TotalWeight: fs.SizeSuffix(17 << 20),
|
||||
}
|
||||
m := configMap{
|
||||
"spud_pie": "YUM",
|
||||
"bean_stew": "FALSE",
|
||||
"raisin_roll": "43 ",
|
||||
"sausage_on_stick": " 102 ",
|
||||
"forbidden_fruit": "0x7",
|
||||
"cooking_time": "43s",
|
||||
"total_weight": "18M",
|
||||
}
|
||||
want := &conf2{
|
||||
PotatoPie: "YUM",
|
||||
BeanStew: false,
|
||||
RaisinRoll: 43,
|
||||
SausageOnStick: 102,
|
||||
ForbiddenFruit: 7,
|
||||
CookingTime: fs.Duration(43 * time.Second),
|
||||
TotalWeight: fs.SizeSuffix(18 << 20),
|
||||
}
|
||||
err := configstruct.Set(m, in)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, want, in)
|
||||
}
|
||||
60
fs/config/configstruct/internal_test.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package configstruct
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestCamelToSnake(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
want string
|
||||
}{
|
||||
{"", ""},
|
||||
{"Type", "type"},
|
||||
{"AuthVersion", "auth_version"},
|
||||
{"AccessKeyID", "access_key_id"},
|
||||
} {
|
||||
got := camelToSnake(test.in)
|
||||
assert.Equal(t, test.want, got, test.in)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringToInterface(t *testing.T) {
|
||||
item := struct{ A int }{2}
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
def interface{}
|
||||
want interface{}
|
||||
err string
|
||||
}{
|
||||
{"", string(""), "", ""},
|
||||
{" string ", string(""), " string ", ""},
|
||||
{"123", int(0), int(123), ""},
|
||||
{"0x123", int(0), int(0x123), ""},
|
||||
{" 0x123 ", int(0), int(0x123), ""},
|
||||
{"-123", int(0), int(-123), ""},
|
||||
{"0", false, false, ""},
|
||||
{"1", false, true, ""},
|
||||
{"FALSE", false, false, ""},
|
||||
{"true", false, true, ""},
|
||||
{"123", uint(0), uint(123), ""},
|
||||
{"123", int64(0), int64(123), ""},
|
||||
{"123x", int64(0), nil, "parsing \"123x\" as int64 failed: expected newline"},
|
||||
{"truth", false, nil, "parsing \"truth\" as bool failed: syntax error scanning boolean"},
|
||||
{"struct", item, nil, "parsing \"struct\" as struct { A int } failed: can't scan type: *struct { A int }"},
|
||||
} {
|
||||
what := fmt.Sprintf("parse %q as %T", test.in, test.def)
|
||||
got, err := StringToInterface(test.def, test.in)
|
||||
if test.err == "" {
|
||||
require.NoError(t, err, what)
|
||||
assert.Equal(t, test.want, got, what)
|
||||
} else {
|
||||
assert.Nil(t, got)
|
||||
assert.EqualError(t, err, test.err, what)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -5,23 +5,16 @@ package flags
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
// optionToEnv converts an option name, eg "ignore-size" into an
|
||||
// environment name "RCLONE_IGNORE_SIZE"
|
||||
func optionToEnv(name string) string {
|
||||
return "RCLONE_" + strings.ToUpper(strings.Replace(name, "-", "_", -1))
|
||||
}
|
||||
|
||||
// setDefaultFromEnv constructs a name from the flag passed in and
|
||||
// sets the default from the environment if possible.
|
||||
func setDefaultFromEnv(name string) {
|
||||
key := optionToEnv(name)
|
||||
key := fs.OptionToEnv(name)
|
||||
newValue, found := os.LookupEnv(key)
|
||||
if found {
|
||||
flag := pflag.Lookup(name)
|
||||
|
||||
@@ -173,7 +173,7 @@ func NewFilter(opt *Opt) (f *Filter, err error) {
|
||||
}
|
||||
|
||||
if addImplicitExclude && foundExcludeRule {
|
||||
fs.Infof(nil, "Using --filter is recommended instead of both --include and --exclude as the order they are parsed in is indeterminate")
|
||||
fs.Errorf(nil, "Using --filter is recommended instead of both --include and --exclude as the order they are parsed in is indeterminate")
|
||||
}
|
||||
|
||||
for _, rule := range f.Opt.FilterRule {
|
||||
|
||||
228
fs/fs.go
@@ -2,6 +2,7 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
@@ -9,12 +10,13 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs/driveletter"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fspath"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -69,24 +71,87 @@ type RegInfo struct {
|
||||
Name string
|
||||
// Description of this fs - defaults to Name
|
||||
Description string
|
||||
// Prefix for command line flags for this fs - defaults to Name if not set
|
||||
Prefix string
|
||||
// Create a new file system. If root refers to an existing
|
||||
// object, then it should return a Fs which points to
|
||||
// the parent of that object and ErrorIsFile.
|
||||
NewFs func(name string, root string) (Fs, error) `json:"-"`
|
||||
NewFs func(name string, root string, config configmap.Mapper) (Fs, error) `json:"-"`
|
||||
// Function to call to help with config
|
||||
Config func(string) `json:"-"`
|
||||
Config func(name string, config configmap.Mapper) `json:"-"`
|
||||
// Options for the Fs configuration
|
||||
Options []Option
|
||||
Options Options
|
||||
}
|
||||
|
||||
// Options is a slice of configuration Option for a backend
|
||||
type Options []Option
|
||||
|
||||
// Set the default values for the options
|
||||
func (os Options) setValues() {
|
||||
for i := range os {
|
||||
o := &os[i]
|
||||
if o.Default == nil {
|
||||
o.Default = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// OptionVisibility controls whether the options are visible in the
|
||||
// configurator or the command line.
|
||||
type OptionVisibility byte
|
||||
|
||||
// Constants for Option.Hide
|
||||
const (
|
||||
OptionHideCommandLine OptionVisibility = 1 << iota
|
||||
OptionHideConfigurator
|
||||
OptionHideBoth = OptionHideCommandLine | OptionHideConfigurator
|
||||
)
|
||||
|
||||
// Option describes an option for the config wizard
|
||||
//
|
||||
// This also describes command line options and environment variables
|
||||
type Option struct {
|
||||
Name string
|
||||
Help string
|
||||
Provider string
|
||||
Optional bool
|
||||
IsPassword bool
|
||||
Examples OptionExamples `json:",omitempty"`
|
||||
Name string // name of the option in snake_case
|
||||
Help string // Help, the first line only is used for the command line help
|
||||
Provider string // Set to filter on provider
|
||||
Default interface{} // default value, nil => ""
|
||||
Value interface{} // value to be set by flags
|
||||
Examples OptionExamples `json:",omitempty"` // config examples
|
||||
ShortOpt string // the short option for this if required
|
||||
Hide OptionVisibility // set this to hide the config from the configurator or the command line
|
||||
Required bool // this option is required
|
||||
IsPassword bool // set if the option is a password
|
||||
NoPrefix bool // set if the option for this should not use the backend prefix
|
||||
Advanced bool // set if this is an advanced config option
|
||||
}
|
||||
|
||||
// Gets the current value, which is the default if not set
|
||||
func (o *Option) value() interface{} {
|
||||
val := o.Value
|
||||
if val == nil {
|
||||
val = o.Default
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// String turns Option into a string
|
||||
func (o *Option) String() string {
|
||||
return fmt.Sprint(o.value())
|
||||
}
|
||||
|
||||
// Set a Option from a string
|
||||
func (o *Option) Set(s string) (err error) {
|
||||
newValue, err := configstruct.StringToInterface(o.value(), s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.Value = newValue
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type of the value
|
||||
func (o *Option) Type() string {
|
||||
return reflect.TypeOf(o.value()).Name()
|
||||
}
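Because Option now has String, Set and Type it satisfies pflag.Value, so a backend option can be registered directly as a command line flag. A hedged sketch with an invented option name and default:

package main

import (
	"fmt"

	"github.com/ncw/rclone/fs"
	"github.com/spf13/pflag"
)

func main() {
	opt := &fs.Option{
		Name:    "chunk_size", // hypothetical backend option
		Help:    "Upload chunk size.",
		Default: fs.SizeSuffix(8 << 20),
	}

	flags := pflag.NewFlagSet("example", pflag.ContinueOnError)
	flags.Var(opt, "chunk-size", opt.Help)

	_ = flags.Parse([]string{"--chunk-size", "64M"})
	fmt.Println(opt.String()) // 64M
}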
|
||||
|
||||
// OptionExamples is a slice of examples
|
||||
@@ -115,6 +180,10 @@ type OptionExample struct {
|
||||
//
|
||||
// Fs modules should use this in an init() function
|
||||
func Register(info *RegInfo) {
|
||||
info.Options.setValues()
|
||||
if info.Prefix == "" {
|
||||
info.Prefix = info.Name
|
||||
}
|
||||
Registry = append(Registry, info)
|
||||
}
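Putting the new signatures together, a hedged sketch of how a backend might register itself after this change; the backend name, option and Options struct below are invented for illustration and not part of the change itself:

package example

import (
	"errors"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
)

// Options holds the parsed config for this hypothetical backend.
type Options struct {
	ChunkSize fs.SizeSuffix `config:"chunk_size"`
}

func init() {
	fs.Register(&fs.RegInfo{
		Name:        "example",
		Description: "Example backend (illustration only)",
		NewFs:       NewFs,
		Options: fs.Options{{
			Name:     "chunk_size",
			Help:     "Upload chunk size.",
			Default:  fs.SizeSuffix(8 << 20),
			Advanced: true,
		}},
	})
}

// NewFs builds the backend from the layered config passed in.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	opt := new(Options)
	if err := configstruct.Set(m, opt); err != nil {
		return nil, err
	}
	// ... construct and return the real Fs using opt ...
	return nil, errors.New("illustration only")
}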
|
||||
|
||||
@@ -786,28 +855,139 @@ func MustFind(name string) *RegInfo {
|
||||
return fs
|
||||
}
|
||||
|
||||
// Matcher is a pattern to match an rclone URL
|
||||
var Matcher = regexp.MustCompile(`^([\w_ -]+):(.*)$`)
|
||||
|
||||
// ParseRemote deconstructs a path into configName, fsPath, looking up
|
||||
// the fsName in the config file (returning NotFoundInConfigFile if not found)
|
||||
func ParseRemote(path string) (fsInfo *RegInfo, configName, fsPath string, err error) {
|
||||
parts := Matcher.FindStringSubmatch(path)
|
||||
configName, fsPath = fspath.Parse(path)
|
||||
var fsName string
|
||||
fsName, configName, fsPath = "local", "local", path
|
||||
if parts != nil && !driveletter.IsDriveLetter(parts[1]) {
|
||||
configName, fsPath = parts[1], parts[2]
|
||||
fsName = ConfigFileGet(configName, "type")
|
||||
if fsName == "" {
|
||||
var ok bool
|
||||
if configName != "" {
|
||||
m := ConfigMap(nil, configName)
|
||||
fsName, ok = m.Get("type")
|
||||
if !ok {
|
||||
return nil, "", "", ErrorNotFoundInConfigFile
|
||||
}
|
||||
} else {
|
||||
fsName = "local"
|
||||
configName = "local"
|
||||
}
|
||||
// change native directory separators to / if there are any
|
||||
fsPath = filepath.ToSlash(fsPath)
|
||||
fsInfo, err = Find(fsName)
|
||||
return fsInfo, configName, fsPath, err
|
||||
}
|
||||
|
||||
// A configmap.Getter to read from the environment RCLONE_CONFIG_backend_option_name
|
||||
type configEnvVars string
|
||||
|
||||
// Get a config item from the environment variables if possible
|
||||
func (configName configEnvVars) Get(key string) (value string, ok bool) {
|
||||
return os.LookupEnv(ConfigToEnv(string(configName), key))
|
||||
}
|
||||
|
||||
// A configmap.Getter to read from the environment RCLONE_option_name
|
||||
type optionEnvVars string
|
||||
|
||||
// Get a config item from the option environment variables if possible
|
||||
func (prefix optionEnvVars) Get(key string) (value string, ok bool) {
|
||||
return os.LookupEnv(OptionToEnv(string(prefix) + "-" + key))
|
||||
}
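For reference, a hedged sketch of the environment variable names these two getters resolve; the remote name and backend prefix are invented:

package main

import (
	"fmt"

	"github.com/ncw/rclone/fs"
)

func main() {
	// Per-remote override for remote "myremote", key "chunk_size".
	fmt.Println(fs.ConfigToEnv("myremote", "chunk_size")) // RCLONE_CONFIG_MYREMOTE_CHUNK_SIZE
	// Backend-wide override for a backend whose prefix is "drive".
	fmt.Println(fs.OptionToEnv("drive-chunk_size")) // RCLONE_DRIVE_CHUNK_SIZE
}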
|
||||
|
||||
// A configmap.Getter to read either the default value or the set
|
||||
// value from the RegInfo.Options
|
||||
type regInfoValues struct {
|
||||
fsInfo *RegInfo
|
||||
useDefault bool
|
||||
}
|
||||
|
||||
// override the values in configMap with either the flag values or
|
||||
// the default values
|
||||
func (r *regInfoValues) Get(key string) (value string, ok bool) {
|
||||
for i := range r.fsInfo.Options {
|
||||
o := &r.fsInfo.Options[i]
|
||||
if o.Name == key {
|
||||
if r.useDefault || o.Value != nil {
|
||||
return o.String(), true
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
// A configmap.Setter to read from the config file
|
||||
type setConfigFile string
|
||||
|
||||
// Set a config item into the config file
|
||||
func (section setConfigFile) Set(key, value string) {
|
||||
Debugf(nil, "Saving config %q = %q in section %q of the config file", key, value, section)
|
||||
ConfigFileSet(string(section), key, value)
|
||||
}
|
||||
|
||||
// A configmap.Getter to read from the config file
|
||||
type getConfigFile string
|
||||
|
||||
// Get a config item from the config file
|
||||
func (section getConfigFile) Get(key string) (value string, ok bool) {
|
||||
value, ok = ConfigFileGet(string(section), key)
|
||||
// Ignore empty lines in the config file
|
||||
if value == "" {
|
||||
ok = false
|
||||
}
|
||||
return value, ok
|
||||
}
|
||||
|
||||
// ConfigMap creates a configmap.Map from the *RegInfo and the
|
||||
// configName passed in.
|
||||
//
|
||||
// If fsInfo is nil then the returned configmap.Map should only be
|
||||
// used for reading non backend specific parameters, such as "type".
|
||||
func ConfigMap(fsInfo *RegInfo, configName string) (config *configmap.Map) {
|
||||
// Create the config
|
||||
config = configmap.New()
|
||||
|
||||
// Read the config, more specific to least specific
|
||||
|
||||
// flag values
|
||||
if fsInfo != nil {
|
||||
config.AddGetter(®InfoValues{fsInfo, false})
|
||||
}
|
||||
|
||||
// remote specific environment vars
|
||||
config.AddGetter(configEnvVars(configName))
|
||||
|
||||
// backend specific environment vars
|
||||
if fsInfo != nil {
|
||||
config.AddGetter(optionEnvVars(fsInfo.Prefix))
|
||||
}
|
||||
|
||||
// config file
|
||||
config.AddGetter(getConfigFile(configName))
|
||||
|
||||
// default values
|
||||
if fsInfo != nil {
|
||||
config.AddGetter(®InfoValues{fsInfo, true})
|
||||
}
|
||||
|
||||
// Set Config
|
||||
config.AddSetter(setConfigFile(configName))
|
||||
return config
|
||||
}
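A small, hedged example of the resulting lookup order; with a nil *RegInfo only the generic layers are consulted, so the invented remote below resolves its "type" from the environment variable layer before the config file is ever read:

package main

import (
	"fmt"
	"os"

	"github.com/ncw/rclone/fs"
)

func main() {
	_ = os.Setenv("RCLONE_CONFIG_MYREMOTE_TYPE", "drive")
	m := fs.ConfigMap(nil, "myremote")
	fmt.Println(m.Get("type")) // drive true
}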
|
||||
|
||||
// ConfigFs makes the config for calling NewFs with.
|
||||
//
|
||||
// It parses the path which is of the form remote:path
|
||||
//
|
||||
// Remotes are looked up in the config file. If the remote isn't
|
||||
// found then NotFoundInConfigFile will be returned.
|
||||
func ConfigFs(path string) (fsInfo *RegInfo, configName, fsPath string, config *configmap.Map, err error) {
|
||||
// Parse the remote path
|
||||
fsInfo, configName, fsPath, err = ParseRemote(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
config = ConfigMap(fsInfo, configName)
|
||||
return
|
||||
}
|
||||
|
||||
// NewFs makes a new Fs object from the path
|
||||
//
|
||||
// The path is of the form remote:path
|
||||
@@ -818,11 +998,11 @@ func ParseRemote(path string) (fsInfo *RegInfo, configName, fsPath string, err e
|
||||
// On Windows avoid single character remote names as they can be mixed
|
||||
// up with drive letters.
|
||||
func NewFs(path string) (Fs, error) {
|
||||
fsInfo, configName, fsPath, err := ParseRemote(path)
|
||||
fsInfo, configName, fsPath, config, err := ConfigFs(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return fsInfo.NewFs(configName, fsPath)
|
||||
return fsInfo.NewFs(configName, fsPath, config)
|
||||
}
|
||||
|
||||
// TemporaryLocalFs creates a local FS in the OS's temporary directory.
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
@@ -52,3 +53,20 @@ func TestFeaturesDisableList(t *testing.T) {
|
||||
assert.False(t, ft.CaseInsensitive)
|
||||
assert.False(t, ft.DuplicateFiles)
|
||||
}
|
||||
|
||||
// Check it satisfies the interface
|
||||
var _ pflag.Value = (*Option)(nil)
|
||||
|
||||
func TestOption(t *testing.T) {
|
||||
d := &Option{
|
||||
Name: "potato",
|
||||
Value: SizeSuffix(17 << 20),
|
||||
}
|
||||
assert.Equal(t, "17M", d.String())
|
||||
assert.Equal(t, "SizeSuffix", d.Type())
|
||||
err := d.Set("18M")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, SizeSuffix(18<<20), d.Value)
|
||||
err = d.Set("sdfsdf")
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
@@ -188,6 +188,12 @@ func Cause(cause error) (retriable bool, err error) {
|
||||
|
||||
// Unwrap 1 level if possible
|
||||
err = errors.Cause(err)
|
||||
if err == nil {
|
||||
// errors.Cause can return nil which isn't
|
||||
// desirable so pick the previous error in
|
||||
// this case.
|
||||
err = prev
|
||||
}
|
||||
if err == prev {
|
||||
// Unpack any struct or *struct with a field
|
||||
// of name Err which satisfies the error
|
||||
@@ -196,11 +202,11 @@ func Cause(cause error) (retriable bool, err error) {
|
||||
// others in the stdlib
|
||||
errType := reflect.TypeOf(err)
|
||||
errValue := reflect.ValueOf(err)
|
||||
if errType.Kind() == reflect.Ptr {
|
||||
if errValue.IsValid() && errType.Kind() == reflect.Ptr {
|
||||
errType = errType.Elem()
|
||||
errValue = errValue.Elem()
|
||||
}
|
||||
if errType.Kind() == reflect.Struct {
|
||||
if errValue.IsValid() && errType.Kind() == reflect.Struct {
|
||||
if errField := errValue.FieldByName("Err"); errField.IsValid() {
|
||||
errFieldValue := errField.Interface()
|
||||
if newErr, ok := errFieldValue.(error); ok {
|
||||
|
||||
@@ -39,7 +39,15 @@ type myError2 struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *myError2) Error() string { return e.Err.Error() }
|
||||
func (e *myError2) Error() string {
|
||||
if e == nil {
|
||||
return "myError2(nil)"
|
||||
}
|
||||
if e.Err == nil {
|
||||
return "myError2{Err: nil}"
|
||||
}
|
||||
return e.Err.Error()
|
||||
}
|
||||
|
||||
type myError3 struct {
|
||||
Err int
|
||||
@@ -53,11 +61,23 @@ type myError4 struct {
|
||||
|
||||
func (e *myError4) Error() string { return e.e.Error() }
|
||||
|
||||
type errorCause struct {
|
||||
e error
|
||||
}
|
||||
|
||||
func (e *errorCause) Error() string { return fmt.Sprintf("%#v", e) }
|
||||
|
||||
func (e *errorCause) Cause() error { return e.e }
|
||||
|
||||
func TestCause(t *testing.T) {
|
||||
e3 := &myError3{3}
|
||||
e4 := &myError4{io.EOF}
|
||||
|
||||
eNil1 := &myError2{nil}
|
||||
eNil2 := &myError2{Err: (*myError2)(nil)}
|
||||
errPotato := errors.New("potato")
|
||||
nilCause1 := &errorCause{nil}
|
||||
nilCause2 := &errorCause{(*myError2)(nil)}
|
||||
|
||||
for i, test := range []struct {
|
||||
err error
|
||||
wantRetriable bool
|
||||
@@ -70,10 +90,15 @@ func TestCause(t *testing.T) {
|
||||
{errUseOfClosedNetworkConnection, false, errUseOfClosedNetworkConnection},
|
||||
{makeNetErr(syscall.EAGAIN), true, syscall.EAGAIN},
|
||||
{makeNetErr(syscall.Errno(123123123)), false, syscall.Errno(123123123)},
|
||||
{eNil1, false, eNil1},
|
||||
{eNil2, false, eNil2.Err},
|
||||
{myError1{io.EOF}, false, io.EOF},
|
||||
{&myError2{io.EOF}, false, io.EOF},
|
||||
{e3, false, e3},
|
||||
{e4, false, e4},
|
||||
{&errorCause{errPotato}, false, errPotato},
|
||||
{nilCause1, false, nilCause1},
|
||||
{nilCause2, false, nilCause2.e},
|
||||
} {
|
||||
gotRetriable, gotErr := Cause(test.err)
|
||||
what := fmt.Sprintf("test #%d: %v", i, test.err)
|
||||
|
||||
@@ -3,27 +3,46 @@ package fspath
|
||||
|
||||
import (
|
||||
"path"
|
||||
"strings"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
|
||||
"github.com/ncw/rclone/fs/driveletter"
|
||||
)
|
||||
|
||||
// RemoteSplit splits a remote into a parent and a leaf
|
||||
// Matcher is a pattern to match an rclone URL
|
||||
var Matcher = regexp.MustCompile(`^([\w_ -]+):(.*)$`)
|
||||
|
||||
// Parse deconstructs a remote path into configName and fsPath
|
||||
//
|
||||
// If the path is a local path then configName will be returned as "".
|
||||
//
|
||||
// So "remote:path/to/dir" will return "remote", "path/to/dir"
|
||||
// and "/path/to/local" will return ("", "/path/to/local")
|
||||
//
|
||||
// Note that this will turn \ into / in the fsPath on Windows
|
||||
func Parse(path string) (configName, fsPath string) {
|
||||
parts := Matcher.FindStringSubmatch(path)
|
||||
configName, fsPath = "", path
|
||||
if parts != nil && !driveletter.IsDriveLetter(parts[1]) {
|
||||
configName, fsPath = parts[1], parts[2]
|
||||
}
|
||||
// change native directory separators to / if there are any
|
||||
fsPath = filepath.ToSlash(fsPath)
|
||||
return configName, fsPath
|
||||
}
|
||||
|
||||
// Split splits a remote into a parent and a leaf
|
||||
//
|
||||
// if it returns leaf as an empty string then remote is a directory
|
||||
//
|
||||
// if it returns parent as an empty string then that means the current directory
|
||||
//
|
||||
// The returned values have the property that parent + leaf == remote
|
||||
func RemoteSplit(remote string) (parent string, leaf string) {
|
||||
// Split remote on :
|
||||
i := strings.Index(remote, ":")
|
||||
remoteName := ""
|
||||
remotePath := remote
|
||||
if i >= 0 {
|
||||
remoteName = remote[:i+1]
|
||||
remotePath = remote[i+1:]
|
||||
} else if strings.HasSuffix(remotePath, "/") {
|
||||
// if no : and ends with / must be directory
|
||||
return remotePath, ""
|
||||
// (except under Windows where \ will be translated into /)
|
||||
func Split(remote string) (parent string, leaf string) {
|
||||
remoteName, remotePath := Parse(remote)
|
||||
if remoteName != "" {
|
||||
remoteName += ":"
|
||||
}
|
||||
// Construct new remote name without last segment
|
||||
parent, leaf = path.Split(remotePath)
|
||||
|
||||
@@ -7,8 +7,23 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestRemoteSplit(t *testing.T) {
|
||||
func TestParse(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in, wantConfigName, wantFsPath string
|
||||
}{
|
||||
{"", "", ""},
|
||||
{"/path/to/file", "", "/path/to/file"},
|
||||
{"path/to/file", "", "path/to/file"},
|
||||
{"remote:path/to/file", "remote", "path/to/file"},
|
||||
{"remote:/path/to/file", "remote", "/path/to/file"},
|
||||
} {
|
||||
gotConfigName, gotFsPath := Parse(test.in)
|
||||
assert.Equal(t, test.wantConfigName, gotConfigName)
|
||||
assert.Equal(t, test.wantFsPath, gotFsPath)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSplit(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
remote, wantParent, wantLeaf string
|
||||
}{
|
||||
@@ -27,7 +42,7 @@ func TestRemoteSplit(t *testing.T) {
|
||||
{"root/", "root/", ""},
|
||||
{"a/b/", "a/b/", ""},
|
||||
} {
|
||||
gotParent, gotLeaf := RemoteSplit(test.remote)
|
||||
gotParent, gotLeaf := Split(test.remote)
|
||||
assert.Equal(t, test.wantParent, gotParent, test.remote)
|
||||
assert.Equal(t, test.wantLeaf, gotLeaf, test.remote)
|
||||
assert.Equal(t, test.remote, gotParent+gotLeaf, fmt.Sprintf("%s: %q + %q != %q", test.remote, gotParent, gotLeaf, test.remote))
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -17,6 +19,13 @@ func (d Duration) String() string {
|
||||
if d == DurationOff {
|
||||
return "off"
|
||||
}
|
||||
for i := len(ageSuffixes) - 2; i >= 0; i-- {
|
||||
ageSuffix := &ageSuffixes[i]
|
||||
if math.Abs(float64(d)) >= float64(ageSuffix.Multiplier) {
|
||||
timeUnits := float64(d) / float64(ageSuffix.Multiplier)
|
||||
return strconv.FormatFloat(timeUnits, 'f', -1, 64) + ageSuffix.Suffix
|
||||
}
|
||||
}
|
||||
return time.Duration(d).String()
|
||||
}
|
||||
|
||||
@@ -30,10 +39,6 @@ var ageSuffixes = []struct {
|
||||
Suffix string
|
||||
Multiplier time.Duration
|
||||
}{
|
||||
{Suffix: "ms", Multiplier: time.Millisecond},
|
||||
{Suffix: "s", Multiplier: time.Second},
|
||||
{Suffix: "m", Multiplier: time.Minute},
|
||||
{Suffix: "h", Multiplier: time.Hour},
|
||||
{Suffix: "d", Multiplier: time.Hour * 24},
|
||||
{Suffix: "w", Multiplier: time.Hour * 24 * 7},
|
||||
{Suffix: "M", Multiplier: time.Hour * 24 * 30},
|
||||
@@ -51,6 +56,12 @@ func ParseDuration(age string) (time.Duration, error) {
|
||||
return time.Duration(DurationOff), nil
|
||||
}
|
||||
|
||||
// Attempt to parse as a time.Duration first
|
||||
d, err := time.ParseDuration(age)
|
||||
if err == nil {
|
||||
return d, nil
|
||||
}
|
||||
|
||||
for _, ageSuffix := range ageSuffixes {
|
||||
if strings.HasSuffix(age, ageSuffix.Suffix) {
|
||||
numberString := age[:len(age)-len(ageSuffix.Suffix)]
|
||||
@@ -81,3 +92,12 @@ func (d *Duration) Set(s string) error {
|
||||
func (d Duration) Type() string {
|
||||
return "duration"
|
||||
}
|
||||
|
||||
// Scan implements the fmt.Scanner interface
|
||||
func (d *Duration) Scan(s fmt.ScanState, ch rune) error {
|
||||
token, err := s.Token(true, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return d.Set(string(token))
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -23,6 +24,7 @@ func TestParseDuration(t *testing.T) {
|
||||
{"1ms", time.Millisecond, false},
|
||||
{"1s", time.Second, false},
|
||||
{"1m", time.Minute, false},
|
||||
{"1.5m", (3 * time.Minute) / 2, false},
|
||||
{"1h", time.Hour, false},
|
||||
{"1d", time.Hour * 24, false},
|
||||
{"1w", time.Hour * 24 * 7, false},
|
||||
@@ -33,6 +35,7 @@ func TestParseDuration(t *testing.T) {
|
||||
{"1.s", time.Second, false},
|
||||
{"1x", 0, true},
|
||||
{"off", time.Duration(DurationOff), false},
|
||||
{"1h2m3s", time.Hour + 2*time.Minute + 3*time.Second, false},
|
||||
} {
|
||||
duration, err := ParseDuration(test.in)
|
||||
if test.err {
|
||||
@@ -52,9 +55,39 @@ func TestDurationString(t *testing.T) {
|
||||
{time.Duration(0), "0s"},
|
||||
{time.Second, "1s"},
|
||||
{time.Minute, "1m0s"},
|
||||
{time.Millisecond, "1ms"},
|
||||
{time.Second, "1s"},
|
||||
{(3 * time.Minute) / 2, "1m30s"},
|
||||
{time.Hour, "1h0m0s"},
|
||||
{time.Hour * 24, "1d"},
|
||||
{time.Hour * 24 * 7, "1w"},
|
||||
{time.Hour * 24 * 30, "1M"},
|
||||
{time.Hour * 24 * 365, "1y"},
|
||||
{time.Hour * 24 * 365 * 3 / 2, "1.5y"},
|
||||
{-time.Second, "-1s"},
|
||||
{time.Second, "1s"},
|
||||
{time.Duration(DurationOff), "off"},
|
||||
{time.Hour + 2*time.Minute + 3*time.Second, "1h2m3s"},
|
||||
{time.Hour * 24, "1d"},
|
||||
{time.Hour * 24 * 7, "1w"},
|
||||
{time.Hour * 24 * 30, "1M"},
|
||||
{time.Hour * 24 * 365, "1y"},
|
||||
{time.Hour * 24 * 365 * 3 / 2, "1.5y"},
|
||||
{-time.Hour * 24 * 365 * 3 / 2, "-1.5y"},
|
||||
} {
|
||||
got := Duration(test.in).String()
|
||||
assert.Equal(t, test.want, got)
|
||||
// Test the reverse
|
||||
reverse, err := ParseDuration(test.want)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, test.in, reverse)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDurationScan(t *testing.T) {
|
||||
var v Duration
|
||||
n, err := fmt.Sscan(" 17m ", &v)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, n)
|
||||
assert.Equal(t, Duration(17*60*time.Second), v)
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
_ "net/http/pprof" // install the pprof http handlers
|
||||
|
||||
"strings"
|
||||
|
||||
"github.com/ncw/rclone/cmd/serve/httplib"
|
||||
|
||||
@@ -110,3 +110,12 @@ func (x *SizeSuffix) Set(s string) error {
|
||||
func (x *SizeSuffix) Type() string {
|
||||
return "int64"
|
||||
}
|
||||
|
||||
// Scan implements the fmt.Scanner interface
|
||||
func (x *SizeSuffix) Scan(s fmt.ScanState, ch rune) error {
|
||||
token, err := s.Token(true, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return x.Set(string(token))
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
@@ -93,3 +94,11 @@ func TestSizeSuffixSet(t *testing.T) {
|
||||
assert.Equal(t, test.want, int64(ss))
|
||||
}
|
||||
}
|
||||
|
||||
func TestSizeSuffixScan(t *testing.T) {
|
||||
var v SizeSuffix
|
||||
n, err := fmt.Sscan(" 17M ", &v)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, n)
|
||||
assert.Equal(t, SizeSuffix(17<<20), v)
|
||||
}
|
||||
|
||||
@@ -522,13 +522,18 @@ func copyEmptyDirectories(f fs.Fs, entries map[string]fs.DirEntry) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func parentDirCheck(entries map[string]fs.DirEntry, entry fs.DirEntry) {
|
||||
func (s *syncCopyMove) srcParentDirCheck(entry fs.DirEntry) {
|
||||
// If we are moving files then we don't want to remove directories with files in them
|
||||
// from the srcEmptyDirs as we are about to move them making the directory empty.
|
||||
if s.DoMove {
|
||||
return
|
||||
}
|
||||
parentDir := path.Dir(entry.Remote())
|
||||
if parentDir == "." {
|
||||
parentDir = ""
|
||||
}
|
||||
if _, ok := entries[parentDir]; ok {
|
||||
delete(entries, parentDir)
|
||||
if _, ok := s.srcEmptyDirs[parentDir]; ok {
|
||||
delete(s.srcEmptyDirs, parentDir)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -771,10 +776,11 @@ func (s *syncCopyMove) SrcOnly(src fs.DirEntry) (recurse bool) {
|
||||
}
|
||||
switch x := src.(type) {
|
||||
case fs.Object:
|
||||
// Remove parent directory from srcEmptyDirs
|
||||
// If it's a copy operation,
|
||||
// remove parent directory from srcEmptyDirs
|
||||
// since it's not really empty
|
||||
s.srcEmptyDirsMu.Lock()
|
||||
parentDirCheck(s.srcEmptyDirs, src)
|
||||
s.srcParentDirCheck(src)
|
||||
s.srcEmptyDirsMu.Unlock()
|
||||
|
||||
if s.trackRenames {
|
||||
@@ -796,7 +802,7 @@ func (s *syncCopyMove) SrcOnly(src fs.DirEntry) (recurse bool) {
|
||||
// Do the same thing to the entire contents of the directory
|
||||
// Record the directory for deletion
|
||||
s.srcEmptyDirsMu.Lock()
|
||||
parentDirCheck(s.srcEmptyDirs, src)
|
||||
s.srcParentDirCheck(src)
|
||||
s.srcEmptyDirs[src.Remote()] = src
|
||||
s.srcEmptyDirsMu.Unlock()
|
||||
return true
|
||||
@@ -811,7 +817,7 @@ func (s *syncCopyMove) Match(dst, src fs.DirEntry) (recurse bool) {
|
||||
switch srcX := src.(type) {
|
||||
case fs.Object:
|
||||
s.srcEmptyDirsMu.Lock()
|
||||
parentDirCheck(s.srcEmptyDirs, src)
|
||||
s.srcParentDirCheck(src)
|
||||
s.srcEmptyDirsMu.Unlock()
|
||||
|
||||
if s.deleteMode == fs.DeleteModeOnly {
|
||||
@@ -836,7 +842,7 @@ func (s *syncCopyMove) Match(dst, src fs.DirEntry) (recurse bool) {
|
||||
if ok {
|
||||
// Record the src directory for deletion
|
||||
s.srcEmptyDirsMu.Lock()
|
||||
parentDirCheck(s.srcEmptyDirs, src)
|
||||
s.srcParentDirCheck(src)
|
||||
s.srcEmptyDirs[src.Remote()] = src
|
||||
s.srcEmptyDirsMu.Unlock()
|
||||
return true
|
||||
|
||||
@@ -947,6 +947,52 @@ func testServerSideMove(t *testing.T, r *fstest.Run, withFilter, testDeleteEmpty
|
||||
}
|
||||
}
|
||||
|
||||
// Test move
|
||||
func TestMoveWithDeleteEmptySrcDirs(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||
file2 := r.WriteFile("nested/sub dir/file", "nested", t1)
|
||||
r.Mkdir(r.Fremote)
|
||||
|
||||
// run move with --delete-empty-src-dirs
|
||||
err := MoveDir(r.Fremote, r.Flocal, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckListingWithPrecision(
|
||||
t,
|
||||
r.Flocal,
|
||||
nil,
|
||||
[]string{},
|
||||
fs.GetModifyWindow(r.Flocal),
|
||||
)
|
||||
fstest.CheckItems(t, r.Fremote, file1, file2)
|
||||
}
|
||||
|
||||
func TestMoveWithoutDeleteEmptySrcDirs(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||
file2 := r.WriteFile("nested/sub dir/file", "nested", t1)
|
||||
r.Mkdir(r.Fremote)
|
||||
|
||||
err := MoveDir(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckListingWithPrecision(
|
||||
t,
|
||||
r.Flocal,
|
||||
nil,
|
||||
[]string{
|
||||
"sub dir",
|
||||
"nested",
|
||||
"nested/sub dir",
|
||||
},
|
||||
fs.GetModifyWindow(r.Flocal),
|
||||
)
|
||||
fstest.CheckItems(t, r.Fremote, file1, file2)
|
||||
}
|
||||
|
||||
// Test a server side move if possible, or the backup path if not
|
||||
func TestServerSideMove(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/skratchdot/open-golang/open"
|
||||
@@ -85,9 +86,9 @@ type oldToken struct {
|
||||
|
||||
// GetToken returns the token saved in the config file under
|
||||
// section name.
|
||||
func GetToken(name string) (*oauth2.Token, error) {
|
||||
tokenString := config.FileGet(name, config.ConfigToken)
|
||||
if tokenString == "" {
|
||||
func GetToken(name string, m configmap.Mapper) (*oauth2.Token, error) {
|
||||
tokenString, ok := m.Get(config.ConfigToken)
|
||||
if !ok || tokenString == "" {
|
||||
return nil, errors.New("empty token found - please run rclone config again")
|
||||
}
|
||||
token := new(oauth2.Token)
|
||||
@@ -110,7 +111,7 @@ func GetToken(name string) (*oauth2.Token, error) {
|
||||
token.RefreshToken = oldtoken.RefreshToken
|
||||
token.Expiry = oldtoken.Expiry
|
||||
// Save new format in config file
|
||||
err = PutToken(name, token, false)
|
||||
err = PutToken(name, m, token, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -120,14 +121,14 @@ func GetToken(name string) (*oauth2.Token, error) {
|
||||
// PutToken stores the token in the config file
|
||||
//
|
||||
// This saves the config file if it changes
|
||||
func PutToken(name string, token *oauth2.Token, newSection bool) error {
|
||||
func PutToken(name string, m configmap.Mapper, token *oauth2.Token, newSection bool) error {
|
||||
tokenBytes, err := json.Marshal(token)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tokenString := string(tokenBytes)
|
||||
old := config.FileGet(name, config.ConfigToken)
|
||||
if tokenString != old {
|
||||
old, ok := m.Get(config.ConfigToken)
|
||||
if !ok || tokenString != old {
|
||||
err = config.SetValueAndSave(name, config.ConfigToken, tokenString)
|
||||
if newSection && err != nil {
|
||||
fs.Debugf(name, "Added new token to config, still needs to be saved")
|
||||
@@ -144,6 +145,7 @@ func PutToken(name string, token *oauth2.Token, newSection bool) error {
|
||||
type TokenSource struct {
|
||||
mu sync.Mutex
|
||||
name string
|
||||
m configmap.Mapper
|
||||
tokenSource oauth2.TokenSource
|
||||
token *oauth2.Token
|
||||
config *oauth2.Config
|
||||
@@ -176,7 +178,7 @@ func (ts *TokenSource) Token() (*oauth2.Token, error) {
|
||||
if ts.expiryTimer != nil {
|
||||
ts.expiryTimer.Reset(ts.timeToExpiry())
|
||||
}
|
||||
err = PutToken(ts.name, token, false)
|
||||
err = PutToken(ts.name, ts.m, token, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -229,27 +231,27 @@ func Context(client *http.Client) context.Context {
|
||||
// config file if they are not blank.
|
||||
// If any value is overridden, true is returned.
|
||||
// the origConfig is copied
|
||||
func overrideCredentials(name string, origConfig *oauth2.Config) (newConfig *oauth2.Config, changed bool) {
|
||||
func overrideCredentials(name string, m configmap.Mapper, origConfig *oauth2.Config) (newConfig *oauth2.Config, changed bool) {
|
||||
newConfig = new(oauth2.Config)
|
||||
*newConfig = *origConfig
|
||||
changed = false
|
||||
ClientID := config.FileGet(name, config.ConfigClientID)
|
||||
if ClientID != "" {
|
||||
ClientID, ok := m.Get(config.ConfigClientID)
|
||||
if ok && ClientID != "" {
|
||||
newConfig.ClientID = ClientID
|
||||
changed = true
|
||||
}
|
||||
ClientSecret := config.FileGet(name, config.ConfigClientSecret)
|
||||
if ClientSecret != "" {
|
||||
ClientSecret, ok := m.Get(config.ConfigClientSecret)
|
||||
if ok && ClientSecret != "" {
|
||||
newConfig.ClientSecret = ClientSecret
|
||||
changed = true
|
||||
}
|
||||
AuthURL := config.FileGet(name, config.ConfigAuthURL)
|
||||
if AuthURL != "" {
|
||||
AuthURL, ok := m.Get(config.ConfigAuthURL)
|
||||
if ok && AuthURL != "" {
|
||||
newConfig.Endpoint.AuthURL = AuthURL
|
||||
changed = true
|
||||
}
|
||||
TokenURL := config.FileGet(name, config.ConfigTokenURL)
|
||||
if TokenURL != "" {
|
||||
TokenURL, ok := m.Get(config.ConfigTokenURL)
|
||||
if ok && TokenURL != "" {
|
||||
newConfig.Endpoint.TokenURL = TokenURL
|
||||
changed = true
|
||||
}
|
||||
@@ -260,9 +262,9 @@ func overrideCredentials(name string, origConfig *oauth2.Config) (newConfig *oau
|
||||
// configures a Client with it. It returns the client and a
|
||||
// TokenSource which Invalidate may need to be called on. It uses the
|
||||
// httpClient passed in as the base client.
|
||||
func NewClientWithBaseClient(name string, config *oauth2.Config, baseClient *http.Client) (*http.Client, *TokenSource, error) {
|
||||
config, _ = overrideCredentials(name, config)
|
||||
token, err := GetToken(name)
|
||||
func NewClientWithBaseClient(name string, m configmap.Mapper, config *oauth2.Config, baseClient *http.Client) (*http.Client, *TokenSource, error) {
|
||||
config, _ = overrideCredentials(name, m, config)
|
||||
token, err := GetToken(name, m)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -274,6 +276,7 @@ func NewClientWithBaseClient(name string, config *oauth2.Config, baseClient *htt
|
||||
// tokens in the config file
|
||||
ts := &TokenSource{
|
||||
name: name,
|
||||
m: m,
|
||||
token: token,
|
||||
config: config,
|
||||
ctx: ctx,
|
||||
@@ -284,36 +287,37 @@ func NewClientWithBaseClient(name string, config *oauth2.Config, baseClient *htt
|
||||
|
||||
// NewClient gets a token from the config file and configures a Client
|
||||
// with it. It returns the client and a TokenSource which Invalidate may need to be called on
|
||||
func NewClient(name string, oauthConfig *oauth2.Config) (*http.Client, *TokenSource, error) {
|
||||
return NewClientWithBaseClient(name, oauthConfig, fshttp.NewClient(fs.Config))
|
||||
func NewClient(name string, m configmap.Mapper, oauthConfig *oauth2.Config) (*http.Client, *TokenSource, error) {
|
||||
return NewClientWithBaseClient(name, m, oauthConfig, fshttp.NewClient(fs.Config))
|
||||
}
|
||||
|
||||
// Config does the initial creation of the token
|
||||
//
|
||||
// It may run an internal webserver to receive the results
|
||||
func Config(id, name string, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
|
||||
return doConfig(id, name, nil, config, true, opts)
|
||||
func Config(id, name string, m configmap.Mapper, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
|
||||
return doConfig(id, name, m, nil, config, true, opts)
|
||||
}
|
||||
|
||||
// ConfigNoOffline does the same as Config but does not pass the
|
||||
// "access_type=offline" parameter.
|
||||
func ConfigNoOffline(id, name string, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
|
||||
return doConfig(id, name, nil, config, false, opts)
|
||||
func ConfigNoOffline(id, name string, m configmap.Mapper, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
|
||||
return doConfig(id, name, m, nil, config, false, opts)
|
||||
}
|
||||
|
||||
// ConfigErrorCheck does the same as Config, but allows the backend to pass an error handling function
|
||||
// This function gets called with the request made to rclone as a parameter if no code was found
|
||||
func ConfigErrorCheck(id, name string, errorHandler func(*http.Request) AuthError, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
|
||||
return doConfig(id, name, errorHandler, config, true, opts)
|
||||
func ConfigErrorCheck(id, name string, m configmap.Mapper, errorHandler func(*http.Request) AuthError, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
|
||||
return doConfig(id, name, m, errorHandler, config, true, opts)
|
||||
}
|
||||
|
||||
func doConfig(id, name string, errorHandler func(*http.Request) AuthError, oauthConfig *oauth2.Config, offline bool, opts []oauth2.AuthCodeOption) error {
|
||||
oauthConfig, changed := overrideCredentials(name, oauthConfig)
|
||||
automatic := config.FileGet(name, config.ConfigAutomatic) != ""
|
||||
func doConfig(id, name string, m configmap.Mapper, errorHandler func(*http.Request) AuthError, oauthConfig *oauth2.Config, offline bool, opts []oauth2.AuthCodeOption) error {
|
||||
oauthConfig, changed := overrideCredentials(name, m, oauthConfig)
|
||||
auto, ok := m.Get(config.ConfigAutomatic)
|
||||
automatic := ok && auto != ""
|
||||
|
||||
// See if already have a token
|
||||
tokenString := config.FileGet(name, "token")
|
||||
if tokenString != "" {
|
||||
tokenString, ok := m.Get("token")
|
||||
if ok && tokenString != "" {
|
||||
fmt.Printf("Already have a token - refresh?\n")
|
||||
if !config.Confirm() {
|
||||
return nil
|
||||
@@ -354,7 +358,7 @@ func doConfig(id, name string, errorHandler func(*http.Request) AuthError, oauth
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return PutToken(name, token, false)
|
||||
return PutToken(name, m, token, false)
|
||||
}
|
||||
case TitleBarRedirectURL:
|
||||
useWebServer = automatic
|
||||
@@ -436,7 +440,7 @@ func doConfig(id, name string, errorHandler func(*http.Request) AuthError, oauth
|
||||
}
|
||||
fmt.Printf("Paste the following into your remote machine --->\n%s\n<---End paste", result)
|
||||
}
|
||||
return PutToken(name, token, true)
|
||||
return PutToken(name, m, token, true)
|
||||
}
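The hunks above move the oauth helpers from config.FileGet lookups to a configmap.Mapper supplied by the caller, using the value/ok pattern visible in overrideCredentials and PutToken. Below is a minimal standalone sketch of that pattern; the mapper interface and simpleMap type are illustrative stand-ins invented for the example, not rclone's actual configmap package.

    // Sketch only: illustrates the value/ok lookup that replaces config.FileGet.
    package main

    import "fmt"

    // mapper mirrors the Get half of the assumed configmap.Mapper interface.
    type mapper interface {
        Get(key string) (value string, ok bool)
    }

    // simpleMap is a hypothetical in-memory implementation used for illustration.
    type simpleMap map[string]string

    func (m simpleMap) Get(key string) (string, bool) {
        v, ok := m[key]
        return v, ok
    }

    // override applies a value from the mapper only when it is present and
    // non-empty, the same pattern overrideCredentials now uses.
    func override(m mapper, key, base string) (string, bool) {
        if v, ok := m.Get(key); ok && v != "" {
            return v, true
        }
        return base, false
    }

    func main() {
        m := simpleMap{"client_id": "abc"}
        id, changed := override(m, "client_id", "default-id")
        fmt.Println(id, changed) // abc true
        secret, changed := override(m, "client_secret", "default-secret")
        fmt.Println(secret, changed) // default-secret false
    }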
|
||||
|
||||
// Local web server for collecting auth
|
||||
|
||||
@@ -143,6 +143,7 @@ type Opts struct {
|
||||
Parameters url.Values // any parameters for the final URL
|
||||
TransferEncoding []string // transfer encoding, set to "identity" to disable chunked encoding
|
||||
Close bool // set to close the connection after this transaction
|
||||
NoRedirect bool // if this is set then the client won't follow redirects
|
||||
}
|
||||
|
||||
// Copy creates a copy of the options
|
||||
@@ -189,6 +190,15 @@ func ClientWithHeaderReset(c *http.Client, headers map[string]string) *http.Clie
|
||||
return &clientCopy
|
||||
}
|
||||
|
||||
// ClientWithNoRedirects makes a new http client which won't follow redirects
|
||||
func ClientWithNoRedirects(c *http.Client) *http.Client {
|
||||
clientCopy := *c
|
||||
clientCopy.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
return &clientCopy
|
||||
}
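The NoRedirect option added above selects a copy of the client whose CheckRedirect hook returns http.ErrUseLastResponse, so a 3xx response is handed back to the caller instead of being followed. A minimal standard-library sketch of that pattern; the URL is a placeholder:

    // Sketch only: copy an *http.Client and stop it from following redirects.
    package main

    import (
        "fmt"
        "net/http"
    )

    func clientWithNoRedirects(c *http.Client) *http.Client {
        clientCopy := *c // shallow copy so the original client keeps its behaviour
        clientCopy.CheckRedirect = func(req *http.Request, via []*http.Request) error {
            return http.ErrUseLastResponse // return the 3xx response as-is
        }
        return &clientCopy
    }

    func main() {
        c := clientWithNoRedirects(http.DefaultClient)
        resp, err := c.Get("http://example.com/redirecting-endpoint") // placeholder URL
        if err != nil {
            fmt.Println("request failed:", err)
            return
        }
        defer resp.Body.Close()
        fmt.Println(resp.StatusCode, resp.Header.Get("Location"))
    }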
|
||||
|
||||
// Call makes the call and returns the http.Response
|
||||
//
|
||||
// if err != nil then resp.Body will need to be closed
|
||||
@@ -252,7 +262,12 @@ func (api *Client) Call(opts *Opts) (resp *http.Response, err error) {
|
||||
if opts.UserName != "" || opts.Password != "" {
|
||||
req.SetBasicAuth(opts.UserName, opts.Password)
|
||||
}
|
||||
c := ClientWithHeaderReset(api.c, headers)
|
||||
var c *http.Client
|
||||
if opts.NoRedirect {
|
||||
c = ClientWithNoRedirects(api.c)
|
||||
} else {
|
||||
c = ClientWithHeaderReset(api.c, headers)
|
||||
}
|
||||
if api.signer != nil {
|
||||
err = api.signer(req)
|
||||
if err != nil {
|
||||
|
||||
@@ -4,8 +4,6 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/ncw/rclone/cmd"
|
||||
|
||||
_ "github.com/ncw/rclone/backend/all" // import all backends
|
||||
@@ -13,7 +11,5 @@ import (
|
||||
)
|
||||
|
||||
func main() {
|
||||
if err := cmd.Root.Execute(); err != nil {
|
||||
log.Fatalf("Fatal error: %v", err)
|
||||
}
|
||||
cmd.Main()
|
||||
}
|
||||
|
||||
288 vendor/github.com/Azure/azure-pipeline-go/.gitignore generated vendored Normal file
@@ -0,0 +1,288 @@
|
||||
## Ignore Visual Studio temporary files, build results, and
|
||||
## files generated by popular Visual Studio add-ons.
|
||||
##
|
||||
## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
|
||||
|
||||
# User-specific files
|
||||
*.suo
|
||||
*.user
|
||||
*.userosscache
|
||||
*.sln.docstates
|
||||
|
||||
# User-specific files (MonoDevelop/Xamarin Studio)
|
||||
*.userprefs
|
||||
|
||||
# Build results
|
||||
[Dd]ebug/
|
||||
[Dd]ebugPublic/
|
||||
[Rr]elease/
|
||||
[Rr]eleases/
|
||||
x64/
|
||||
x86/
|
||||
bld/
|
||||
[Bb]in/
|
||||
[Oo]bj/
|
||||
[Ll]og/
|
||||
|
||||
# Visual Studio 2015 cache/options directory
|
||||
.vs/
|
||||
# Uncomment if you have tasks that create the project's static files in wwwroot
|
||||
#wwwroot/
|
||||
|
||||
# MSTest test Results
|
||||
[Tt]est[Rr]esult*/
|
||||
[Bb]uild[Ll]og.*
|
||||
|
||||
# NUNIT
|
||||
*.VisualState.xml
|
||||
TestResult.xml
|
||||
|
||||
# Build Results of an ATL Project
|
||||
[Dd]ebugPS/
|
||||
[Rr]eleasePS/
|
||||
dlldata.c
|
||||
|
||||
# .NET Core
|
||||
project.lock.json
|
||||
project.fragment.lock.json
|
||||
artifacts/
|
||||
**/Properties/launchSettings.json
|
||||
|
||||
*_i.c
|
||||
*_p.c
|
||||
*_i.h
|
||||
*.ilk
|
||||
*.meta
|
||||
*.obj
|
||||
*.pch
|
||||
*.pdb
|
||||
*.pgc
|
||||
*.pgd
|
||||
*.rsp
|
||||
*.sbr
|
||||
*.tlb
|
||||
*.tli
|
||||
*.tlh
|
||||
*.tmp
|
||||
*.tmp_proj
|
||||
*.log
|
||||
*.vspscc
|
||||
*.vssscc
|
||||
.builds
|
||||
*.pidb
|
||||
*.svclog
|
||||
*.scc
|
||||
|
||||
# Chutzpah Test files
|
||||
_Chutzpah*
|
||||
|
||||
# Visual C++ cache files
|
||||
ipch/
|
||||
*.aps
|
||||
*.ncb
|
||||
*.opendb
|
||||
*.opensdf
|
||||
*.sdf
|
||||
*.cachefile
|
||||
*.VC.db
|
||||
*.VC.VC.opendb
|
||||
|
||||
# Visual Studio profiler
|
||||
*.psess
|
||||
*.vsp
|
||||
*.vspx
|
||||
*.sap
|
||||
|
||||
# TFS 2012 Local Workspace
|
||||
$tf/
|
||||
|
||||
# Guidance Automation Toolkit
|
||||
*.gpState
|
||||
|
||||
# ReSharper is a .NET coding add-in
|
||||
_ReSharper*/
|
||||
*.[Rr]e[Ss]harper
|
||||
*.DotSettings.user
|
||||
|
||||
# JustCode is a .NET coding add-in
|
||||
.JustCode
|
||||
|
||||
# TeamCity is a build add-in
|
||||
_TeamCity*
|
||||
|
||||
# DotCover is a Code Coverage Tool
|
||||
*.dotCover
|
||||
|
||||
# Visual Studio code coverage results
|
||||
*.coverage
|
||||
*.coveragexml
|
||||
|
||||
# NCrunch
|
||||
_NCrunch_*
|
||||
.*crunch*.local.xml
|
||||
nCrunchTemp_*
|
||||
|
||||
# MightyMoose
|
||||
*.mm.*
|
||||
AutoTest.Net/
|
||||
|
||||
# Web workbench (sass)
|
||||
.sass-cache/
|
||||
|
||||
# Installshield output folder
|
||||
[Ee]xpress/
|
||||
|
||||
# DocProject is a documentation generator add-in
|
||||
DocProject/buildhelp/
|
||||
DocProject/Help/*.HxT
|
||||
DocProject/Help/*.HxC
|
||||
DocProject/Help/*.hhc
|
||||
DocProject/Help/*.hhk
|
||||
DocProject/Help/*.hhp
|
||||
DocProject/Help/Html2
|
||||
DocProject/Help/html
|
||||
|
||||
# Click-Once directory
|
||||
publish/
|
||||
|
||||
# Publish Web Output
|
||||
*.[Pp]ublish.xml
|
||||
*.azurePubxml
|
||||
# TODO: Comment the next line if you want to checkin your web deploy settings
|
||||
# but database connection strings (with potential passwords) will be unencrypted
|
||||
*.pubxml
|
||||
*.publishproj
|
||||
|
||||
# Microsoft Azure Web App publish settings. Comment the next line if you want to
|
||||
# checkin your Azure Web App publish settings, but sensitive information contained
|
||||
# in these scripts will be unencrypted
|
||||
PublishScripts/
|
||||
|
||||
# NuGet Packages
|
||||
*.nupkg
|
||||
# The packages folder can be ignored because of Package Restore
|
||||
**/packages/*
|
||||
# except build/, which is used as an MSBuild target.
|
||||
!**/packages/build/
|
||||
# Uncomment if necessary however generally it will be regenerated when needed
|
||||
#!**/packages/repositories.config
|
||||
# NuGet v3's project.json files produces more ignorable files
|
||||
*.nuget.props
|
||||
*.nuget.targets
|
||||
|
||||
# Microsoft Azure Build Output
|
||||
csx/
|
||||
*.build.csdef
|
||||
|
||||
# Microsoft Azure Emulator
|
||||
ecf/
|
||||
rcf/
|
||||
|
||||
# Windows Store app package directories and files
|
||||
AppPackages/
|
||||
BundleArtifacts/
|
||||
Package.StoreAssociation.xml
|
||||
_pkginfo.txt
|
||||
|
||||
# Visual Studio cache files
|
||||
# files ending in .cache can be ignored
|
||||
*.[Cc]ache
|
||||
# but keep track of directories ending in .cache
|
||||
!*.[Cc]ache/
|
||||
|
||||
# Others
|
||||
ClientBin/
|
||||
~$*
|
||||
*~
|
||||
*.dbmdl
|
||||
*.dbproj.schemaview
|
||||
*.jfm
|
||||
*.pfx
|
||||
*.publishsettings
|
||||
orleans.codegen.cs
|
||||
|
||||
# Since there are multiple workflows, uncomment next line to ignore bower_components
|
||||
# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
|
||||
#bower_components/
|
||||
|
||||
# RIA/Silverlight projects
|
||||
Generated_Code/
|
||||
|
||||
# Backup & report files from converting an old project file
|
||||
# to a newer Visual Studio version. Backup files are not needed,
|
||||
# because we have git ;-)
|
||||
_UpgradeReport_Files/
|
||||
Backup*/
|
||||
UpgradeLog*.XML
|
||||
UpgradeLog*.htm
|
||||
|
||||
# SQL Server files
|
||||
*.mdf
|
||||
*.ldf
|
||||
*.ndf
|
||||
|
||||
# Business Intelligence projects
|
||||
*.rdl.data
|
||||
*.bim.layout
|
||||
*.bim_*.settings
|
||||
|
||||
# Microsoft Fakes
|
||||
FakesAssemblies/
|
||||
|
||||
# GhostDoc plugin setting file
|
||||
*.GhostDoc.xml
|
||||
|
||||
# Node.js Tools for Visual Studio
|
||||
.ntvs_analysis.dat
|
||||
node_modules/
|
||||
|
||||
# Typescript v1 declaration files
|
||||
typings/
|
||||
|
||||
# Visual Studio 6 build log
|
||||
*.plg
|
||||
|
||||
# Visual Studio 6 workspace options file
|
||||
*.opt
|
||||
|
||||
# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
|
||||
*.vbw
|
||||
|
||||
# Visual Studio LightSwitch build output
|
||||
**/*.HTMLClient/GeneratedArtifacts
|
||||
**/*.DesktopClient/GeneratedArtifacts
|
||||
**/*.DesktopClient/ModelManifest.xml
|
||||
**/*.Server/GeneratedArtifacts
|
||||
**/*.Server/ModelManifest.xml
|
||||
_Pvt_Extensions
|
||||
|
||||
# Paket dependency manager
|
||||
.paket/paket.exe
|
||||
paket-files/
|
||||
|
||||
# FAKE - F# Make
|
||||
.fake/
|
||||
|
||||
# JetBrains Rider
|
||||
.idea/
|
||||
*.sln.iml
|
||||
|
||||
# CodeRush
|
||||
.cr/
|
||||
|
||||
# Python Tools for Visual Studio (PTVS)
|
||||
__pycache__/
|
||||
*.pyc
|
||||
|
||||
# Cake - Uncomment if you are using it
|
||||
# tools/**
|
||||
# !tools/packages.config
|
||||
|
||||
# Telerik's JustMock configuration file
|
||||
*.jmconfig
|
||||
|
||||
# BizTalk build output
|
||||
*.btp.cs
|
||||
*.btm.cs
|
||||
*.odx.cs
|
||||
*.xsd.cs
|
||||
21 vendor/github.com/Azure/azure-pipeline-go/LICENSE generated vendored Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) Microsoft Corporation. All rights reserved.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE
|
||||
14 vendor/github.com/Azure/azure-pipeline-go/README.md generated vendored Normal file
@@ -0,0 +1,14 @@
|
||||
|
||||
# Contributing
|
||||
|
||||
This project welcomes contributions and suggestions. Most contributions require you to agree to a
|
||||
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
|
||||
the rights to use your contribution. For details, visit https://cla.microsoft.com.
|
||||
|
||||
When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
|
||||
a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
|
||||
provided by the bot. You will only need to do this once across all repos using our CLA.
|
||||
|
||||
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
|
||||
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
|
||||
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
|
||||
255 vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go generated vendored Executable file
@@ -0,0 +1,255 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// The Factory interface represents an object that can create its Policy object. Each HTTP request sent
|
||||
// requires that this Factory create a new instance of its Policy object.
|
||||
type Factory interface {
|
||||
New(next Policy, po *PolicyOptions) Policy
|
||||
}
|
||||
|
||||
// FactoryFunc is an adapter that allows the use of an ordinary function as a Factory interface.
|
||||
type FactoryFunc func(next Policy, po *PolicyOptions) PolicyFunc
|
||||
|
||||
// New calls f(next,po).
|
||||
func (f FactoryFunc) New(next Policy, po *PolicyOptions) Policy {
|
||||
return f(next, po)
|
||||
}
|
||||
|
||||
// The Policy interface represents a mutable Policy object created by a Factory. The object can mutate/process
|
||||
// the HTTP request and then forward it on to the next Policy object in the linked-list. The returned
|
||||
// Response goes backward through the linked-list for additional processing.
|
||||
// NOTE: Request is passed by value so changes do not change the caller's version of
|
||||
// the request. However, Request has some fields that reference mutable objects (not strings).
|
||||
// These references are copied; a deep copy is not performed. Specifically, this means that
|
||||
// you should avoid modifying the objects referred to by these fields: URL, Header, Body,
|
||||
// GetBody, TransferEncoding, Form, MultipartForm, Trailer, TLS, Cancel, and Response.
|
||||
type Policy interface {
|
||||
Do(ctx context.Context, request Request) (Response, error)
|
||||
}
|
||||
|
||||
// PolicyFunc is an adapter that allows the use of an ordinary function as a Policy interface.
|
||||
type PolicyFunc func(ctx context.Context, request Request) (Response, error)
|
||||
|
||||
// Do calls f(ctx, request).
|
||||
func (f PolicyFunc) Do(ctx context.Context, request Request) (Response, error) {
|
||||
return f(ctx, request)
|
||||
}
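As a rough illustration of the FactoryFunc/PolicyFunc adapters above, the hypothetical factory below stamps a header on every request before forwarding it to the next policy in the chain; the names newHeaderFactory and policyexample are invented for the sketch.

    package policyexample

    import (
        "context"

        "github.com/Azure/azure-pipeline-go/pipeline"
    )

    // newHeaderFactory returns a Factory whose Policy adds key: value to each request.
    func newHeaderFactory(key, value string) pipeline.Factory {
        return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
            return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
                request.Header.Set(key, value) // mutate the wrapped *http.Request
                return next.Do(ctx, request)   // forward to the next policy
            }
        })
    }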
|
||||
|
||||
// Options configures a Pipeline's behavior.
|
||||
type Options struct {
|
||||
HTTPSender Factory // If sender is nil, then the pipeline's default client is used to send the HTTP requests.
|
||||
Log LogOptions
|
||||
}
|
||||
|
||||
// LogLevel tells a logger the minimum level to log. When code reports a log entry,
|
||||
// the LogLevel indicates the level of the log entry. The logger only records entries
|
||||
// whose level is at least the level it was told to log. See the Log* constants.
|
||||
// For example, if a logger is configured with LogError, then LogError, LogPanic,
|
||||
// and LogFatal entries will be logged; lower level entries are ignored.
|
||||
type LogLevel uint32
|
||||
|
||||
const (
|
||||
// LogNone tells a logger not to log any entries passed to it.
|
||||
LogNone LogLevel = iota
|
||||
|
||||
// LogFatal tells a logger to log all LogFatal entries passed to it.
|
||||
LogFatal
|
||||
|
||||
// LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it.
|
||||
LogPanic
|
||||
|
||||
// LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it.
|
||||
LogError
|
||||
|
||||
// LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it.
|
||||
LogWarning
|
||||
|
||||
// LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
|
||||
LogInfo
|
||||
|
||||
// LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
|
||||
LogDebug
|
||||
)
|
||||
|
||||
// LogOptions configures the pipeline's logging mechanism & level filtering.
|
||||
type LogOptions struct {
|
||||
Log func(level LogLevel, message string)
|
||||
|
||||
// ShouldLog is called periodically allowing you to return whether the specified LogLevel should be logged or not.
|
||||
// An application can return different values over its lifetime; this allows the application to dynamically
|
||||
// alter what is logged. NOTE: This method can be called by multiple goroutines simultaneously so make sure
|
||||
// you implement it in a goroutine-safe way. If nil, nothing is logged (the equivalent of returning LogNone).
|
||||
// Usually, the function will be implemented simply like this: return level <= LogWarning
|
||||
ShouldLog func(level LogLevel) bool
|
||||
}
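A short sketch of the LogOptions filtering described above, assuming the caller wants LogWarning and more severe entries written to the standard logger (the return level <= LogWarning form suggested in the comment):

    package logexample

    import (
        "log"

        "github.com/Azure/azure-pipeline-go/pipeline"
    )

    // newOptions builds pipeline Options that log LogWarning and more severe entries.
    func newOptions() pipeline.Options {
        return pipeline.Options{
            Log: pipeline.LogOptions{
                ShouldLog: func(level pipeline.LogLevel) bool {
                    return level <= pipeline.LogWarning // LogFatal, LogPanic, LogError, LogWarning
                },
                Log: func(level pipeline.LogLevel, message string) {
                    log.Printf("[%d] %s", level, message)
                },
            },
        }
    }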
|
||||
|
||||
type pipeline struct {
|
||||
factories []Factory
|
||||
options Options
|
||||
}
|
||||
|
||||
// The Pipeline interface represents an ordered list of Factory objects and an object implementing the HTTPSender interface.
|
||||
// You construct a Pipeline by calling the pipeline.NewPipeline function. To send an HTTP request, call pipeline.NewRequest
|
||||
// and then call Pipeline's Do method passing a context, the request, and a method-specific Factory (or nil). Passing a
|
||||
// method-specific Factory allows this one call to Do to inject a Policy into the linked-list. The policy is injected where
|
||||
// the MethodFactoryMarker (see the pipeline.MethodFactoryMarker function) is in the slice of Factory objects.
|
||||
//
|
||||
// When Do is called, the Pipeline object asks each Factory object to construct its Policy object and adds each Policy to a linked-list.
|
||||
// Then, Do sends the Context and Request through all the Policy objects. The final Policy object sends the request over the network
|
||||
// (via the HTTPSender object passed to NewPipeline) and the response is returned backwards through all the Policy objects.
|
||||
// Since Pipeline and Factory objects are goroutine-safe, you typically create 1 Pipeline object and reuse it to make many HTTP requests.
|
||||
type Pipeline interface {
|
||||
Do(ctx context.Context, methodFactory Factory, request Request) (Response, error)
|
||||
}
|
||||
|
||||
// NewPipeline creates a new goroutine-safe Pipeline object from the slice of Factory objects and the specified options.
|
||||
func NewPipeline(factories []Factory, o Options) Pipeline {
|
||||
if o.HTTPSender == nil {
|
||||
o.HTTPSender = newDefaultHTTPClientFactory()
|
||||
}
|
||||
if o.Log.Log == nil {
|
||||
o.Log.Log = func(LogLevel, string) {} // No-op logger
|
||||
}
|
||||
return &pipeline{factories: factories, options: o}
|
||||
}
|
||||
|
||||
// Do is called for each and every HTTP request. It tells each Factory to create its own (mutable) Policy object
|
||||
// replacing a MethodFactoryMarker factory (if it exists) with the methodFactory passed in. Then, the Context and Request
|
||||
// are sent through the pipeline of Policy objects (which can transform the Request's URL/query parameters/headers) and
|
||||
// ultimately sends the transformed HTTP request over the network.
|
||||
func (p *pipeline) Do(ctx context.Context, methodFactory Factory, request Request) (Response, error) {
|
||||
response, err := p.newPolicies(methodFactory).Do(ctx, request)
|
||||
request.close()
|
||||
return response, err
|
||||
}
|
||||
|
||||
func (p *pipeline) newPolicies(methodFactory Factory) Policy {
|
||||
// The last Policy is the one that actually sends the request over the wire and gets the response.
|
||||
// It is overridable via the Options' HTTPSender field.
|
||||
po := &PolicyOptions{pipeline: p} // One object shared by all policy objects
|
||||
next := p.options.HTTPSender.New(nil, po)
|
||||
|
||||
// Walk over the slice of Factory objects in reverse (from wire to API)
|
||||
markers := 0
|
||||
for i := len(p.factories) - 1; i >= 0; i-- {
|
||||
factory := p.factories[i]
|
||||
if _, ok := factory.(methodFactoryMarker); ok {
|
||||
markers++
|
||||
if markers > 1 {
|
||||
panic("MethodFactoryMarker can only appear once in the pipeline")
|
||||
}
|
||||
if methodFactory != nil {
|
||||
// Replace MethodFactoryMarker with passed-in methodFactory
|
||||
next = methodFactory.New(next, po)
|
||||
}
|
||||
} else {
|
||||
// Use the slice's Factory to construct its Policy
|
||||
next = factory.New(next, po)
|
||||
}
|
||||
}
|
||||
|
||||
// Each Factory has created its Policy
|
||||
if markers == 0 && methodFactory != nil {
|
||||
panic("Non-nil methodFactory requires MethodFactoryMarker in the pipeline")
|
||||
}
|
||||
return next // Return head of the Policy object linked-list
|
||||
}
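Putting the pieces together, a compact sketch of building a Pipeline from a slice of factories (including a MethodFactoryMarker entry) and pushing one request through it; headerFactory can be any Factory, the URL is a placeholder, and passing nil for the method-specific factory simply leaves the marker unused.

    package pipelineexample

    import (
        "context"
        "net/url"

        "github.com/Azure/azure-pipeline-go/pipeline"
    )

    func doOnce(headerFactory pipeline.Factory) error {
        p := pipeline.NewPipeline(
            []pipeline.Factory{headerFactory, pipeline.MethodFactoryMarker()},
            pipeline.Options{}, // nil HTTPSender -> default http.Client, no-op logger
        )

        u, err := url.Parse("https://example.com/") // placeholder URL for the sketch
        if err != nil {
            return err
        }
        req, err := pipeline.NewRequest("GET", *u, nil)
        if err != nil {
            return err
        }

        resp, err := p.Do(context.Background(), nil, req) // nil: no method-specific policy
        if err != nil {
            return err
        }
        return resp.Response().Body.Close()
    }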
|
||||
|
||||
// A PolicyOptions represents optional information that can be used by a node in the
|
||||
// linked-list of Policy objects. A PolicyOptions is passed to the Factory's New method
|
||||
// which passes it (if desired) to the Policy object it creates. Today, the Policy object
|
||||
// uses the options to perform logging. But, in the future, this could be used for more.
|
||||
type PolicyOptions struct {
|
||||
pipeline *pipeline
|
||||
}
|
||||
|
||||
// ShouldLog returns true if the specified log level should be logged.
|
||||
func (po *PolicyOptions) ShouldLog(level LogLevel) bool {
|
||||
if po.pipeline.options.Log.ShouldLog != nil {
|
||||
return po.pipeline.options.Log.ShouldLog(level)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Log logs a string to the Pipeline's Logger.
|
||||
func (po *PolicyOptions) Log(level LogLevel, msg string) {
|
||||
if !po.ShouldLog(level) {
|
||||
return // Short circuit message formatting if we're not logging it
|
||||
}
|
||||
|
||||
// We are logging it, ensure trailing newline
|
||||
if len(msg) == 0 || msg[len(msg)-1] != '\n' {
|
||||
msg += "\n" // Ensure trailing newline
|
||||
}
|
||||
po.pipeline.options.Log.Log(level, msg)
|
||||
|
||||
// If logger doesn't handle fatal/panic, we'll do it here.
|
||||
if level == LogFatal {
|
||||
os.Exit(1)
|
||||
} else if level == LogPanic {
|
||||
panic(msg)
|
||||
}
|
||||
}
|
||||
|
||||
var pipelineHTTPClient = newDefaultHTTPClient()
|
||||
|
||||
func newDefaultHTTPClient() *http.Client {
|
||||
// We want the Transport to have a large connection pool
|
||||
return &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
// We use Dial instead of DialContext as DialContext has been reported to cause slower performance.
|
||||
Dial /*Context*/ : (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
DualStack: true,
|
||||
}).Dial, /*Context*/
|
||||
MaxIdleConns: 0, // No limit
|
||||
MaxIdleConnsPerHost: 100,
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
ExpectContinueTimeout: 1 * time.Second,
|
||||
DisableKeepAlives: false,
|
||||
DisableCompression: false,
|
||||
MaxResponseHeaderBytes: 0,
|
||||
//ResponseHeaderTimeout: time.Duration{},
|
||||
//ExpectContinueTimeout: time.Duration{},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// newDefaultHTTPClientFactory creates a DefaultHTTPClientPolicyFactory object that sends HTTP requests to Go's default http.Client.
|
||||
func newDefaultHTTPClientFactory() Factory {
|
||||
return FactoryFunc(func(next Policy, po *PolicyOptions) PolicyFunc {
|
||||
return func(ctx context.Context, request Request) (Response, error) {
|
||||
r, err := pipelineHTTPClient.Do(request.WithContext(ctx))
|
||||
if err != nil {
|
||||
err = NewError(err, "HTTP request failed")
|
||||
}
|
||||
return NewHTTPResponse(r), err
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
var mfm = methodFactoryMarker{} // Singleton
|
||||
|
||||
// MethodFactoryMarker returns a special marker Factory object. When Pipeline's Do method is called, any
|
||||
// MethodFactoryMarker object is replaced with the specified methodFactory object. If nil is passed for Do's
|
||||
// methodFactory parameter, then the MethodFactoryMarker is ignored as the linked-list of Policy objects is created.
|
||||
func MethodFactoryMarker() Factory {
|
||||
return mfm
|
||||
}
|
||||
|
||||
type methodFactoryMarker struct {
|
||||
}
|
||||
|
||||
func (methodFactoryMarker) New(next Policy, po *PolicyOptions) Policy {
|
||||
panic("methodFactoryMarker policy should have been replaced with a method policy")
|
||||
}
|
||||
33 vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go generated vendored Executable file
@@ -0,0 +1,33 @@
|
||||
// +build !windows,!nacl,!plan9
|
||||
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"log"
|
||||
"log/syslog"
|
||||
)
|
||||
|
||||
// ForceLog should rarely be used. It forcibly logs an entry to the
|
||||
// Windows Event Log (on Windows) or to the SysLog (on Linux)
|
||||
func ForceLog(level LogLevel, msg string) {
|
||||
if defaultLogger == nil {
|
||||
return // Return fast if we failed to create the logger.
|
||||
}
|
||||
// We are logging it, ensure trailing newline
|
||||
if len(msg) == 0 || msg[len(msg)-1] != '\n' {
|
||||
msg += "\n" // Ensure trailing newline
|
||||
}
|
||||
switch level {
|
||||
case LogFatal:
|
||||
defaultLogger.Fatal(msg)
|
||||
case LogPanic:
|
||||
defaultLogger.Panic(msg)
|
||||
case LogError, LogWarning, LogInfo:
|
||||
defaultLogger.Print(msg)
|
||||
}
|
||||
}
|
||||
|
||||
var defaultLogger = func() *log.Logger {
|
||||
l, _ := syslog.NewLogger(syslog.LOG_USER|syslog.LOG_WARNING, log.LstdFlags)
|
||||
return l
|
||||
}()
|
||||
61 vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go generated vendored Executable file
@@ -0,0 +1,61 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// ForceLog should rarely be used. It forcibly logs an entry to the
|
||||
// Windows Event Log (on Windows) or to the SysLog (on Linux)
|
||||
func ForceLog(level LogLevel, msg string) {
|
||||
var el eventType
|
||||
switch level {
|
||||
case LogError, LogFatal, LogPanic:
|
||||
el = elError
|
||||
case LogWarning:
|
||||
el = elWarning
|
||||
case LogInfo:
|
||||
el = elInfo
|
||||
}
|
||||
// We are logging it, ensure trailing newline
|
||||
if len(msg) == 0 || msg[len(msg)-1] != '\n' {
|
||||
msg += "\n" // Ensure trailing newline
|
||||
}
|
||||
reportEvent(el, 0, msg)
|
||||
}
|
||||
|
||||
type eventType int16
|
||||
|
||||
const (
|
||||
elSuccess eventType = 0
|
||||
elError eventType = 1
|
||||
elWarning eventType = 2
|
||||
elInfo eventType = 4
|
||||
)
|
||||
|
||||
var reportEvent = func() func(eventType eventType, eventID int32, msg string) {
|
||||
advAPI32 := syscall.MustLoadDLL("AdvAPI32.dll")
|
||||
registerEventSource := advAPI32.MustFindProc("RegisterEventSourceW")
|
||||
|
||||
sourceName, _ := os.Executable()
|
||||
sourceNameUTF16, _ := syscall.UTF16PtrFromString(sourceName)
|
||||
handle, _, lastErr := registerEventSource.Call(uintptr(0), uintptr(unsafe.Pointer(sourceNameUTF16)))
|
||||
if lastErr == nil { // On error, logging is a no-op
|
||||
return func(eventType eventType, eventID int32, msg string) {}
|
||||
}
|
||||
reportEvent := advAPI32.MustFindProc("ReportEventW")
|
||||
return func(eventType eventType, eventID int32, msg string) {
|
||||
s, _ := syscall.UTF16PtrFromString(msg)
|
||||
_, _, _ = reportEvent.Call(
|
||||
uintptr(handle), // HANDLE hEventLog
|
||||
uintptr(eventType), // WORD wType
|
||||
uintptr(0), // WORD wCategory
|
||||
uintptr(eventID), // DWORD dwEventID
|
||||
uintptr(0), // PSID lpUserSid
|
||||
uintptr(1), // WORD wNumStrings
|
||||
uintptr(0), // DWORD dwDataSize
|
||||
uintptr(unsafe.Pointer(&s)), // LPCTSTR *lpStrings
|
||||
uintptr(0)) // LPVOID lpRawData
|
||||
}
|
||||
}()
|
||||
161 vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go generated vendored Executable file
@@ -0,0 +1,161 @@
|
||||
// Copyright 2017 Microsoft Corporation. All rights reserved.
|
||||
// Use of this source code is governed by an MIT
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package pipeline implements an HTTP request/response middleware pipeline whose
|
||||
policy objects mutate an HTTP request's URL, query parameters, and/or headers before
|
||||
the request is sent over the wire.
|
||||
|
||||
Not all policy objects mutate an HTTP request; some policy objects simply impact the
|
||||
flow of requests/responses by performing operations such as logging, retry policies,
|
||||
timeouts, failure injection, and deserialization of response payloads.
|
||||
|
||||
Implementing the Policy Interface
|
||||
|
||||
To implement a policy, define a struct that implements the pipeline.Policy interface's Do method. Your Do
|
||||
method is called when an HTTP request wants to be sent over the network. Your Do method can perform any
|
||||
operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, and/or query
|
||||
parameters, inject a failure, etc. Your Do method must then forward the HTTP request to next Policy object
|
||||
in a linked-list ensuring that the remaining Policy objects perform their work. Ultimately, the last Policy
|
||||
object sends the HTTP request over the network (by calling the HTTPSender's Do method).
|
||||
|
||||
When an HTTP response comes back, each Policy object in the linked-list gets a chance to process the response
|
||||
(in reverse order). The Policy object can log the response, retry the operation if due to a transient failure
|
||||
or timeout, deserialize the response body, etc. Ultimately, the last Policy object returns the HTTP response
|
||||
to the code that initiated the original HTTP request.
|
||||
|
||||
Here is a template for how to define a pipeline.Policy object:
|
||||
|
||||
type myPolicy struct {
|
||||
node PolicyNode
|
||||
// TODO: Add configuration/setting fields here (if desired)...
|
||||
}
|
||||
|
||||
func (p *myPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
||||
// TODO: Mutate/process the HTTP request here...
|
||||
response, err := p.node.Do(ctx, request) // Forward HTTP request to next Policy & get HTTP response
|
||||
// TODO: Mutate/process the HTTP response here...
|
||||
return response, err // Return response/error to previous Policy
|
||||
}
|
||||
|
||||
Implementing the Factory Interface
|
||||
|
||||
Each Policy struct definition requires a factory struct definition that implements the pipeline.Factory interface's New
|
||||
method. The New method is called when application code wants to initiate a new HTTP request. Factory's New method is
|
||||
passed a pipeline.PolicyNode object which contains a reference to the owning pipeline.Pipeline object (discussed later) and
|
||||
a reference to the next Policy object in the linked list. The New method should create its corresponding Policy object
|
||||
passing it the PolicyNode and any other configuration/settings fields appropriate for the specific Policy object.
|
||||
|
||||
Here is a template for how to define a pipeline.Factory object:
|
||||
|
||||
// NOTE: Once created & initialized, Factory objects should be goroutine-safe (ex: immutable);
|
||||
// this allows reuse (efficient use of memory) and makes these objects usable by multiple goroutines concurrently.
|
||||
type myPolicyFactory struct {
|
||||
// TODO: Add any configuration/setting fields if desired...
|
||||
}
|
||||
|
||||
func (f *myPolicyFactory) New(node pipeline.PolicyNode) Policy {
|
||||
return &myPolicy{node: node} // TODO: Also initialize any configuration/setting fields here (if desired)...
|
||||
}
|
||||
|
||||
Using your Factory and Policy objects via a Pipeline
|
||||
|
||||
To use the Factory and Policy objects, an application constructs a slice of Factory objects and passes
|
||||
this slice to the pipeline.NewPipeline function.
|
||||
|
||||
func NewPipeline(factories []pipeline.Factory, sender pipeline.HTTPSender) Pipeline
|
||||
|
||||
This function also requires an object implementing the HTTPSender interface. For simple scenarios,
|
||||
passing nil for HTTPSender causes a standard Go http.Client object to be created and used to actually
|
||||
send the HTTP request over the network. For more advanced scenarios, you can pass your own HTTPSender
|
||||
object in. This allows sharing of http.Client objects or the use of custom-configured http.Client objects
|
||||
or other objects that can simulate the network requests for testing purposes.
|
||||
|
||||
Now that you have a pipeline.Pipeline object, you can create a pipeline.Request object (which is a simple
|
||||
wrapper around Go's standard http.Request object) and pass it to Pipeline's Do method along with passing a
|
||||
context.Context for cancelling the HTTP request (if desired).
|
||||
|
||||
type Pipeline interface {
|
||||
Do(ctx context.Context, methodFactory pipeline.Factory, request pipeline.Request) (pipeline.Response, error)
|
||||
}
|
||||
|
||||
Do iterates over the slice of Factory objects and tells each one to create its corresponding
|
||||
Policy object. After the linked-list of Policy objects has been created, Do calls the first
|
||||
Policy object passing it the Context & HTTP request parameters. These parameters now flow through
|
||||
all the Policy objects giving each object a chance to look at and/or mutate the HTTP request.
|
||||
The last Policy object sends the message over the network.
|
||||
|
||||
When the network operation completes, the HTTP response and error return values pass
|
||||
back through the same Policy objects in reverse order. Most Policy objects ignore the
|
||||
response/error but some log the result, retry the operation (depending on the exact
|
||||
reason the operation failed), or deserialize the response's body. Your own Policy
|
||||
objects can do whatever they like when processing outgoing requests or incoming responses.
|
||||
|
||||
Note that after an I/O request runs to completion, the Policy objects for that request
|
||||
are garbage collected. However, Pipeline objects (like Factory objects) are goroutine-safe, allowing
|
||||
them to be created once and reused over many I/O operations. This allows for efficient use of
|
||||
memory and also makes them safely usable by multiple goroutines concurrently.
|
||||
|
||||
Inserting a Method-Specific Factory into the Linked-List of Policy Objects
|
||||
|
||||
While Pipeline and Factory objects can be reused over many different operations, it is
|
||||
common to have special behavior for a specific operation/method. For example, a method
|
||||
may need to deserialize the response's body to an instance of a specific data type.
|
||||
To accommodate this, the Pipeline's Do method takes an additional method-specific
|
||||
Factory object. The Do method tells this Factory to create a Policy object and
|
||||
injects this method-specific Policy object into the linked-list of Policy objects.
|
||||
|
||||
When creating a Pipeline object, the slice of Factory objects passed must have 1
|
||||
(and only 1) entry marking where the method-specific Factory should be injected.
|
||||
The Factory marker is obtained by calling the pipeline.MethodFactoryMarker() function:
|
||||
|
||||
func MethodFactoryMarker() pipeline.Factory
|
||||
|
||||
Creating an HTTP Request Object
|
||||
|
||||
The HTTP request object passed to Pipeline's Do method is not Go's http.Request struct.
|
||||
Instead, it is a pipeline.Request struct which is a simple wrapper around Go's standard
|
||||
http.Request. You create a pipeline.Request object by calling the pipeline.NewRequest function:
|
||||
|
||||
func NewRequest(method string, url url.URL, options pipeline.RequestOptions) (request pipeline.Request, err error)
|
||||
|
||||
To this function, you must pass a pipeline.RequestOptions that looks like this:
|
||||
|
||||
type RequestOptions struct {
|
||||
// The readable and seekable stream to be sent to the server as the request's body.
|
||||
Body io.ReadSeeker
|
||||
|
||||
// The callback method (if not nil) to be invoked to report progress as the stream is uploaded in the HTTP request.
|
||||
Progress ProgressReceiver
|
||||
}
|
||||
|
||||
The method and struct ensure that the request's body stream is a read/seekable stream.
|
||||
A seekable stream is required so that upon retry, the final Policy object can seek
|
||||
the stream back to the beginning before retrying the network request and re-uploading the
|
||||
body. In addition, you can associate a ProgressReceiver callback function which will be
|
||||
invoked periodically to report progress while bytes are being read from the body stream
|
||||
and sent over the network.
|
||||
|
||||
Processing the HTTP Response
|
||||
|
||||
When an HTTP response comes in from the network, a reference to Go's http.Response struct is
|
||||
embedded in a struct that implements the pipeline.Response interface:
|
||||
|
||||
type Response interface {
|
||||
Response() *http.Response
|
||||
}
|
||||
|
||||
This interface is returned through all the Policy objects. Each Policy object can call the Response
|
||||
interface's Response method to examine (or mutate) the embedded http.Response object.
|
||||
|
||||
A Policy object can internally define another struct (implementing the pipeline.Response interface)
|
||||
that embeds an http.Response and adds additional fields and return this structure to other Policy
|
||||
objects. This allows a Policy object to deserialize the body to some other struct and return the
|
||||
original http.Response and the additional struct back through the Policy chain. Other Policy objects
|
||||
can see the Response but cannot see the additional struct with the deserialized body. After all the
|
||||
Policy objects have returned, the pipeline.Response interface is returned by Pipeline's Do method.
|
||||
The caller of this method can perform a type assertion attempting to get back to the struct type
|
||||
really returned by the Policy object. If the type assertion is successful, the caller now has
|
||||
access to both the http.Response and the deserialized struct object.*/
|
||||
package pipeline
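A sketch of the deserialize-then-type-assert pattern the documentation above describes; itemResponse and newItemFactory are hypothetical names invented for the example.

    package responseexample

    import (
        "context"
        "encoding/json"
        "net/http"

        "github.com/Azure/azure-pipeline-go/pipeline"
    )

    type item struct {
        Name string `json:"name"`
    }

    // itemResponse wraps the raw response and carries the decoded payload.
    type itemResponse struct {
        raw  *http.Response
        Item item
    }

    // Response satisfies the pipeline.Response interface.
    func (r *itemResponse) Response() *http.Response { return r.raw }

    // newItemFactory is a hypothetical method-specific factory that decodes the body.
    func newItemFactory() pipeline.Factory {
        return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
            return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
                resp, err := next.Do(ctx, request)
                if err != nil {
                    return resp, err
                }
                ir := &itemResponse{raw: resp.Response()}
                err = json.NewDecoder(ir.raw.Body).Decode(&ir.Item)
                return ir, err
            }
        })
    }

The caller of Pipeline's Do method would then use a type assertion such as ir, ok := resp.(*itemResponse) to reach both the raw http.Response and the decoded Item.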
|
||||
121 vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go generated vendored Executable file
@@ -0,0 +1,121 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
type causer interface {
|
||||
Cause() error
|
||||
}
|
||||
|
||||
// ErrorNode can be an embedded field in a private error object. This field
|
||||
// adds Program Counter support and a 'cause' (reference to a preceding error).
|
||||
// When initializing an error type with this embedded field, initialize the
|
||||
// ErrorNode field by calling ErrorNode{}.Initialize(cause).
|
||||
type ErrorNode struct {
|
||||
pc uintptr // Represents a Program Counter that you can get symbols for.
|
||||
cause error // Refers to the preceding error (or nil)
|
||||
}
|
||||
|
||||
// Error returns a string with the PC's symbols or "" if the PC is invalid.
|
||||
// When defining a new error type, have its Error method call this one passing
|
||||
// it the string representation of the error.
|
||||
func (e *ErrorNode) Error(msg string) string {
|
||||
s := ""
|
||||
if fn := runtime.FuncForPC(e.pc); fn != nil {
|
||||
file, line := fn.FileLine(e.pc)
|
||||
s = fmt.Sprintf("-> %v, %v:%v\n", fn.Name(), file, line)
|
||||
}
|
||||
s += msg + "\n\n"
|
||||
if e.cause != nil {
|
||||
s += e.cause.Error() + "\n"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Cause returns the error that preceded this error.
|
||||
func (e *ErrorNode) Cause() error { return e.cause }
|
||||
|
||||
// Temporary returns true if the error occurred due to a temporary condition.
|
||||
func (e ErrorNode) Temporary() bool {
|
||||
type temporary interface {
|
||||
Temporary() bool
|
||||
}
|
||||
|
||||
for err := e.cause; err != nil; {
|
||||
if t, ok := err.(temporary); ok {
|
||||
return t.Temporary()
|
||||
}
|
||||
|
||||
if cause, ok := err.(causer); ok {
|
||||
err = cause.Cause()
|
||||
} else {
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Timeout returns true if the error occurred due to time expiring.
|
||||
func (e ErrorNode) Timeout() bool {
|
||||
type timeout interface {
|
||||
Timeout() bool
|
||||
}
|
||||
|
||||
for err := e.cause; err != nil; {
|
||||
if t, ok := err.(timeout); ok {
|
||||
return t.Timeout()
|
||||
}
|
||||
|
||||
if cause, ok := err.(causer); ok {
|
||||
err = cause.Cause()
|
||||
} else {
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Initialize is used to initialize an embedded ErrorNode field.
|
||||
// It captures the caller's program counter and saves the cause (preceding error).
|
||||
// To initialize the field, use "ErrorNode{}.Initialize(cause, 3)". A callersToSkip
|
||||
// value of 3 is very common; but, depending on your code nesting, you may need
|
||||
// a different value.
|
||||
func (ErrorNode) Initialize(cause error, callersToSkip int) ErrorNode {
|
||||
// Get the PC of Initialize method's caller.
|
||||
pc := [1]uintptr{}
|
||||
_ = runtime.Callers(callersToSkip, pc[:])
|
||||
return ErrorNode{pc: pc[0], cause: cause}
|
||||
}
|
||||
|
||||
// Cause walks all the preceding errors and returns the originating error.
|
||||
func Cause(err error) error {
|
||||
for err != nil {
|
||||
cause, ok := err.(causer)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
err = cause.Cause()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// NewError creates a simple string error (like errors.New). But, this
|
||||
// error also captures the caller's Program Counter and the preceding error.
|
||||
func NewError(cause error, msg string) error {
|
||||
return &pcError{
|
||||
ErrorNode: ErrorNode{}.Initialize(cause, 3),
|
||||
msg: msg,
|
||||
}
|
||||
}
|
||||
|
||||
// pcError is a simple string error (like errors.New) with an ErrorNode (PC & cause).
|
||||
type pcError struct {
|
||||
ErrorNode
|
||||
msg string
|
||||
}
|
||||
|
||||
// Error satisfies the error interface. It shows the error with Program Counter
|
||||
// symbols and calls Error on the preceding error so you can see the full error chain.
|
||||
func (e *pcError) Error() string { return e.ErrorNode.Error(e.msg) }
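A small sketch of defining a custom error type on top of ErrorNode, following the Initialize(cause, 3) guidance above; requestError is a hypothetical type invented for the example.

    package errorexample

    import "github.com/Azure/azure-pipeline-go/pipeline"

    // requestError embeds ErrorNode to gain the PC and cause-chain support.
    type requestError struct {
        pipeline.ErrorNode
        url string
    }

    func newRequestError(cause error, url string) error {
        return &requestError{
            ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3),
            url:       url,
        }
    }

    // Error delegates to ErrorNode.Error so PC symbols and the cause chain are included.
    func (e *requestError) Error() string {
        return e.ErrorNode.Error("request to " + e.url + " failed")
    }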
|
||||
75 vendor/github.com/Azure/azure-pipeline-go/pipeline/policies_test.go generated vendored Executable file
@@ -0,0 +1,75 @@
|
||||
package pipeline_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
||||
)
|
||||
|
||||
// Here is the template for defining your own Factory & Policy:
|
||||
|
||||
// newMyPolicyFactory creates a 'My' policy factory. Make this function
|
||||
// public if this should be callable from another package; everything
|
||||
// else about the factory/policy should remain private to the package.
|
||||
func newMyPolicyFactory( /* Desired parameters */ ) pipeline.Factory {
|
||||
return &myPolicyFactory{ /* Set desired fields */ }
|
||||
}
|
||||
|
||||
type myPolicyFactory struct {
|
||||
// Desired fields (goroutine-safe because the factory is shared by many Policy objects)
|
||||
}
|
||||
|
||||
// New initializes a Xxx policy object.
|
||||
func (f *myPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
|
||||
return &myPolicy{next: next, po: po /* Set desired fields */}
|
||||
}
|
||||
|
||||
type myPolicy struct {
|
||||
next pipeline.Policy
|
||||
po *pipeline.PolicyOptions // Optional private field
|
||||
// Additional desired fields (mutable for use by this specific Policy object)
|
||||
}
|
||||
|
||||
func (p *myPolicy) Do(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
|
||||
// TODO: Put your policy behavior code here
|
||||
// Your code should NOT mutate the ctx or request parameters
|
||||
// However, you can make a copy of the request and mutate the copy
|
||||
// You can also pass a different Context on.
|
||||
// You can optionally use po (PolicyOptions) in this func.
|
||||
|
||||
// Forward the request to the next node in the pipeline:
|
||||
response, err = p.next.Do(ctx, request)
|
||||
|
||||
// Process the response here. You can deserialize the body into an object.
|
||||
// If you do this, also define a struct that wraps an http.Response & your
|
||||
// deserialized struct. Have your wrapper struct implement the
|
||||
// pipeline.Response interface and then return your struct (via the interface)
|
||||
// After the pipeline completes, take response and perform a type assertion
|
||||
// to get back to the wrapper struct so you can access the deserialized object.
|
||||
|
||||
return // Return the response & err
|
||||
}
|
||||
|
||||
func newMyPolicyFactory2( /* Desired parameters */ ) pipeline.Factory {
|
||||
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
|
||||
return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
|
||||
// TODO: Put your policy behavior code here
|
||||
// Your code should NOT mutate the ctx or request parameters
|
||||
// However, you can make a copy of the request and mutate the copy
|
||||
// You can also pass a different Context on.
|
||||
// You can optionally use po (PolicyOptions) in this func.
|
||||
|
||||
// Forward the request to the next node in the pipeline:
|
||||
response, err = next.Do(ctx, request)
|
||||
|
||||
// Process the response here. You can deserialize the body into an object.
|
||||
// If you do this, also define a struct that wraps an http.Response & your
|
||||
// deserialized struct. Have your wrapper struct implement the
|
||||
// pipeline.Response interface and then return your struct (via the interface)
|
||||
// After the pipeline completes, take response and perform a type assertion
|
||||
// to get back to the wrapper struct so you can access the deserialized object.
|
||||
|
||||
return // Return the response & err
|
||||
}
|
||||
})
|
||||
}
|
||||
82 vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go generated vendored Executable file
@@ -0,0 +1,82 @@
|
||||
package pipeline
|
||||
|
||||
import "io"
|
||||
|
||||
// ********** The following is common between the request body AND the response body.
|
||||
|
||||
// ProgressReceiver defines the signature of a callback function invoked as progress is reported.
|
||||
type ProgressReceiver func(bytesTransferred int64)
|
||||
|
||||
// ********** The following are specific to the request body (a ReadSeekCloser)
|
||||
|
||||
// This struct is used when sending a body to the network
|
||||
type requestBodyProgress struct {
|
||||
requestBody io.ReadSeeker // Seeking is required to support retries
|
||||
pr ProgressReceiver
|
||||
}
|
||||
|
||||
// NewRequestBodyProgress adds progress reporting to an HTTP request's body stream.
|
||||
func NewRequestBodyProgress(requestBody io.ReadSeeker, pr ProgressReceiver) io.ReadSeeker {
|
||||
if pr == nil {
|
||||
panic("pr must not be nil")
|
||||
}
|
||||
return &requestBodyProgress{requestBody: requestBody, pr: pr}
|
||||
}
|
||||
|
||||
// Read reads a block of data from an inner stream and reports progress
|
||||
func (rbp *requestBodyProgress) Read(p []byte) (n int, err error) {
|
||||
n, err = rbp.requestBody.Read(p)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// Invokes the user's callback method to report progress
|
||||
position, err := rbp.requestBody.Seek(0, io.SeekCurrent)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rbp.pr(position)
|
||||
return
|
||||
}
|
||||
|
||||
func (rbp *requestBodyProgress) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
|
||||
return rbp.requestBody.Seek(offset, whence)
|
||||
}
|
||||
|
||||
// requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it.
|
||||
func (rbp *requestBodyProgress) Close() error {
|
||||
if c, ok := rbp.requestBody.(io.Closer); ok {
|
||||
return c.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ********** The following are specific to the response body (a ReadCloser)
|
||||
|
||||
// This struct is used when sending a body to the network
|
||||
type responseBodyProgress struct {
|
||||
responseBody io.ReadCloser
|
||||
pr ProgressReceiver
|
||||
offset int64
|
||||
}
|
||||
|
||||
// NewResponseBodyProgress adds progress reporting to an HTTP response's body stream.
|
||||
func NewResponseBodyProgress(responseBody io.ReadCloser, pr ProgressReceiver) io.ReadCloser {
|
||||
if pr == nil {
|
||||
panic("pr must not be nil")
|
||||
}
|
||||
return &responseBodyProgress{responseBody: responseBody, pr: pr, offset: 0}
|
||||
}
|
||||
|
||||
// Read reads a block of data from an inner stream and reports progress
|
||||
func (rbp *responseBodyProgress) Read(p []byte) (n int, err error) {
|
||||
n, err = rbp.responseBody.Read(p)
|
||||
rbp.offset += int64(n)
|
||||
|
||||
// Invokes the user's callback method to report progress
|
||||
rbp.pr(rbp.offset)
|
||||
return
|
||||
}
|
||||
|
||||
func (rbp *responseBodyProgress) Close() error {
|
||||
return rbp.responseBody.Close()
|
||||
}
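A brief sketch of attaching a ProgressReceiver to an upload body with NewRequestBodyProgress; the callback and the in-memory body are illustrative only.

    package progressexample

    import (
        "fmt"
        "strings"

        "github.com/Azure/azure-pipeline-go/pipeline"
    )

    func wrapBody() {
        body := strings.NewReader("hello world") // any io.ReadSeeker works
        wrapped := pipeline.NewRequestBodyProgress(body, func(bytesTransferred int64) {
            fmt.Printf("uploaded %d bytes so far\n", bytesTransferred)
        })
        buf := make([]byte, 4)
        for {
            if _, err := wrapped.Read(buf); err != nil {
                break // io.EOF once the body is drained
            }
        }
    }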
|
||||
147 vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go generated vendored Executable file
@@ -0,0 +1,147 @@
|
||||
package pipeline
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Request is a thin wrapper over an http.Request. The wrapper provides several helper methods.
|
||||
type Request struct {
|
||||
*http.Request
|
||||
}
|
||||
|
||||
// NewRequest initializes a new HTTP request object with any desired options.
|
||||
func NewRequest(method string, url url.URL, body io.ReadSeeker) (request Request, err error) {
|
||||
// Note: the url is passed by value so that any pipeline operations that modify it do so on a copy.
|
||||
|
||||
// This code to construct an http.Request is copied from http.NewRequest(); we intentionally omitted removeEmptyPort for now.
|
||||
request.Request = &http.Request{
|
||||
Method: method,
|
||||
URL: &url,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Header: make(http.Header),
|
||||
Host: url.Host,
|
||||
}
|
||||
|
||||
if body != nil {
|
||||
err = request.SetBody(body)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SetBody sets the body and content length, assumes body is not nil.
|
||||
func (r Request) SetBody(body io.ReadSeeker) error {
|
||||
size, err := body.Seek(0, io.SeekEnd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
body.Seek(0, io.SeekStart)
|
||||
r.ContentLength = size
|
||||
r.Header["Content-Length"] = []string{strconv.FormatInt(size, 10)}
|
||||
|
||||
if size != 0 {
|
||||
r.Body = &retryableRequestBody{body: body}
|
||||
r.GetBody = func() (io.ReadCloser, error) {
|
||||
_, err := body.Seek(0, io.SeekStart)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r.Body, nil
|
||||
}
|
||||
} else {
|
||||
// in case the body is an empty stream, we need to use http.NoBody to explicitly provide no content
|
||||
r.Body = http.NoBody
|
||||
r.GetBody = func() (io.ReadCloser, error) {
|
||||
return http.NoBody, nil
|
||||
}
|
||||
|
||||
// close the user-provided empty body
|
||||
if c, ok := body.(io.Closer); ok {
|
||||
c.Close()
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Copy makes a copy of an http.Request. Specifically, it makes a deep copy
|
||||
// of its Method, URL, Host, Proto(Major/Minor), Header, ContentLength, Close,
|
||||
// RemoteAddr, RequestURI. Copy makes a shallow copy of the Body, GetBody, TLS,
|
||||
// Cancel, Response, and ctx fields. Copy panics if any of these fields are
|
||||
// not nil: TransferEncoding, Form, PostForm, MultipartForm, or Trailer.
|
||||
func (r Request) Copy() Request {
|
||||
if r.TransferEncoding != nil || r.Form != nil || r.PostForm != nil || r.MultipartForm != nil || r.Trailer != nil {
|
||||
panic("Can't make a deep copy of the http.Request because at least one of the following is not nil:" +
|
||||
"TransferEncoding, Form, PostForm, MultipartForm, or Trailer.")
|
||||
}
|
||||
copy := *r.Request // Copy the request
|
||||
urlCopy := *(r.Request.URL) // Copy the URL
|
||||
copy.URL = &urlCopy
|
||||
copy.Header = http.Header{} // Copy the header
|
||||
for k, vs := range r.Header {
|
||||
for _, value := range vs {
|
||||
copy.Header.Add(k, value)
|
||||
}
|
||||
}
|
||||
return Request{Request: ©} // Return the copy
|
||||
}
|
||||
|
||||
func (r Request) close() error {
|
||||
if r.Body != nil && r.Body != http.NoBody {
|
||||
c, ok := r.Body.(*retryableRequestBody)
|
||||
if !ok {
|
||||
panic("unexpected request body type (should be *retryableReadSeekerCloser)")
|
||||
}
|
||||
return c.realClose()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation.
|
||||
func (r Request) RewindBody() error {
|
||||
if r.Body != nil && r.Body != http.NoBody {
|
||||
s, ok := r.Body.(io.Seeker)
|
||||
if !ok {
|
||||
panic("unexpected request body type (should be io.Seeker)")
|
||||
}
|
||||
|
||||
// Reset the stream back to the beginning
|
||||
_, err := s.Seek(0, io.SeekStart)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser)
|
||||
|
||||
// This struct is used when sending a body to the network
|
||||
type retryableRequestBody struct {
|
||||
body io.ReadSeeker // Seeking is required to support retries
|
||||
}
|
||||
|
||||
// Read reads a block of data from an inner stream and reports progress
|
||||
func (b *retryableRequestBody) Read(p []byte) (n int, err error) {
|
||||
return b.body.Read(p)
|
||||
}
|
||||
|
||||
func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
|
||||
return b.body.Seek(offset, whence)
|
||||
}
|
||||
|
||||
func (b *retryableRequestBody) Close() error {
|
||||
// We don't want the underlying transport to close the request body on transient failures so this is a nop.
|
||||
// The pipeline closes the request body upon success.
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *retryableRequestBody) realClose() error {
|
||||
if c, ok := b.body.(io.Closer); ok {
|
||||
return c.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
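Not part of the diff: a minimal sketch of how NewRequest, RewindBody and Copy above might be used when building a retryable request; the URL and payload are hypothetical.

package main

import (
	"net/url"
	"strings"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

func main() {
	u, err := url.Parse("https://example.blob.core.windows.net/container/blob") // hypothetical URL
	if err != nil {
		panic(err)
	}
	body := strings.NewReader("payload") // an io.ReadSeeker, so the body can be replayed

	// The URL is passed by value; SetBody wires up a retryable body with Content-Length set.
	req, err := pipeline.NewRequest("PUT", *u, body)
	if err != nil {
		panic(err)
	}

	// Before resending on a retry, the body must be rewound to the start.
	if err := req.RewindBody(); err != nil {
		panic(err)
	}
	_ = req.Copy() // an independent copy with its own URL and Header
}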
74  vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go  (generated, vendored, Executable file)
@@ -0,0 +1,74 @@
package pipeline

import (
	"bytes"
	"fmt"
	"net/http"
	"sort"
	"strings"
)

// The Response interface exposes an http.Response object as it returns through the pipeline of Policy objects.
// This ensures that Policy objects have access to the HTTP response. However, the object this interface encapsulates
// might be a struct with additional fields that is created by a Policy object (typically a method-specific Factory).
// The method that injected the method-specific Factory gets this returned Response and performs a type assertion
// to the expected struct and returns the struct to its caller.
type Response interface {
	Response() *http.Response
}

// This is the default struct that has the http.Response.
// A method can replace this struct with its own struct containing an http.Response
// field and any other additional fields.
type httpResponse struct {
	response *http.Response
}

// NewHTTPResponse is typically called by a Policy object to return a Response object.
func NewHTTPResponse(response *http.Response) Response {
	return &httpResponse{response: response}
}

// This method satisfies the public Response interface's Response method
func (r httpResponse) Response() *http.Response {
	return r.response
}

// WriteRequestWithResponse appends a formatted HTTP request into a Buffer. If response and/or err are
// not nil, then these are also written into the Buffer.
func WriteRequestWithResponse(b *bytes.Buffer, request *http.Request, response *http.Response, err error) {
	// Write the request into the buffer.
	fmt.Fprint(b, " "+request.Method+" "+request.URL.String()+"\n")
	writeHeader(b, request.Header)
	if response != nil {
		fmt.Fprintln(b, " --------------------------------------------------------------------------------")
		fmt.Fprint(b, " RESPONSE Status: "+response.Status+"\n")
		writeHeader(b, response.Header)
	}
	if err != nil {
		fmt.Fprintln(b, " --------------------------------------------------------------------------------")
		fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n")
	}
}

// writeHeader appends an HTTP request's or response's header into a Buffer.
func writeHeader(b *bytes.Buffer, header map[string][]string) {
	if len(header) == 0 {
		b.WriteString(" (no headers)\n")
		return
	}
	keys := make([]string, 0, len(header))
	// Alphabetize the headers
	for k := range header {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		// Redact the value of any Authorization header to prevent security information from persisting in logs
		value := interface{}("REDACTED")
		if !strings.EqualFold(k, "Authorization") {
			value = header[k]
		}
		fmt.Fprintf(b, " %s: %+v\n", k, value)
	}
}
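Not part of the diff: a minimal sketch of the Authorization redaction performed by WriteRequestWithResponse above; the request, token and headers are hypothetical.

package main

import (
	"bytes"
	"fmt"
	"net/http"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

func main() {
	req, err := http.NewRequest("GET", "https://example.com/resource", nil) // hypothetical request
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer secret-token") // written as REDACTED
	req.Header.Set("x-ms-version", "2018-03-28")           // written verbatim

	resp := &http.Response{Status: "200 OK", Header: http.Header{"Content-Length": {"0"}}}

	var b bytes.Buffer
	pipeline.WriteRequestWithResponse(&b, req, resp, nil)
	fmt.Print(b.String())
}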
9  vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go  (generated, vendored, Normal file)
@@ -0,0 +1,9 @@
package pipeline

const (
	// UserAgent is the string to be used in the user agent string when making requests.
	UserAgent = "azure-pipeline-go/" + Version

	// Version is the semantic version (see http://semver.org) of the pipeline package.
	Version = "0.1.0"
)
12  vendor/github.com/Azure/azure-sdk-for-go/.github/PULL_REQUEST_TEMPLATE.md  (generated, vendored)
@@ -1,12 +0,0 @@
Thank you for your contribution to the Azure-SDK-for-Go! We will triage and review it as quickly as we can.

As part of your submission, please make sure that you can make the following assertions:

- [ ] I'm not making changes to Auto-Generated files which will just get erased next time there's a release.
  - If that's what you want to do, consider making a contribution here: https://github.com/Azure/autorest.go
- [ ] I've tested my changes, adding unit tests where applicable.
- [ ] I've added Apache 2.0 Headers to the top of any new source files.
- [ ] I'm submitting this PR to the `dev` branch, or I'm fixing a bug that warrants its own release and I'm targeting the `master` branch.
- [ ] If I'm targeting the `master` branch, I've also added a note to [CHANGELOG.md](https://github.com/Azure/azure-sdk-for-go/blob/master/README.md).
- [ ] I've mentioned any relevant open issues in this PR, making clear the context for the contribution.
32  vendor/github.com/Azure/azure-sdk-for-go/.gitignore  (generated, vendored)
@@ -1,32 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
# *.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof

# Editor swap files
*.swp
*~
.DS_Store

# ignore vendor/
vendor/
34  vendor/github.com/Azure/azure-sdk-for-go/.travis.yml  (generated, vendored)
@@ -1,34 +0,0 @@
sudo: false

language: go
go:
  - 1.8.x
  - 1.9.x
  - 1.10.x
  - master

env:
  global:
    - DEP_VERSION="0.4.1"
    - DEP_SHA=31144e465e52ffbc0035248a10ddea61a09bf28b00784fd3fdd9882c8cbb2315
    - IGNORE_BREAKING_CHANGES=true

matrix:
  allow_failures:
    - go: master

before_install:
  - curl -fL -o $GOPATH/bin/dep https://github.com/golang/dep/releases/download/v$DEP_VERSION/dep-linux-amd64 && echo "$DEP_SHA $GOPATH/bin/dep" | sha256sum -c - && chmod +x $GOPATH/bin/dep

install:
  - go get -u github.com/golang/lint/golint
  - dep ensure

script:
  - bash rungas.sh
  - grep -L -r --include *.go --exclude-dir vendor -P "Copyright (\d{4}|\(c\)) Microsoft" ./ | tee /dev/stderr | test -z "$(< /dev/stdin)"
  - go build $(go list ./... | grep -v vendor)
  - test -z "$(go fmt $(go list ./... | grep -v vendor) | tee /dev/stderr)"
  - go vet $(go list ./... | grep -v vendor)
  - go test $(sh ./findTestedPackages.sh)
  - go run ./tools/apidiff/main.go packages ./services FETCH_HEAD~1 FETCH_HEAD --copyrepo --breakingchanges || $IGNORE_BREAKING_CHANGES
Some files were not shown because too many files have changed in this diff.