mirror of https://github.com/rclone/rclone.git synced 2026-02-12 14:33:22 +00:00

Compare commits (1 commit)

Commit ec40ffbdc1 by Nick Craig-Wood, 2019-03-05 14:37:46 +00:00

drive: set the IncludeCorpusRemovals flag so changes returns deletes

Without this flag, changes does not include files which were hard
deleted.

Fixes #3020
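For context, a minimal standalone sketch of what the fix does, written directly against google.golang.org/api/drive/v3 rather than through the rclone backend (the svc and pageToken values are assumed to come from an already-authorized client; the field list mirrors the one in the drive diff below):

```
// Sketch only, not the backend code itself.
package sketch

import (
	drive "google.golang.org/api/drive/v3"
)

// changesPage lists one page of changes. Without IncludeCorpusRemovals(true)
// the changes feed omits files which were hard deleted (#3020).
func changesPage(svc *drive.Service, pageToken string) ([]*drive.Change, error) {
	changeList, err := svc.Changes.List(pageToken).
		IncludeCorpusRemovals(true). // the one-line fix
		Fields("nextPageToken,newStartPageToken,changes(fileId,file(name,parents,mimeType))").
		Do()
	if err != nil {
		return nil, err
	}
	return changeList.Changes, nil
}
```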
109 changed files with 331 additions and 4095 deletions

View File

@@ -1,33 +1,27 @@
---
language: go
sudo: required
dist: trusty
os:
- linux
- linux
go:
- 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
- 1.12.x
- tip
go_import_path: github.com/ncw/rclone
before_install:
- git fetch --unshallow --tags
- |
if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
fi
if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
brew update
brew tap caskroom/cask
brew cask install osxfuse
fi
if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then
choco install -y winfsp zip make
cd ../.. # fix crlf in git checkout
mv $TRAVIS_REPO_SLUG _old
git config --global core.autocrlf false
git clone _old $TRAVIS_REPO_SLUG
cd $TRAVIS_REPO_SLUG
fi
- if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi
- if [[ $TRAVIS_OS_NAME == osx ]]; then brew update && brew tap caskroom/cask && brew cask install osxfuse ; fi
install:
- make vars
- git fetch --unshallow --tags
- make vars
- make build_dep
script:
- make check
- make quicktest
- make compile_all
env:
global:
- GOTAGS=cmount
@@ -38,66 +32,23 @@ env:
addons:
apt:
packages:
- fuse
- libfuse-dev
- rpm
- pkg-config
- fuse
- libfuse-dev
- rpm
- pkg-config
cache:
directories:
- $HOME/.cache/go-build
matrix:
allow_failures:
- go: tip
- go: tip
include:
- go: 1.8.x
script:
- make quicktest
- go: 1.9.x
script:
- make quicktest
- go: 1.10.x
script:
- make quicktest
- go: 1.11.x
script:
- make quicktest
- go: 1.12.x
env:
- GOTAGS=cmount
script:
- make build_dep
- make check
- make quicktest
- make racequicktest
- make compile_all
- os: osx
go: 1.12.x
env:
- GOTAGS= # cmount doesn't work on osx travis for some reason
cache:
directories:
- $HOME/Library/Caches/go-build
script:
- make
- make quicktest
- make racequicktest
# - os: windows
# go: 1.12.x
# env:
# - GOTAGS=cmount
# - CPATH='C:\Program Files (x86)\WinFsp\inc\fuse'
# #filter_secrets: false # works around a problem with secrets under windows
# cache:
# directories:
# - ${LocalAppData}/go-build
# script:
# - make
# - make quicktest
# - make racequicktest
- go: tip
script:
- make quicktest
- os: osx
go: 1.12.x
env: GOTAGS=""
cache:
directories:
- $HOME/Library/Caches/go-build
deploy:
provider: script
script: make travis_beta
@@ -106,4 +57,4 @@ deploy:
repo: ncw/rclone
all_branches: true
go: 1.12.x
condition: $TRAVIS_PULL_REQUEST == false && $TRAVIS_OS_NAME != "windows"
condition: $TRAVIS_PULL_REQUEST == false

View File

@@ -17,6 +17,8 @@ ifneq ($(TAG),$(LAST_TAG))
endif
GO_VERSION := $(shell go version)
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
# Run full tests if go >= go1.12
FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 12)')
BETA_PATH := $(BRANCH_PATH)$(TAG)
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
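The FULL_TESTS variable above gates the heavier targets on the toolchain version. A sketch in Go of what the perl one-liner computes (an illustrative translation, not project code):

```
// Returns "goMAJOR.MINOR" when the `go version` output reports go1.12 or
// newer, and the empty string otherwise, matching the Makefile's FULL_TESTS.
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

func fullTests(goVersion string) string {
	m := regexp.MustCompile(`go(\d+)\.(\d+)`).FindStringSubmatch(goVersion)
	if m == nil {
		return ""
	}
	major, _ := strconv.Atoi(m[1])
	minor, _ := strconv.Atoi(m[2])
	if major > 1 || minor >= 12 {
		return fmt.Sprintf("go%d.%d", major, minor)
	}
	return ""
}

func main() {
	fmt.Println(fullTests("go version go1.12.1 linux/amd64")) // go1.12
	fmt.Println(fullTests("go version go1.11.5 linux/amd64")) // (empty)
}
```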
@@ -40,6 +42,7 @@ vars:
@echo LAST_TAG="'$(LAST_TAG)'"
@echo NEW_TAG="'$(NEW_TAG)'"
@echo GO_VERSION="'$(GO_VERSION)'"
@echo FULL_TESTS="'$(FULL_TESTS)'"
@echo BETA_URL="'$(BETA_URL)'"
version:
@@ -54,22 +57,28 @@ test: rclone
# Quick test
quicktest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
racequicktest:
ifdef FULL_TESTS
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
endif
# Do source code quality checks
check: rclone
ifdef FULL_TESTS
@# we still run go vet for -printfuncs which golangci-lint doesn't do yet
@# see: https://github.com/golangci/golangci-lint/issues/204
@echo "-- START CODE QUALITY REPORT -------------------------------"
@go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
@golangci-lint run ./...
@echo "-- END CODE QUALITY REPORT ---------------------------------"
else
@echo Skipping source quality tests as version of go too old
endif
# Get the build dependencies
build_dep:
ifdef FULL_TESTS
go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
endif
# Get the release dependencies
release_dep:
@@ -153,7 +162,11 @@ log_since_last_release:
git log $(LAST_TAG)..
compile_all:
ifdef FULL_TESTS
go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)
else
@echo Skipping compile all as version of go too old
endif
appveyor_upload:
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
@@ -173,11 +186,6 @@ BUILD_FLAGS := -exclude "^(windows|darwin)/"
ifeq ($(TRAVIS_OS_NAME),osx)
BUILD_FLAGS := -include "^darwin/" -cgo
endif
ifeq ($(TRAVIS_OS_NAME),windows)
# BUILD_FLAGS := -include "^windows/" -cgo
# 386 doesn't build yet
BUILD_FLAGS := -include "^windows/amd64" -cgo
endif
travis_beta:
ifeq ($(TRAVIS_OS_NAME),linux)

View File

@@ -36,7 +36,6 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)

View File

@@ -16,7 +16,6 @@ import (
_ "github.com/ncw/rclone/backend/http"
_ "github.com/ncw/rclone/backend/hubic"
_ "github.com/ncw/rclone/backend/jottacloud"
_ "github.com/ncw/rclone/backend/koofr"
_ "github.com/ncw/rclone/backend/local"
_ "github.com/ncw/rclone/backend/mega"
_ "github.com/ncw/rclone/backend/onedrive"

View File

@@ -952,13 +952,6 @@ func (f *Fs) hide(Name string) error {
return f.shouldRetry(resp, err)
})
if err != nil {
if apiErr, ok := err.(*api.Error); ok {
if apiErr.Code == "already_hidden" {
// sometimes eventual consistency causes this, so
// ignore this error since it is harmless
return nil
}
}
return errors.Wrapf(err, "failed to hide %q", Name)
}
return nil

View File

@@ -1191,7 +1191,7 @@ func (f *Fs) Rmdir(dir string) error {
}
var queuedEntries []*Object
err = walk.ListR(f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
err = walk.Walk(f.tempFs, dir, true, -1, func(path string, entries fs.DirEntries, err error) error {
for _, o := range entries {
if oo, ok := o.(fs.Object); ok {
co := ObjectFromOriginal(f, oo)
@@ -1287,7 +1287,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
}
var queuedEntries []*Object
err := walk.ListR(f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
err := walk.Walk(f.tempFs, srcRemote, true, -1, func(path string, entries fs.DirEntries, err error) error {
for _, o := range entries {
if oo, ok := o.(fs.Object); ok {
co := ObjectFromOriginal(f, oo)

View File

@@ -1023,7 +1023,7 @@ func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
}
var queuedEntries []fs.Object
err = walk.ListR(cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
err = walk.Walk(cacheFs.tempFs, "", true, -1, func(path string, entries fs.DirEntries, err error) error {
for _, o := range entries {
if oo, ok := o.(fs.Object); ok {
queuedEntries = append(queuedEntries, oo)

View File

@@ -240,22 +240,6 @@ func init() {
Default: false,
Help: "Skip google documents in all listings.\nIf given, gdocs practically become invisible to rclone.",
Advanced: true,
}, {
Name: "skip_checksum_gphotos",
Default: false,
Help: `Skip MD5 checksum on Google photos and videos only.
Use this if you get checksum errors when transferring Google photos or
videos.
Setting this flag will cause Google photos and videos to return a
blank MD5 checksum.
Google photos are identified by being in the "photos" space.
Corrupted checksums are caused by Google modifying the image/video but
not updating the checksum.`,
Advanced: true,
}, {
Name: "shared_with_me",
Default: false,
@@ -412,7 +396,6 @@ type Options struct {
AuthOwnerOnly bool `config:"auth_owner_only"`
UseTrash bool `config:"use_trash"`
SkipGdocs bool `config:"skip_gdocs"`
SkipChecksumGphotos bool `config:"skip_checksum_gphotos"`
SharedWithMe bool `config:"shared_with_me"`
TrashedOnly bool `config:"trashed_only"`
Extensions string `config:"formats"`
@@ -632,9 +615,6 @@ func (f *Fs) list(dirIDs []string, title string, directoriesOnly, filesOnly, inc
if f.opt.AuthOwnerOnly {
fields += ",owners"
}
if f.opt.SkipChecksumGphotos {
fields += ",spaces"
}
fields = fmt.Sprintf("files(%s),nextPageToken", fields)
@@ -696,33 +676,28 @@ func isPowerOfTwo(x int64) bool {
}
// add a charset parameter to all text/* MIME types
func fixMimeType(mimeTypeIn string) string {
if mimeTypeIn == "" {
return ""
}
mediaType, param, err := mime.ParseMediaType(mimeTypeIn)
func fixMimeType(mimeType string) string {
mediaType, param, err := mime.ParseMediaType(mimeType)
if err != nil {
return mimeTypeIn
return mimeType
}
mimeTypeOut := mimeTypeIn
if strings.HasPrefix(mediaType, "text/") && param["charset"] == "" {
if strings.HasPrefix(mimeType, "text/") && param["charset"] == "" {
param["charset"] = "utf-8"
mimeTypeOut = mime.FormatMediaType(mediaType, param)
mimeType = mime.FormatMediaType(mediaType, param)
}
if mimeTypeOut == "" {
panic(errors.Errorf("unable to fix MIME type %q", mimeTypeIn))
}
return mimeTypeOut
return mimeType
}
func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
out = make(map[string][]string, len(in))
for k, v := range in {
func fixMimeTypeMap(m map[string][]string) map[string][]string {
for _, v := range m {
for i, mt := range v {
v[i] = fixMimeType(mt)
fixed := fixMimeType(mt)
if fixed == "" {
panic(errors.Errorf("unable to fix MIME type %q", mt))
}
v[i] = fixed
}
out[fixMimeType(k)] = v
}
return out
return m
}
func isInternalMimeType(mimeType string) bool {
return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
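The hunk above rewrites fixMimeType in place. As a self-contained illustration of the standard-library behaviour it relies on (a sketch with a hypothetical helper name, not the backend function itself):

```
// addCharset appends charset=utf-8 to text/* MIME types that lack one,
// and leaves everything else (including unparseable values) untouched.
package main

import (
	"fmt"
	"mime"
	"strings"
)

func addCharset(mimeType string) string {
	mediaType, param, err := mime.ParseMediaType(mimeType)
	if err != nil {
		return mimeType // leave unparseable types alone
	}
	if strings.HasPrefix(mediaType, "text/") && param["charset"] == "" {
		param["charset"] = "utf-8"
		return mime.FormatMediaType(mediaType, param)
	}
	return mimeType
}

func main() {
	fmt.Println(addCharset("text/plain"))               // text/plain; charset=utf-8
	fmt.Println(addCharset("text/html; charset=ascii")) // unchanged
	fmt.Println(addCharset("application/octet-stream")) // unchanged
}
```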
@@ -927,7 +902,6 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
ReadMimeType: true,
WriteMimeType: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: true,
}).Fill(f)
// Create a new authorized Drive client.
@@ -1022,15 +996,6 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
// newRegularObject creates a fs.Object for a normal drive.File
func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
if f.opt.SkipChecksumGphotos {
for _, space := range info.Spaces {
if space == "photos" {
info.Md5Checksum = ""
break
}
}
}
return &Object{
baseObject: f.newBaseObject(remote, info),
url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, info.Id),
@@ -2238,7 +2203,7 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), startPage
var changeList *drive.ChangeList
err = f.pacer.Call(func() (bool, error) {
changesCall := f.svc.Changes.List(pageToken).
changesCall := f.svc.Changes.List(pageToken).IncludeCorpusRemovals(true).
Fields("nextPageToken,newStartPageToken,changes(fileId,file(name,parents,mimeType))")
if f.opt.ListChunk > 0 {
changesCall.PageSize(f.opt.ListChunk)
@@ -2465,10 +2430,6 @@ func (o *baseObject) httpResponse(url, method string, options []fs.OpenOption) (
return req, nil, err
}
fs.OpenOptionAddHTTPHeaders(req.Header, options)
if o.bytes == 0 {
// Don't supply range requests for 0 length objects as they always fail
delete(req.Header, "Range")
}
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.client.Do(req)
if err == nil {

View File

@@ -15,7 +15,6 @@ import (
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors"
)
@@ -46,11 +45,6 @@ func init() {
Help: "FTP password",
IsPassword: true,
Required: true,
}, {
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited",
Default: 0,
Advanced: true,
},
},
})
@@ -58,11 +52,10 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
Concurrency int `config:"concurrency"`
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
}
// Fs represents a remote FTP server
@@ -77,7 +70,6 @@ type Fs struct {
dialAddr string
poolMu sync.Mutex
pool []*ftp.ServerConn
tokens *pacer.TokenDispenser
}
// Object describes an FTP file
@@ -136,9 +128,6 @@ func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
// Get an FTP connection from the pool, or open a new one
func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
if f.opt.Concurrency > 0 {
f.tokens.Get()
}
f.poolMu.Lock()
if len(f.pool) > 0 {
c = f.pool[0]
@@ -158,9 +147,6 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
// if err is not nil then it checks the connection is alive using a
// NOOP request
func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
if f.opt.Concurrency > 0 {
defer f.tokens.Put()
}
c := *pc
*pc = nil
if err != nil {
@@ -212,7 +198,6 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
user: user,
pass: pass,
dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,

View File

@@ -147,22 +147,6 @@ func init() {
Value: "publicReadWrite",
Help: "Project team owners get OWNER access, and all Users get WRITER access.",
}},
}, {
Name: "bucket_policy_only",
Help: `Access checks should use bucket-level IAM policies.
If you want to upload objects to a bucket with Bucket Policy Only set
then you will need to set this.
When it is set, rclone:
- ignores ACLs set on buckets
- ignores ACLs set on objects
- creates buckets with Bucket Policy Only set
Docs: https://cloud.google.com/storage/docs/bucket-policy-only
`,
Default: false,
}, {
Name: "location",
Help: "Location for the newly created buckets.",
@@ -260,7 +244,6 @@ type Options struct {
ServiceAccountCredentials string `config:"service_account_credentials"`
ObjectACL string `config:"object_acl"`
BucketACL string `config:"bucket_acl"`
BucketPolicyOnly bool `config:"bucket_policy_only"`
Location string `config:"location"`
StorageClass string `config:"storage_class"`
}
@@ -733,19 +716,8 @@ func (f *Fs) Mkdir(dir string) (err error) {
Location: f.opt.Location,
StorageClass: f.opt.StorageClass,
}
if f.opt.BucketPolicyOnly {
bucket.IamConfiguration = &storage.BucketIamConfiguration{
BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
Enabled: true,
},
}
}
err = f.pacer.Call(func() (bool, error) {
insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
_, err = insertBucket.Do()
_, err = f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket).PredefinedAcl(f.opt.BucketACL).Do()
return shouldRetry(err)
})
if err == nil {
@@ -1011,11 +983,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}
var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
insertObject := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
if !o.fs.opt.BucketPolicyOnly {
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = insertObject.Do()
newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.opt.ObjectACL).Do()
return shouldRetry(err)
})
if err != nil {

View File

@@ -6,7 +6,6 @@ package http
import (
"io"
"mime"
"net/http"
"net/url"
"path"
@@ -45,22 +44,6 @@ func init() {
Value: "https://user:pass@example.com",
Help: "Connect to example.com using a username and password",
}},
}, {
Name: "no_slash",
Help: `Set this if the site doesn't end directories with /
Use this if your target website does not use / on the end of
directories.
A / on the end of a path is how rclone normally tells the difference
between files and directories. If this flag is set, then rclone will
treat all files with Content-Type: text/html as directories and read
URLs from them rather than downloading them.
Note that this may cause rclone to confuse genuine HTML files with
directories.`,
Default: false,
Advanced: true,
}},
}
fs.Register(fsi)
@@ -69,7 +52,6 @@ directories.`,
// Options defines the configuration for this backend
type Options struct {
Endpoint string `config:"url"`
NoSlash bool `config:"no_slash"`
}
// Fs stores the interface to the remote HTTP files
@@ -288,20 +270,14 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
if err != nil {
return nil, err
}
var (
walk func(*html.Node)
seen = make(map[string]struct{})
)
var walk func(*html.Node)
walk = func(n *html.Node) {
if n.Type == html.ElementNode && n.Data == "a" {
for _, a := range n.Attr {
if a.Key == "href" {
name, err := parseName(base, a.Val)
if err == nil {
if _, found := seen[name]; !found {
names = append(names, name)
seen[name] = struct{}{}
}
names = append(names, name)
}
break
}
@@ -326,16 +302,14 @@ func (f *Fs) readDir(dir string) (names []string, err error) {
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
}
res, err := f.httpClient.Get(URL)
if err == nil {
defer fs.CheckClose(res.Body, &err)
if res.StatusCode == http.StatusNotFound {
return nil, fs.ErrorDirNotFound
}
if err == nil && res.StatusCode == http.StatusNotFound {
return nil, fs.ErrorDirNotFound
}
err = statusError(res, err)
if err != nil {
return nil, errors.Wrap(err, "failed to readDir")
}
defer fs.CheckClose(res.Body, &err)
contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
switch contentType {
@@ -379,16 +353,11 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
fs: f,
remote: remote,
}
switch err = file.stat(); err {
case nil:
entries = append(entries, file)
case fs.ErrorNotAFile:
// ...found a directory not a file
dir := fs.NewDir(remote, timeUnset)
entries = append(entries, dir)
default:
if err = file.stat(); err != nil {
fs.Debugf(remote, "skipping because of error: %v", err)
continue
}
entries = append(entries, file)
}
}
return entries, nil
@@ -464,16 +433,6 @@ func (o *Object) stat() error {
o.size = parseInt64(res.Header.Get("Content-Length"), -1)
o.modTime = t
o.contentType = res.Header.Get("Content-Type")
// If NoSlash is set then check ContentType to see if it is a directory
if o.fs.opt.NoSlash {
mediaType, _, err := mime.ParseMediaType(o.contentType)
if err != nil {
return errors.Wrapf(err, "failed to parse Content-Type: %q", o.contentType)
}
if mediaType == "text/html" {
return fs.ErrorNotAFile
}
}
return nil
}

View File

@@ -65,7 +65,7 @@ func prepare(t *testing.T) (fs.Fs, func()) {
return f, tidy
}
func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
func testListRoot(t *testing.T, f fs.Fs) {
entries, err := f.List("")
require.NoError(t, err)
@@ -93,29 +93,15 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
e = entries[3]
assert.Equal(t, "two.html", e.Remote())
if noSlash {
assert.Equal(t, int64(-1), e.Size())
_, ok = e.(fs.Directory)
assert.True(t, ok)
} else {
assert.Equal(t, int64(41), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
}
assert.Equal(t, int64(7), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
}
func TestListRoot(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
testListRoot(t, f, false)
}
func TestListRootNoSlash(t *testing.T) {
f, tidy := prepare(t)
f.(*Fs).opt.NoSlash = true
defer tidy()
testListRoot(t, f, true)
testListRoot(t, f)
}
func TestListSubDir(t *testing.T) {
@@ -208,7 +194,7 @@ func TestIsAFileRoot(t *testing.T) {
f, err := NewFs(remoteName, "one%.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
testListRoot(t, f, false)
testListRoot(t, f)
}
func TestIsAFileSubDir(t *testing.T) {

View File

@@ -1 +1 @@
<a href="two.html/file.txt">file.txt</a>
potato

View File

@@ -1,589 +0,0 @@
package koofr
import (
"encoding/base64"
"errors"
"fmt"
"io"
"net/http"
"path"
"strings"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/hash"
httpclient "github.com/koofr/go-httpclient"
koofrclient "github.com/koofr/go-koofrclient"
)
// Register Fs with rclone
func init() {
fs.Register(&fs.RegInfo{
Name: "koofr",
Description: "Koofr",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "endpoint",
Help: "The Koofr API endpoint to use",
Default: "https://app.koofr.net",
Required: true,
Advanced: true,
}, {
Name: "mountid",
Help: "Mount ID of the mount to use. If omitted, the primary mount is used.",
Required: false,
Default: "",
Advanced: true,
}, {
Name: "user",
Help: "Your Koofr user name",
Required: true,
}, {
Name: "password",
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
IsPassword: true,
Required: true,
},
},
})
}
// Options represent the configuration of the Koofr backend
type Options struct {
Endpoint string `config:"endpoint"`
MountID string `config:"mountid"`
User string `config:"user"`
Password string `config:"password"`
}
// A Fs is a representation of a remote Koofr Fs
type Fs struct {
name string
mountID string
root string
opt Options
features *fs.Features
client *koofrclient.KoofrClient
}
// An Object on the remote Koofr Fs
type Object struct {
fs *Fs
remote string
info koofrclient.FileInfo
}
func base(pth string) string {
rv := path.Base(pth)
if rv == "" || rv == "." {
rv = "/"
}
return rv
}
func dir(pth string) string {
rv := path.Dir(pth)
if rv == "" || rv == "." {
rv = "/"
}
return rv
}
// String returns a string representation of the remote Object
func (o *Object) String() string {
return o.remote
}
// Remote returns the remote path of the Object, relative to Fs root
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns the modification time of the Object
func (o *Object) ModTime() time.Time {
return time.Unix(o.info.Modified/1000, (o.info.Modified%1000)*1000*1000)
}
// Size return the size of the Object in bytes
func (o *Object) Size() int64 {
return o.info.Size
}
// Fs returns a reference to the Koofr Fs containing the Object
func (o *Object) Fs() fs.Info {
return o.fs
}
// Hash returns an MD5 hash of the Object
func (o *Object) Hash(typ hash.Type) (string, error) {
if typ == hash.MD5 {
return o.info.Hash, nil
}
return "", nil
}
// fullPath returns full path of the remote Object (including Fs root)
func (o *Object) fullPath() string {
return o.fs.fullPath(o.remote)
}
// Storable returns true if the Object is storable
func (o *Object) Storable() bool {
return true
}
// SetModTime is not supported
func (o *Object) SetModTime(mtime time.Time) error {
return nil
}
// Open opens the Object for reading
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
var sOff, eOff int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
sOff = x.Offset
case *fs.RangeOption:
sOff = x.Start
eOff = x.End
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if sOff == 0 && eOff < 0 {
return o.fs.client.FilesGet(o.fs.mountID, o.fullPath())
}
if sOff < 0 {
sOff = o.Size() - eOff
eOff = o.Size()
}
if eOff > o.Size() {
eOff = o.Size()
}
span := &koofrclient.FileSpan{
Start: sOff,
End: eOff,
}
return o.fs.client.FilesGetRange(o.fs.mountID, o.fullPath(), span)
}
// Update updates the Object contents
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
putopts := &koofrclient.PutFilter{
ForceOverwrite: true,
NoRename: true,
IgnoreNonExisting: true,
}
fullPath := o.fullPath()
dirPath := dir(fullPath)
name := base(fullPath)
err := o.fs.mkdir(dirPath)
if err != nil {
return err
}
info, err := o.fs.client.FilesPutOptions(o.fs.mountID, dirPath, name, in, putopts)
if err != nil {
return err
}
o.info = *info
return nil
}
// Remove deletes the remote Object
func (o *Object) Remove() error {
return o.fs.client.FilesDelete(o.fs.mountID, o.fullPath())
}
// Name returns the name of the Fs
func (f *Fs) Name() string {
return f.name
}
// Root returns the root path of the Fs
func (f *Fs) Root() string {
return f.root
}
// String returns a string representation of the Fs
func (f *Fs) String() string {
return "koofr:" + f.mountID + ":" + f.root
}
// Features returns the optional features supported by this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision denotes that setting modification times is not supported
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Hashes returns the set of hash types provided by the Fs
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// fullPath constructs a full, absolute path from a Fs root-relative path.
func (f *Fs) fullPath(part string) string {
return path.Join("/", f.root, part)
}
// NewFs constructs a new filesystem given a root path and configuration options
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
pass, err := obscure.Reveal(opt.Password)
if err != nil {
return nil, err
}
client := koofrclient.NewKoofrClient(opt.Endpoint, false)
basicAuth := fmt.Sprintf("Basic %s",
base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
client.HTTPClient.Headers.Set("Authorization", basicAuth)
mounts, err := client.Mounts()
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: root,
opt: *opt,
client: client,
}
f.features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(f)
for _, m := range mounts {
if opt.MountID != "" {
if m.Id == opt.MountID {
f.mountID = m.Id
break
}
} else if m.IsPrimary {
f.mountID = m.Id
break
}
}
if f.mountID == "" {
if opt.MountID == "" {
return nil, errors.New("Failed to find primary mount")
}
return nil, errors.New("Failed to find mount " + opt.MountID)
}
rootFile, err := f.client.FilesInfo(f.mountID, "/"+f.root)
if err == nil && rootFile.Type != "dir" {
f.root = dir(f.root)
err = fs.ErrorIsFile
} else {
err = nil
}
return f, err
}
// List returns a list of items in a directory
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
if err != nil {
return nil, translateErrorsDir(err)
}
entries = make([]fs.DirEntry, len(files))
for i, file := range files {
if file.Type == "dir" {
entries[i] = fs.NewDir(path.Join(dir, file.Name), time.Unix(0, 0))
} else {
entries[i] = &Object{
fs: f,
info: file,
remote: path.Join(dir, file.Name),
}
}
}
return entries, nil
}
// NewObject creates a new remote Object for a given remote path
func (f *Fs) NewObject(remote string) (obj fs.Object, err error) {
info, err := f.client.FilesInfo(f.mountID, f.fullPath(remote))
if err != nil {
return nil, translateErrorsObject(err)
}
if info.Type == "dir" {
return nil, fs.ErrorNotAFile
}
return &Object{
fs: f,
info: info,
remote: remote,
}, nil
}
// Put updates a remote Object
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) {
putopts := &koofrclient.PutFilter{
ForceOverwrite: true,
NoRename: true,
IgnoreNonExisting: true,
}
fullPath := f.fullPath(src.Remote())
dirPath := dir(fullPath)
name := base(fullPath)
err = f.mkdir(dirPath)
if err != nil {
return nil, err
}
info, err := f.client.FilesPutOptions(f.mountID, dirPath, name, in, putopts)
if err != nil {
return nil, translateErrorsObject(err)
}
return &Object{
fs: f,
info: *info,
remote: src.Remote(),
}, nil
}
// PutStream updates a remote Object with a stream of unknown size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// isBadRequest is a predicate which holds true iff the error returned was
// HTTP status 400
func isBadRequest(err error) bool {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusBadRequest {
return true
}
}
return false
}
// translateErrorsDir translates koofr errors to rclone errors (for a dir
// operation)
func translateErrorsDir(err error) error {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusNotFound {
return fs.ErrorDirNotFound
}
}
return err
}
// translateErrorsObject translates Koofr errors to rclone errors (for an object operation)
func translateErrorsObject(err error) error {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
}
return err
}
// mkdir creates a directory at the given remote path. Creates ancestors if
// necessary
func (f *Fs) mkdir(fullPath string) error {
if fullPath == "/" {
return nil
}
info, err := f.client.FilesInfo(f.mountID, fullPath)
if err == nil && info.Type == "dir" {
return nil
}
err = translateErrorsDir(err)
if err != nil && err != fs.ErrorDirNotFound {
return err
}
dirs := strings.Split(fullPath, "/")
parent := "/"
for _, part := range dirs {
if part == "" {
continue
}
info, err = f.client.FilesInfo(f.mountID, path.Join(parent, part))
if err != nil || info.Type != "dir" {
err = translateErrorsDir(err)
if err != nil && err != fs.ErrorDirNotFound {
return err
}
err = f.client.FilesNewFolder(f.mountID, parent, part)
if err != nil && !isBadRequest(err) {
return err
}
}
parent = path.Join(parent, part)
}
return nil
}
// Mkdir creates a directory at the given remote path. Creates ancestors if
// necessary
func (f *Fs) Mkdir(dir string) error {
fullPath := f.fullPath(dir)
return f.mkdir(fullPath)
}
// Rmdir removes an (empty) directory at the given remote path
func (f *Fs) Rmdir(dir string) error {
files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
if err != nil {
return translateErrorsDir(err)
}
if len(files) > 0 {
return fs.ErrorDirectoryNotEmpty
}
err = f.client.FilesDelete(f.mountID, f.fullPath(dir))
if err != nil {
return translateErrorsDir(err)
}
return nil
}
// Copy copies a remote Object to the given path
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
dstFullPath := f.fullPath(remote)
dstDir := dir(dstFullPath)
err := f.mkdir(dstDir)
if err != nil {
return nil, fs.ErrorCantCopy
}
err = f.client.FilesCopy((src.(*Object)).fs.mountID,
(src.(*Object)).fs.fullPath((src.(*Object)).remote),
f.mountID, dstFullPath)
if err != nil {
return nil, fs.ErrorCantCopy
}
return f.NewObject(remote)
}
// Move moves a remote Object to the given path
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcObj := src.(*Object)
dstFullPath := f.fullPath(remote)
dstDir := dir(dstFullPath)
err := f.mkdir(dstDir)
if err != nil {
return nil, fs.ErrorCantMove
}
err = f.client.FilesMove(srcObj.fs.mountID,
srcObj.fs.fullPath(srcObj.remote), f.mountID, dstFullPath)
if err != nil {
return nil, fs.ErrorCantMove
}
return f.NewObject(remote)
}
// DirMove moves a remote directory to the given path
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
srcFs := src.(*Fs)
srcFullPath := srcFs.fullPath(srcRemote)
dstFullPath := f.fullPath(dstRemote)
if srcFs.mountID == f.mountID && srcFullPath == dstFullPath {
return fs.ErrorDirExists
}
dstDir := dir(dstFullPath)
err := f.mkdir(dstDir)
if err != nil {
return fs.ErrorCantDirMove
}
err = f.client.FilesMove(srcFs.mountID, srcFullPath, f.mountID, dstFullPath)
if err != nil {
return fs.ErrorCantDirMove
}
return nil
}
// About reports space usage (with MB precision)
func (f *Fs) About() (*fs.Usage, error) {
mount, err := f.client.MountsDetails(f.mountID)
if err != nil {
return nil, err
}
return &fs.Usage{
Total: fs.NewUsageValue(mount.SpaceTotal * 1024 * 1024),
Used: fs.NewUsageValue(mount.SpaceUsed * 1024 * 1024),
Trashed: nil,
Other: nil,
Free: fs.NewUsageValue((mount.SpaceTotal - mount.SpaceUsed) * 1024 * 1024),
Objects: nil,
}, nil
}
// Purge purges the complete Fs
func (f *Fs) Purge() error {
err := translateErrorsDir(f.client.FilesDelete(f.mountID, f.fullPath("")))
return err
}
// linkCreate is a Koofr API request for creating a public link
type linkCreate struct {
Path string `json:"path"`
}
// link is a Koofr API response to creating a public link
type link struct {
ID string `json:"id"`
Name string `json:"name"`
Path string `json:"path"`
Counter int64 `json:"counter"`
URL string `json:"url"`
ShortURL string `json:"shortUrl"`
Hash string `json:"hash"`
Host string `json:"host"`
HasPassword bool `json:"hasPassword"`
Password string `json:"password"`
ValidFrom int64 `json:"validFrom"`
ValidTo int64 `json:"validTo"`
PasswordRequired bool `json:"passwordRequired"`
}
// createLink makes a Koofr API call to create a public link
func createLink(c *koofrclient.KoofrClient, mountID string, path string) (*link, error) {
linkCreate := linkCreate{
Path: path,
}
linkData := link{}
request := httpclient.RequestData{
Method: "POST",
Path: "/api/v2/mounts/" + mountID + "/links",
ExpectedStatus: []int{http.StatusOK, http.StatusCreated},
ReqEncoding: httpclient.EncodingJSON,
ReqValue: linkCreate,
RespEncoding: httpclient.EncodingJSON,
RespValue: &linkData,
}
_, err := c.Request(&request)
if err != nil {
return nil, err
}
return &linkData, nil
}
// PublicLink creates a public link to the remote path
func (f *Fs) PublicLink(remote string) (string, error) {
linkData, err := createLink(f.client, f.mountID, f.fullPath(remote))
if err != nil {
return "", translateErrorsDir(err)
}
return linkData.ShortURL, nil
}

View File

@@ -1,14 +0,0 @@
package koofr_test
import (
"testing"
"github.com/ncw/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestKoofr:",
})
}

View File

@@ -335,13 +335,8 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID.
// if `relPath` == "", it reads the metadata for the item with that ID.
//
// We address items using the pattern `drives/driveID/items/itemID:/relativePath`
// instead of simply using `drives/driveID/root:/itemPath` because it works for
// "shared with me" folders in OneDrive Personal (See #2536, #2778)
// This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
func (f *Fs) readMetaDataForPathRelativeToID(normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
opts := newOptsCall(normalizedID, "GET", ":/"+withTrailingColon(rest.URLPathEscape(replaceReservedChars(relPath))))
opts := newOptsCall(normalizedID, "GET", ":/"+rest.URLPathEscape(replaceReservedChars(relPath)))
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(&opts, nil, &info)
return shouldRetry(resp, err)
@@ -708,7 +703,9 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
id := info.GetID()
f.dirCache.Put(remote, id)
d := fs.NewDir(remote, time.Time(info.GetLastModifiedDateTime())).SetID(id)
d.SetItems(folder.ChildCount)
if folder != nil {
d.SetItems(folder.ChildCount)
}
entries = append(entries, d)
} else {
o, err := f.newObjectWithInfo(remote, info)
@@ -822,6 +819,9 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
return err
}
f.dirCache.FlushDir(dir)
if err != nil {
return err
}
return nil
}
@@ -1340,12 +1340,12 @@ func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
opts = rest.Opts{
Method: "PATCH",
RootURL: rootURL,
Path: "/" + drive + "/items/" + trueDirID + ":/" + withTrailingColon(rest.URLPathEscape(leaf)),
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(leaf),
}
} else {
opts = rest.Opts{
Method: "PATCH",
Path: "/root:/" + withTrailingColon(rest.URLPathEscape(o.srvPath())),
Path: "/root:/" + rest.URLPathEscape(o.srvPath()),
}
}
update := api.SetFileSystemInfo{
@@ -1668,21 +1668,6 @@ func getRelativePathInsideBase(base, target string) (string, bool) {
return "", false
}
// Adds a ":" at the end of `remotePath` in a proper manner.
// If `remotePath` already ends with "/", change it to ":/"
// If `remotePath` is "", return "".
// A workaround for #2720 and #3039
func withTrailingColon(remotePath string) string {
if remotePath == "" {
return ""
}
if strings.HasSuffix(remotePath, "/") {
return remotePath[:len(remotePath)-1] + ":/"
}
return remotePath + ":"
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)

View File

@@ -287,6 +287,9 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
return err
}
f.dirCache.FlushDir(dir)
if err != nil {
return err
}
return nil
}

View File

@@ -2,7 +2,6 @@ package swift
import (
"net/http"
"time"
"github.com/ncw/swift"
)
@@ -66,14 +65,6 @@ func (a *auth) Token() string {
return a.parentAuth.Token()
}
// Expires returns the time the token expires if known or Zero if not.
func (a *auth) Expires() (t time.Time) {
if do, ok := a.parentAuth.(swift.Expireser); ok {
t = do.Expires()
}
return t
}
// The CDN url if available
func (a *auth) CdnUrl() string { // nolint
if a.parentAuth == nil {
@@ -83,7 +74,4 @@ func (a *auth) CdnUrl() string { // nolint
}
// Check the interfaces are satisfied
var (
_ swift.Authenticator = (*auth)(nil)
_ swift.Expireser = (*auth)(nil)
)
var _ swift.Authenticator = (*auth)(nil)

View File

@@ -644,18 +644,10 @@ func (f *Fs) _mkdir(dirPath string) error {
Path: dirPath,
NoResponse: true,
}
err := f.pacer.Call(func() (bool, error) {
return f.pacer.Call(func() (bool, error) {
resp, err := f.srv.Call(&opts)
return shouldRetry(resp, err)
})
if apiErr, ok := err.(*api.Error); ok {
// already exists
// owncloud returns 423/StatusLocked if the create is already in progress
if apiErr.StatusCode == http.StatusMethodNotAllowed || apiErr.StatusCode == http.StatusNotAcceptable || apiErr.StatusCode == http.StatusLocked {
return nil
}
}
return err
}
// mkdir makes the directory and parents using native paths
@@ -663,7 +655,12 @@ func (f *Fs) mkdir(dirPath string) error {
// defer log.Trace(dirPath, "")("")
err := f._mkdir(dirPath)
if apiErr, ok := err.(*api.Error); ok {
// parent does not exist so create it first then try again
// already exists
// owncloud returns 423/StatusLocked if the create is already in progress
if apiErr.StatusCode == http.StatusMethodNotAllowed || apiErr.StatusCode == http.StatusNotAcceptable || apiErr.StatusCode == http.StatusLocked {
return nil
}
// parent does not exist
if apiErr.StatusCode == http.StatusConflict {
err = f.mkParentDir(dirPath)
if err == nil {
@@ -916,13 +913,11 @@ func (f *Fs) About() (*fs.Usage, error) {
return nil, errors.Wrap(err, "about call failed")
}
usage := &fs.Usage{}
if q.Available != 0 || q.Used != 0 {
if q.Available >= 0 && q.Used >= 0 {
usage.Total = fs.NewUsageValue(q.Available + q.Used)
}
if q.Used >= 0 {
usage.Used = fs.NewUsageValue(q.Used)
}
if q.Available >= 0 && q.Used >= 0 {
usage.Total = fs.NewUsageValue(q.Available + q.Used)
}
if q.Used >= 0 {
usage.Used = fs.NewUsageValue(q.Used)
}
return usage, nil
}

View File

@@ -244,10 +244,8 @@ func getAssetFromReleasesPage(project string, matchName *regexp.Regexp) (assetUR
if a.Key == "href" {
if name := path.Base(a.Val); matchName.MatchString(name) && isOurOsArch(name) {
if u, err := rest.URLJoin(base, a.Val); err == nil {
if assetName == "" {
assetName = name
assetURL = u.String()
}
assetName = name
assetURL = u.String()
}
}
break

View File

@@ -36,7 +36,6 @@ docs = [
"http.md",
"hubic.md",
"jottacloud.md",
"koofr.md",
"mega.md",
"azureblob.md",
"onedrive.md",

View File

@@ -341,7 +341,8 @@ func initConfig() {
configflags.SetFlags()
// Load filters
err := filterflags.Reload()
var err error
filter.Active, err = filter.NewFilter(&filterflags.Opt)
if err != nil {
log.Fatalf("Failed to load filters: %v", err)
}

View File

@@ -7,13 +7,8 @@ import (
"github.com/spf13/cobra"
)
var (
createEmptySrcDirs = false
)
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after copy")
}
var commandDefintion = &cobra.Command{
@@ -74,7 +69,7 @@ changed recently very efficiently like this:
fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
cmd.Run(true, true, command, func() error {
if srcFileName == "" {
return sync.CopyDir(fdst, fsrc, createEmptySrcDirs)
return sync.CopyDir(fdst, fsrc)
}
return operations.CopyFile(fdst, fsrc, srcFileName, srcFileName)
})

View File

@@ -48,7 +48,7 @@ destination.
fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args)
cmd.Run(true, true, command, func() error {
if srcFileName == "" {
return sync.CopyDir(fdst, fsrc, false)
return sync.CopyDir(fdst, fsrc)
}
return operations.CopyFile(fdst, fsrc, dstFileName, srcFileName)
})

View File

@@ -45,7 +45,7 @@ __rclone_custom_func() {
else
__rclone_init_completion -n : || return
fi
if [[ $cur != *:* ]]; then
if [[ $cur =~ ^[[:alnum:]_]*$ ]]; then
local remote
while IFS= read -r remote; do
[[ $remote != $cur* ]] || COMPREPLY+=("$remote")
@@ -54,10 +54,10 @@ __rclone_custom_func() {
local paths=("$cur"*)
[[ ! -f ${paths[0]} ]] || COMPREPLY+=("${paths[@]}")
fi
else
elif [[ $cur =~ ^[[:alnum:]_]+: ]]; then
local path=${cur#*:}
if [[ $path == */* ]]; then
local prefix=$(eval printf '%s' "${path%/*}")
local prefix=${path%/*}
else
local prefix=
fi
@@ -66,7 +66,6 @@ __rclone_custom_func() {
local reply=${prefix:+$prefix/}$line
[[ $reply != $path* ]] || COMPREPLY+=("$reply")
done < <(rclone lsf "${cur%%:*}:$prefix" 2>/dev/null)
[[ ! ${COMPREPLY[@]} ]] || compopt -o filenames
fi
[[ ! ${COMPREPLY[@]} ]] || compopt -o nospace
fi

View File

@@ -37,8 +37,6 @@ func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) (err error) {
a.Crtime = modTime
// FIXME include Valid so get some caching?
// FIXME fs.Debugf(d.path, "Dir.Attr %+v", a)
a.Size = 512
a.Blocks = 1
return nil
}

View File

@@ -10,13 +10,11 @@ import (
// Globals
var (
deleteEmptySrcDirs = false
createEmptySrcDirs = false
)
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&deleteEmptySrcDirs, "delete-empty-src-dirs", "", deleteEmptySrcDirs, "Delete empty source dirs after move")
commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after move")
}
var commandDefintion = &cobra.Command{
@@ -54,7 +52,7 @@ can speed transfers up greatly.
fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
cmd.Run(true, true, command, func() error {
if srcFileName == "" {
return sync.MoveDir(fdst, fsrc, deleteEmptySrcDirs, createEmptySrcDirs)
return sync.MoveDir(fdst, fsrc, deleteEmptySrcDirs)
}
return operations.MoveFile(fdst, fsrc, srcFileName, srcFileName)
})

View File

@@ -52,7 +52,7 @@ transfer.
cmd.Run(true, true, command, func() error {
if srcFileName == "" {
return sync.MoveDir(fdst, fsrc, false, false)
return sync.MoveDir(fdst, fsrc, false)
}
return operations.MoveFile(fdst, fsrc, dstFileName, srcFileName)
})

View File

@@ -1,184 +0,0 @@
package dlna
const connectionManagerServiceDescription = `<?xml version="1.0" encoding="UTF-8"?>
<scpd xmlns="urn:schemas-upnp-org:service-1-0">
<specVersion>
<major>1</major>
<minor>0</minor>
</specVersion>
<actionList>
<action>
<name>GetProtocolInfo</name>
<argumentList>
<argument>
<name>Source</name>
<direction>out</direction>
<relatedStateVariable>SourceProtocolInfo</relatedStateVariable>
</argument>
<argument>
<name>Sink</name>
<direction>out</direction>
<relatedStateVariable>SinkProtocolInfo</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>PrepareForConnection</name>
<argumentList>
<argument>
<name>RemoteProtocolInfo</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ProtocolInfo</relatedStateVariable>
</argument>
<argument>
<name>PeerConnectionManager</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionManager</relatedStateVariable>
</argument>
<argument>
<name>PeerConnectionID</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionID</relatedStateVariable>
</argument>
<argument>
<name>Direction</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_Direction</relatedStateVariable>
</argument>
<argument>
<name>ConnectionID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionID</relatedStateVariable>
</argument>
<argument>
<name>AVTransportID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_AVTransportID</relatedStateVariable>
</argument>
<argument>
<name>RcsID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_RcsID</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>ConnectionComplete</name>
<argumentList>
<argument>
<name>ConnectionID</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionID</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>GetCurrentConnectionIDs</name>
<argumentList>
<argument>
<name>ConnectionIDs</name>
<direction>out</direction>
<relatedStateVariable>CurrentConnectionIDs</relatedStateVariable>
</argument>
</argumentList>
</action>
<action>
<name>GetCurrentConnectionInfo</name>
<argumentList>
<argument>
<name>ConnectionID</name>
<direction>in</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionID</relatedStateVariable>
</argument>
<argument>
<name>RcsID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_RcsID</relatedStateVariable>
</argument>
<argument>
<name>AVTransportID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_AVTransportID</relatedStateVariable>
</argument>
<argument>
<name>ProtocolInfo</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_ProtocolInfo</relatedStateVariable>
</argument>
<argument>
<name>PeerConnectionManager</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionManager</relatedStateVariable>
</argument>
<argument>
<name>PeerConnectionID</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionID</relatedStateVariable>
</argument>
<argument>
<name>Direction</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_Direction</relatedStateVariable>
</argument>
<argument>
<name>Status</name>
<direction>out</direction>
<relatedStateVariable>A_ARG_TYPE_ConnectionStatus</relatedStateVariable>
</argument>
</argumentList>
</action>
</actionList>
<serviceStateTable>
<stateVariable sendEvents="yes">
<name>SourceProtocolInfo</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="yes">
<name>SinkProtocolInfo</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="yes">
<name>CurrentConnectionIDs</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_ConnectionStatus</name>
<dataType>string</dataType>
<allowedValueList>
<allowedValue>OK</allowedValue>
<allowedValue>ContentFormatMismatch</allowedValue>
<allowedValue>InsufficientBandwidth</allowedValue>
<allowedValue>UnreliableChannel</allowedValue>
<allowedValue>Unknown</allowedValue>
</allowedValueList>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_ConnectionManager</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_Direction</name>
<dataType>string</dataType>
<allowedValueList>
<allowedValue>Input</allowedValue>
<allowedValue>Output</allowedValue>
</allowedValueList>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_ProtocolInfo</name>
<dataType>string</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_ConnectionID</name>
<dataType>i4</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_AVTransportID</name>
<dataType>i4</dataType>
</stateVariable>
<stateVariable sendEvents="no">
<name>A_ARG_TYPE_RcsID</name>
<dataType>i4</dataType>
</stateVariable>
</serviceStateTable>
</scpd>`

View File

@@ -84,21 +84,6 @@ var services = []*service{
},
SCPD: contentDirectoryServiceDescription,
},
{
Service: upnp.Service{
ServiceType: "urn:schemas-upnp-org:service:ConnectionManager:1",
ServiceId: "urn:upnp-org:serviceId:ConnectionManager",
ControlURL: serviceControlURL,
},
SCPD: connectionManagerServiceDescription,
},
}
func init() {
for _, s := range services {
p := path.Join("/scpd", s.ServiceId)
s.SCPDURL = p
}
}
func devices() []string {
@@ -265,6 +250,9 @@ func (s *server) initMux(mux *http.ServeMux) {
// Install handlers to serve SCPD for each UPnP service.
for _, s := range services {
p := path.Join("/scpd", s.ServiceId)
s.SCPDURL = p
mux.HandleFunc(s.SCPDURL, func(serviceDesc string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("content-type", `text/xml; charset="utf-8"`)

View File

@@ -59,11 +59,6 @@ func TestRootSCPD(t *testing.T) {
// Make sure that the SCPD contains a CDS service.
require.Contains(t, string(body),
"<serviceType>urn:schemas-upnp-org:service:ContentDirectory:1</serviceType>")
// Make sure that the SCPD contains a CM service.
require.Contains(t, string(body),
"<serviceType>urn:schemas-upnp-org:service:ConnectionManager:1</serviceType>")
// Ensure that the SCPD url is configured.
require.Regexp(t, "<SCPDURL>/.*</SCPDURL>", string(body))
}
// Make sure that it serves content from the remote.

View File

@@ -330,12 +330,25 @@ func (s *server) listObjects(w http.ResponseWriter, r *http.Request, remote stri
ls := listItems{}
// if remote supports ListR use that directly, otherwise use recursive Walk
err := walk.ListR(s.f, remote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
for _, entry := range entries {
ls.add(entry)
}
return nil
})
var err error
if ListR := s.f.Features().ListR; ListR != nil {
err = ListR(remote, func(entries fs.DirEntries) error {
for _, entry := range entries {
ls.add(entry)
}
return nil
})
} else {
err = walk.Walk(s.f, remote, true, -1, func(path string, entries fs.DirEntries, err error) error {
if err == nil {
for _, entry := range entries {
ls.add(entry)
}
}
return err
})
}
if err != nil {
_, err = fserrors.Cause(err)
if err != fs.ErrorDirNotFound {

View File

@@ -6,13 +6,8 @@ import (
"github.com/spf13/cobra"
)
var (
createEmptySrcDirs = false
)
func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after sync")
}
var commandDefintion = &cobra.Command{
@@ -44,7 +39,7 @@ go there.
cmd.CheckArgs(2, 2, command, args)
fsrc, fdst := cmd.NewFsSrcDst(args)
cmd.Run(true, true, command, func() error {
return sync.Sync(fdst, fsrc, createEmptySrcDirs)
return sync.Sync(fdst, fsrc)
})
},
}

View File

@@ -29,7 +29,6 @@ Rclone is a command line program to sync files and directories to and from:
* {{< provider name="Hubic" home="https://hubic.com/" config="/hubic/" >}}
* {{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
* {{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
* {{< provider name="Koofr" home="https://koofr.eu/" config="/koofr/" >}}
* {{< provider name="Memset Memstore" home="https://www.memset.com/cloud/storage/" config="/swift/" >}}
* {{< provider name="Mega" home="https://mega.nz/" config="/mega/" >}}
* {{< provider name="Microsoft Azure Blob Storage" home="https://azure.microsoft.com/en-us/services/storage/blobs/" config="/azureblob/" >}}

View File

@@ -243,8 +243,3 @@ Contributors
* calisro <robert.calistri@gmail.com>
* Dr.Rx <david.rey@nventive.com>
* marcintustin <marcintustin@users.noreply.github.com>
* jaKa Močnik <jaka@koofr.net>
* Fionera <fionera@fionera.de>
* Dan Walters <dan@walters.io>
* Danil Semelenov <sgtpep@users.noreply.github.com>
* xopez <28950736+xopez@users.noreply.github.com>

View File

@@ -1,7 +1,7 @@
---
title: "Documentation"
description: "Rclone Usage"
date: "2019-02-25"
date: "2015-06-06"
---
Configure
@@ -34,7 +34,6 @@ See the following for detailed instructions for
* [HTTP](/http/)
* [Hubic](/hubic/)
* [Jottacloud](/jottacloud/)
* [Koofr](/koofr/)
* [Mega](/mega/)
* [Microsoft Azure Blob Storage](/azureblob/)
* [Microsoft OneDrive](/onedrive/)
@@ -821,16 +820,6 @@ then the files will have SUFFIX added on to them.
See `--backup-dir` for more info.
### --suffix-keep-extension ###
When using `--suffix`, setting this causes rclone to put the SUFFIX
before the extension of the files that it backs up rather than after.
So let's say we had `--suffix -2019-01-01`, without the flag `file.txt`
would be backed up to `file.txt-2019-01-01` and with the flag it would
be backed up to `file-2019-01-01.txt`. This can be helpful to make
sure the suffixed files can still be opened.
### --syslog ###
On capable OSes (not Windows or Plan9) send all log output to syslog.

View File

@@ -188,10 +188,3 @@ causes not all domains to be resolved properly.
Additionally, the `GODEBUG=netdns=` environment variable can be used to
influence the Go resolver decision. This also makes it possible to resolve
certain issues with DNS resolution. See the [name resolution section in the go docs](https://golang.org/pkg/net/#hdr-Name_Resolution).
### The total size reported in the stats for a sync is wrong and keeps changing
It is likely you have more than 10,000 files that need to be
synced. By default rclone only gets 10,000 files ahead in a sync so as
not to use up too much memory. You can change this default with the
[--max-backlog](/docs/#max-backlog-n) flag.

View File

@@ -342,27 +342,6 @@ Access Control List for new buckets.
- "publicReadWrite"
- Project team owners get OWNER access, and all Users get WRITER access.
#### --gcs-bucket-policy-only
Access checks should use bucket-level IAM policies.
If you want to upload objects to a bucket with Bucket Policy Only set
then you will need to set this.
When it is set, rclone:
- ignores ACLs set on buckets
- ignores ACLs set on objects
- creates buckets with Bucket Policy Only set
Docs: https://cloud.google.com/storage/docs/bucket-policy-only
- Config: bucket_policy_only
- Env Var: RCLONE_GCS_BUCKET_POLICY_ONLY
- Type: bool
- Default: false
#### --gcs-location
Location for the newly created buckets.

View File

@@ -1,189 +0,0 @@
---
title: "Koofr"
description: "Rclone docs for Koofr"
date: "2019-02-25"
---
<i class="fa fa-suitcase"></i> Koofr
-----------------------------------------
Paths are specified as `remote:path`
Paths may be as deep as required, eg `remote:directory/subdirectory`.
The initial setup for Koofr involves creating an application password for
rclone. You can do that by opening the Koofr
[web application](https://app.koofr.net/app/admin/preferences/password),
giving the password a nice name like `rclone` and clicking on generate.
Here is an example of how to make a remote called `koofr`. First run:
rclone config
This will guide you through an interactive setup process:
```
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
name> koofr
Type of storage to configure.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
1 / A stackable unification remote, which can appear to merge the contents of several remotes
\ "union"
2 / Alias for an existing remote
\ "alias"
3 / Amazon Drive
\ "amazon cloud drive"
4 / Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)
\ "s3"
5 / Backblaze B2
\ "b2"
6 / Box
\ "box"
7 / Cache a remote
\ "cache"
8 / Dropbox
\ "dropbox"
9 / Encrypt/Decrypt a remote
\ "crypt"
10 / FTP Connection
\ "ftp"
11 / Google Cloud Storage (this is not Google Drive)
\ "google cloud storage"
12 / Google Drive
\ "drive"
13 / Hubic
\ "hubic"
14 / JottaCloud
\ "jottacloud"
15 / Koofr
\ "koofr"
16 / Local Disk
\ "local"
17 / Mega
\ "mega"
18 / Microsoft Azure Blob Storage
\ "azureblob"
19 / Microsoft OneDrive
\ "onedrive"
20 / OpenDrive
\ "opendrive"
21 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
\ "swift"
22 / Pcloud
\ "pcloud"
23 / QingCloud Object Storage
\ "qingstor"
24 / SSH/SFTP Connection
\ "sftp"
25 / Webdav
\ "webdav"
26 / Yandex Disk
\ "yandex"
27 / http Connection
\ "http"
Storage> koofr
** See help for koofr backend at: https://rclone.org/koofr/ **
Your Koofr user name
Enter a string value. Press Enter for the default ("").
user> USER@NAME
Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)
y) Yes type in my own password
g) Generate random password
y/g> y
Enter the password:
password:
Confirm the password:
password:
Edit advanced config? (y/n)
y) Yes
n) No
y/n> n
Remote config
--------------------
[koofr]
type = koofr
baseurl = https://app.koofr.net
user = USER@NAME
password = *** ENCRYPTED ***
--------------------
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
```
You can choose to edit advanced config in order to enter your own service URL
if you use an on-premise or white label Koofr instance, or choose an alternative
mount instead of your primary storage.
Once configured you can then use `rclone` like this,
List directories in top level of your Koofr
rclone lsd koofr:
List all the files in your Koofr
rclone ls koofr:
To copy a local directory to a Koofr directory called backup
rclone copy /home/source koofr:backup
<!--- autogenerated options start - DO NOT EDIT, instead edit fs.RegInfo in backend/koofr/koofr.go then run make backenddocs -->
### Standard Options
Here are the standard options specific to koofr (Koofr).
#### --koofr-user
Your Koofr user name
- Config: user
- Env Var: RCLONE_KOOFR_USER
- Type: string
- Default: ""
#### --koofr-password
Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)
- Config: password
- Env Var: RCLONE_KOOFR_PASSWORD
- Type: string
- Default: ""
### Advanced Options
Here are the advanced options specific to koofr (Koofr).
#### --koofr-baseurl
Base URL of the Koofr API to connect to
- Config: baseurl
- Env Var: RCLONE_KOOFR_BASEURL
- Type: string
- Default: "https://app.koofr.net"
#### --koofr-mountid
Mount ID of the mount to use. If omitted, the primary mount is used.
- Config: mountid
- Env Var: RCLONE_KOOFR_MOUNTID
- Type: string
- Default: ""
<!--- autogenerated options stop -->
### Limitations ###
Note that Koofr is case insensitive so you can't have a file called
"Hello.doc" and one called "hello.doc".

View File

@@ -298,13 +298,4 @@ Description: Using application 'rclone' is currently not supported for your orga
This means that rclone can't use the OneDrive for Business API with your account. There is not much you can do about it other than writing an email to your admins.
However, there are other ways to interact with your OneDrive account. Have a look at the webdav backend: https://rclone.org/webdav/#sharepoint
```
Error: invalid_grant
Code: AADSTS50076
Description: Due to a configuration change made by your administrator, or because you moved to a new location, you must use multi-factor authentication to access '...'.
```
If you see the error above after enabling multi-factor authentication for your account, you can fix it by refreshing your OAuth refresh token. To do that, run `rclone config` and choose to edit your OneDrive backend. You don't need to change anything until you reach the question `Already have a token - refresh?`. Answer `y` and go through the process to refresh your token, just like the first time the backend was configured. After this, rclone should work again for this backend.
However, there are other ways to interact with your OneDrive account. Have a look at the webdav backend: https://rclone.org/webdav/#sharepoint

View File

@@ -2,7 +2,7 @@
title: "Overview of cloud storage systems"
description: "Overview of cloud storage systems"
type: page
date: "2019-02-25"
date: "2015-09-06"
---
# Overview of cloud storage systems #
@@ -28,7 +28,6 @@ Here is an overview of the major features of each cloud storage system.
| HTTP | - | No | No | No | R |
| Hubic | MD5 | Yes | No | No | R/W |
| Jottacloud | MD5 | Yes | Yes | No | R/W |
| Koofr | MD5 | No | Yes | No | - |
| Mega | - | No | No | Yes | - |
| Microsoft Azure Blob Storage | MD5 | Yes | No | No | R/W |
| Microsoft OneDrive | SHA1 ‡‡ | Yes | Yes | No | R |

View File

@@ -556,21 +556,6 @@ This takes the following parameters
Authentication is required for this call.
### operations/publiclink: Create or retrieve a public link to the given file or folder.
This takes the following parameters
- fs - a remote name string eg "drive:"
- remote - a path within that remote eg "dir"
Returns
- url - URL of the resource
See the [link command](/commands/rclone_link/) for more information on the above.
Authentication is required for this call.
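For reference, the remote control API is plain JSON over HTTP, so this call can be made from any language. A minimal sketch of a client in Go, assuming a server started with `rclone rcd` on the default `localhost:5572` and a configured remote named `drive:`:
```
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Parameters as documented above: fs and remote.
	params, err := json.Marshal(map[string]string{
		"fs":     "drive:",
		"remote": "dir",
	})
	if err != nil {
		panic(err)
	}
	resp, err := http.Post("http://localhost:5572/operations/publiclink",
		"application/json", bytes.NewReader(params))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The reply carries the url field documented above.
	var out struct {
		URL string `json:"url"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.URL)
}
```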
### operations/purge: Remove a directory or container and all of its contents
This takes the following parameters

View File

@@ -1112,11 +1112,6 @@ server_side_encryption =
storage_class =
```
If you are using an older version of CEPH, eg 10.2.x Jewel, then you
may need to supply the parameter `--s3-upload-cutoff 0` or put this in
the config file as `upload_cutoff 0` to work around a bug which causes
uploading of small files to fail.
Note also that Ceph sometimes puts `/` in the passwords it gives
users. If you read the secret access key using the command line tools
you will get a JSON blob with the `/` escaped as `\/`. Make sure you

View File

@@ -2,7 +2,7 @@
<div class="row">
<hr>
<div class="col-sm-12">
<p>&copy; <a href="https://www.craig-wood.com/nick/">Nick Craig-Wood</a> 2014-2019<br>
<p>&copy; <a href="https://www.craig-wood.com/nick/">Nick Craig-Wood</a> 2014-2017<br>
Website hosted on a <a href="https://www.memset.com/dedicated-servers/vps/"><span style="font-weight: bold; font-family: arial black, arial, sans-serif; font-style: italic;">MEMSET CLOUD VPS</span></a>,
uploaded with <a href="https://rclone.org">rclone</a>
and built with <a href="https://github.com/spf13/hugo">Hugo</a></p>

View File

@@ -67,7 +67,6 @@
<li><a href="/http/"><i class="fa fa-globe"></i> HTTP</a></li>
<li><a href="/hubic/"><i class="fa fa-space-shuttle"></i> Hubic</a></li>
<li><a href="/jottacloud/"><i class="fa fa-cloud"></i> Jottacloud</a></li>
<li><a href="/koofr/"><i class="fa fa-suitcase"></i> Koofr</a></li>
<li><a href="/mega/"><i class="fa fa-archive"></i> Mega</a></li>
<li><a href="/azureblob/"><i class="fa fa-windows"></i> Microsoft Azure Blob Storage</a></li>
<li><a href="/onedrive/"><i class="fa fa-windows"></i> Microsoft OneDrive</a></li>

View File

@@ -67,7 +67,6 @@ type ConfigInfo struct {
DataRateUnit string
BackupDir string
Suffix string
SuffixKeepExtension bool
UseListR bool
BufferSize SizeSuffix
BwLimit BwTimetable

View File

@@ -68,7 +68,6 @@ func AddFlags(flagSet *pflag.FlagSet) {
flags.BoolVarP(flagSet, &fs.Config.NoUpdateModTime, "no-update-modtime", "", fs.Config.NoUpdateModTime, "Don't update destination mod-time if files identical.")
flags.StringVarP(flagSet, &fs.Config.BackupDir, "backup-dir", "", fs.Config.BackupDir, "Make backups into hierarchy based in DIR.")
flags.StringVarP(flagSet, &fs.Config.Suffix, "suffix", "", fs.Config.Suffix, "Suffix for use with --backup-dir.")
flags.BoolVarP(flagSet, &fs.Config.SuffixKeepExtension, "suffix-keep-extension", "", fs.Config.SuffixKeepExtension, "Preserve the extension when using --suffix.")
flags.BoolVarP(flagSet, &fs.Config.UseListR, "fast-list", "", fs.Config.UseListR, "Use recursive list if available. Uses more memory but fewer transactions.")
flags.Float64VarP(flagSet, &fs.Config.TPSLimit, "tpslimit", "", fs.Config.TPSLimit, "Limit HTTP transactions per second to this.")
flags.IntVarP(flagSet, &fs.Config.TPSLimitBurst, "tpslimit-burst", "", fs.Config.TPSLimitBurst, "Max burst of transactions for --tpslimit.")

View File

@@ -21,9 +21,8 @@ var Active = mustNewFilter(nil)
// rule is one filter rule
type rule struct {
Include bool
Regexp *regexp.Regexp
boundedRecursion bool
Include bool
Regexp *regexp.Regexp
}
// Match returns true if rule matches path
@@ -47,14 +46,13 @@ type rules struct {
}
// add adds a rule if it doesn't exist already
func (rs *rules) add(Include bool, re *regexp.Regexp, boundedRecursion bool) {
func (rs *rules) add(Include bool, re *regexp.Regexp) {
if rs.existing == nil {
rs.existing = make(map[string]struct{})
}
newRule := rule{
Include: Include,
Regexp: re,
boundedRecursion: boundedRecursion,
Include: Include,
Regexp: re,
}
newRuleString := newRule.String()
if _, ok := rs.existing[newRuleString]; ok {
@@ -75,23 +73,6 @@ func (rs *rules) len() int {
return len(rs.rules)
}
// boundedRecursion returns true if the set of filters would only
// need bounded recursion to evaluate
func (rs *rules) boundedRecursion() bool {
var (
excludeAll = false
boundedRecursion = true
)
for _, rule := range rs.rules {
if rule.Include {
boundedRecursion = boundedRecursion && rule.boundedRecursion
} else if rule.Regexp.String() == `^.*$` {
excludeAll = true
}
}
return excludeAll && boundedRecursion
}
// FilesMap describes the map of files to transfer
type FilesMap map[string]struct{}
@@ -251,8 +232,7 @@ func (f *Filter) addDirGlobs(Include bool, glob string) error {
if err != nil {
return err
}
boundedRecursion := globBoundedRecursion(dirGlob)
f.dirRules.add(Include, dirRe, boundedRecursion)
f.dirRules.add(Include, dirRe)
}
return nil
}
@@ -268,9 +248,8 @@ func (f *Filter) Add(Include bool, glob string) error {
if err != nil {
return err
}
boundedRecursion := globBoundedRecursion(glob)
if isFileRule {
f.fileRules.add(Include, re, boundedRecursion)
f.fileRules.add(Include, re)
// If include rule work out what directories are needed to scan
// if exclude rule, we can't rule anything out
// Unless it is `*` which matches everything
@@ -283,7 +262,7 @@ func (f *Filter) Add(Include bool, glob string) error {
}
}
if isDirRule {
f.dirRules.add(Include, re, boundedRecursion)
f.dirRules.add(Include, re)
}
return nil
}
@@ -364,12 +343,6 @@ func (f *Filter) InActive() bool {
len(f.Opt.ExcludeFile) == 0)
}
// BoundedRecursion returns true if the filter can be evaluated with
// bounded recursion only.
func (f *Filter) BoundedRecursion() bool {
return f.fileRules.boundedRecursion()
}
// includeRemote returns whether this remote passes the filter rules.
func (f *Filter) includeRemote(remote string) bool {
for _, rule := range f.fileRules.rules {
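The intent of `BoundedRecursion` is easiest to see from the caller's side. A minimal sketch, assuming the `github.com/ncw/rclone/fs/filter` import path used elsewhere in this change:
```
package main

import (
	"fmt"

	"github.com/ncw/rclone/fs/filter"
)

func main() {
	// Rooted rules without "**" can only match a fixed number of levels
	// deep, so a full recursive listing buys nothing.
	f, err := filter.NewFilter(nil)
	if err != nil {
		panic(err)
	}
	_ = f.AddRule("+ /*.jpg")
	_ = f.AddRule("- /**")
	fmt.Println(f.BoundedRecursion()) // true

	// A bare "*.jpg" can match at any depth, so recursion is unbounded.
	g, _ := filter.NewFilter(nil)
	_ = g.AddRule("+ *.jpg")
	_ = g.AddRule("- /**")
	fmt.Println(g.BoundedRecursion()) // false
}
```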

View File

@@ -25,7 +25,6 @@ func TestNewFilterDefault(t *testing.T) {
assert.Len(t, f.dirRules.rules, 0)
assert.Nil(t, f.files)
assert.True(t, f.InActive())
assert.False(t, f.BoundedRecursion())
}
// testFile creates a temp file with the contents
@@ -104,38 +103,6 @@ func TestNewFilterFull(t *testing.T) {
}
}
assert.False(t, f.InActive())
assert.False(t, f.BoundedRecursion())
}
func TestFilterBoundedRecursion(t *testing.T) {
for _, test := range []struct {
in string
want bool
}{
{"", false},
{"- /**", true},
{"+ *.jpg", false},
{"+ *.jpg\n- /**", false},
{"+ /*.jpg\n- /**", true},
{"+ *.png\n+ /*.jpg\n- /**", false},
{"+ /*.png\n+ /*.jpg\n- /**", true},
{"- *.jpg\n- /**", true},
{"+ /*.jpg\n- /**", true},
{"+ /*dir/\n- /**", true},
{"+ /*dir/\n", false},
{"+ /*dir/**\n- /**", false},
{"+ **/pics*/*.jpg\n- /**", false},
} {
f, err := NewFilter(nil)
require.NoError(t, err)
for _, rule := range strings.Split(test.in, "\n") {
if rule != "" {
require.NoError(t, f.AddRule(rule))
}
}
got := f.BoundedRecursion()
assert.Equal(t, test.want, got, test.in)
}
}
type includeTest struct {
@@ -184,7 +151,6 @@ func TestNewFilterIncludeFiles(t *testing.T) {
{"file3.jpg", 3, 0, false},
})
assert.False(t, f.InActive())
assert.False(t, f.BoundedRecursion())
}
func TestNewFilterIncludeFilesDirs(t *testing.T) {
@@ -312,7 +278,6 @@ func TestNewFilterMinSize(t *testing.T) {
{"potato/file2.jpg", 99, 0, false},
})
assert.False(t, f.InActive())
assert.False(t, f.BoundedRecursion())
}
func TestNewFilterMaxSize(t *testing.T) {

View File

@@ -13,15 +13,9 @@ var (
Opt = filter.DefaultOpt
)
// Reload the filters from the flags
func Reload() (err error) {
filter.Active, err = filter.NewFilter(&Opt)
return err
}
// AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) {
rc.AddOptionReload("filter", &Opt, Reload)
rc.AddOption("filter", &Opt)
flags.BoolVarP(flagSet, &Opt.DeleteExcluded, "delete-excluded", "", false, "Delete files on dest excluded from sync")
flags.StringArrayVarP(flagSet, &Opt.FilterRule, "filter", "f", nil, "Add a file-filtering rule")
flags.StringArrayVarP(flagSet, &Opt.FilterFrom, "filter-from", "", nil, "Read filtering patterns from a file")

View File

@@ -167,15 +167,3 @@ func globToDirGlobs(glob string) (out []string) {
return out
}
// globBoundedRecursion returns true if the glob only needs bounded
// recursion in the file tree to evaluate.
func globBoundedRecursion(glob string) bool {
if strings.Contains(glob, "**") {
return false
}
if strings.HasPrefix(glob, "/") {
return true
}
return false
}

View File

@@ -108,45 +108,3 @@ func TestGlobToDirGlobs(t *testing.T) {
assert.Equal(t, test.want, got, test.in)
}
}
func TestGlobBoundedRecursion(t *testing.T) {
for _, test := range []struct {
in string
want bool
}{
{`*`, false},
{`/*`, true},
{`/**`, false},
{`*.jpg`, false},
{`/*.jpg`, true},
{`/a/*.jpg`, true},
{`/a/b/*.jpg`, true},
{`*/*/*.jpg`, false},
{`a/b/`, false},
{`a/b`, false},
{`a/b/*.{png,gif}`, false},
{`/a/{jpg,png,gif}/*.{jpg,true,gif}`, true},
{`a/{a,a*b,a**c}/d/`, false},
{`/a/{a,a*b,a/c,d}/d/`, true},
{`**`, false},
{`a**`, false},
{`a**b`, false},
{`a**b**c**d`, false},
{`a**b/c**d`, false},
{`/A/a**b/B/c**d/C/`, false},
{`/var/spool/**/ncw`, false},
{`var/spool/**/ncw/`, false},
{"/file1.jpg", true},
{"/file2.png", true},
{"/*.jpg", true},
{"/*.png", true},
{"/potato", true},
{"/sausage1", true},
{"/sausage2*", true},
{"/sausage3**", false},
{"/a/*.jpg", true},
} {
got := globBoundedRecursion(test.in)
assert.Equal(t, test.want, got, test.in)
}
}

View File

@@ -409,7 +409,6 @@ type Features struct {
BucketBased bool // is bucket based (like s3, swift etc)
SetTier bool // allows set tier functionality on objects
GetTier bool // allows to retrieve storage tier of objects
ServerSideAcrossConfigs bool // can server side copy between different remotes of the same type
// Purge all files in the root and the root directory
//

View File

@@ -191,22 +191,25 @@ var _ pflag.Value = (*DeduplicateMode)(nil)
// dedupeFindDuplicateDirs scans f for duplicate directories
func dedupeFindDuplicateDirs(f fs.Fs) ([][]fs.Directory, error) {
dirs := map[string][]fs.Directory{}
err := walk.ListR(f, "", true, fs.Config.MaxDepth, walk.ListDirs, func(entries fs.DirEntries) error {
duplicateDirs := [][]fs.Directory{}
err := walk.Walk(f, "", true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
return err
}
dirs := map[string][]fs.Directory{}
entries.ForDir(func(d fs.Directory) {
dirs[d.Remote()] = append(dirs[d.Remote()], d)
})
for _, ds := range dirs {
if len(ds) > 1 {
duplicateDirs = append(duplicateDirs, ds)
}
}
return nil
})
if err != nil {
return nil, errors.Wrap(err, "find duplicate dirs")
}
duplicateDirs := [][]fs.Directory{}
for _, ds := range dirs {
if len(ds) > 1 {
duplicateDirs = append(duplicateDirs, ds)
}
}
return duplicateDirs, nil
}
@@ -265,7 +268,10 @@ func Deduplicate(f fs.Fs, mode DeduplicateMode) error {
// Now find duplicate files
files := map[string][]fs.Object{}
err := walk.ListR(f, "", true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
err := walk.Walk(f, "", true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
return err
}
entries.ForObject(func(o fs.Object) {
remote := o.Remote()
files[remote] = append(files[remote], o)
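The `walk.Walk` callback shape used here recurs throughout this change: errors arrive as the third argument, and each tranche of entries is inspected with the `ForObject`/`ForDir` helpers. A minimal sketch, assuming an already-configured `fs.Fs`:
```
package example

import (
	"fmt"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/walk"
)

// ListNames prints every object under f, following the walk.Walk idiom
// used above. Returning the error stops the walk; logging it and
// returning nil would carry on instead.
func ListNames(f fs.Fs) error {
	return walk.Walk(f, "", false, fs.Config.MaxDepth,
		func(dirPath string, entries fs.DirEntries, err error) error {
			if err != nil {
				return err
			}
			entries.ForObject(func(o fs.Object) {
				fmt.Println(o.Remote())
			})
			return nil
		})
}
```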

View File

@@ -161,7 +161,10 @@ func TestDeduplicateRename(t *testing.T) {
err := operations.Deduplicate(r.Fremote, operations.DeduplicateRename)
require.NoError(t, err)
require.NoError(t, walk.ListR(r.Fremote, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
require.NoError(t, walk.Walk(r.Fremote, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
return err
}
entries.ForObject(func(o fs.Object) {
remote := o.Remote()
if remote != "one-1.txt" &&

View File

@@ -89,7 +89,12 @@ func ListJSON(fsrc fs.Fs, remote string, opt *ListJSONOpt, callback func(*ListJS
}
}
format := formatForPrecision(fsrc.Precision())
err := walk.ListR(fsrc, remote, false, ConfigMaxDepth(opt.Recurse), walk.ListAll, func(entries fs.DirEntries) (err error) {
err := walk.Walk(fsrc, remote, false, ConfigMaxDepth(opt.Recurse), func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
fs.CountError(err)
fs.Errorf(dirPath, "error listing: %v", err)
return nil
}
for _, entry := range entries {
item := ListJSONItem{
Path: entry.Remote(),

View File

@@ -273,7 +273,7 @@ func Copy(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Objec
// Try server side copy first - if has optional interface and
// is same underlying remote
actionTaken = "Copied (server side copy)"
if doCopy := f.Features().Copy; doCopy != nil && (SameConfig(src.Fs(), f) || (SameRemoteType(src.Fs(), f) && f.Features().ServerSideAcrossConfigs)) {
if doCopy := f.Features().Copy; doCopy != nil && SameConfig(src.Fs(), f) {
newDst, err = doCopy(src, remote)
if err == nil {
dst = newDst
@@ -392,7 +392,7 @@ func Move(fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Ob
return newDst, nil
}
// See if we have Move available
if doMove := fdst.Features().Move; doMove != nil && (SameConfig(src.Fs(), fdst) || (SameRemoteType(src.Fs(), fdst) && fdst.Features().ServerSideAcrossConfigs)) {
if doMove := fdst.Features().Move; doMove != nil && SameConfig(src.Fs(), fdst) {
// Delete destination if it exists
if dst != nil {
err = DeleteFile(dst)
@@ -435,20 +435,6 @@ func CanServerSideMove(fdst fs.Fs) bool {
return canMove || canCopy
}
// SuffixName adds the current --suffix to the remote, obeying
// --suffix-keep-extension if set
func SuffixName(remote string) string {
if fs.Config.Suffix == "" {
return remote
}
if fs.Config.SuffixKeepExtension {
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
return base + fs.Config.Suffix + ext
}
return remote + fs.Config.Suffix
}
// DeleteFileWithBackupDir deletes a single file respecting --dry-run
// and accumulating stats and errors.
//
@@ -470,7 +456,7 @@ func DeleteFileWithBackupDir(dst fs.Object, backupDir fs.Fs) (err error) {
if !SameConfig(dst.Fs(), backupDir) {
err = errors.New("parameter to --backup-dir has to be on the same remote as destination")
} else {
remoteWithSuffix := SuffixName(dst.Remote())
remoteWithSuffix := dst.Remote() + fs.Config.Suffix
overwritten, _ := backupDir.NewObject(remoteWithSuffix)
_, err = Move(backupDir, overwritten, remoteWithSuffix, dst)
}
@@ -539,11 +525,6 @@ func DeleteFiles(toBeDeleted fs.ObjectsChan) error {
return DeleteFilesWithBackupDir(toBeDeleted, nil)
}
// SameRemoteType returns true if fdst and fsrc are the same type
func SameRemoteType(fdst, fsrc fs.Info) bool {
return fmt.Sprintf("%T", fdst) == fmt.Sprintf("%T", fsrc)
}
// SameConfig returns true if fdst and fsrc are using the same config
// file entry
func SameConfig(fdst, fsrc fs.Info) bool {
@@ -830,7 +811,11 @@ func CheckDownload(fdst, fsrc fs.Fs, oneway bool) error {
//
// Lists in parallel which may get them out of order
func ListFn(f fs.Fs, fn func(fs.Object)) error {
return walk.ListR(f, "", false, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
return walk.Walk(f, "", false, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
// FIXME count errors and carry on for listing
return err
}
entries.ForObject(fn)
return nil
})
@@ -946,7 +931,11 @@ func ConfigMaxDepth(recursive bool) int {
// ListDir lists the directories/buckets/containers in the Fs to the supplied writer
func ListDir(f fs.Fs, w io.Writer) error {
return walk.ListR(f, "", false, ConfigMaxDepth(false), walk.ListDirs, func(entries fs.DirEntries) error {
return walk.Walk(f, "", false, ConfigMaxDepth(false), func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
// FIXME count errors and carry on for listing
return err
}
entries.ForDir(func(dir fs.Directory) {
if dir != nil {
syncFprintf(w, "%12d %13s %9d %s\n", dir.Size(), dir.ModTime().Local().Format("2006-01-02 15:04:05"), dir.Items(), dir.Remote())
@@ -1054,17 +1043,21 @@ func listToChan(f fs.Fs, dir string) fs.ObjectsChan {
o := make(fs.ObjectsChan, fs.Config.Checkers)
go func() {
defer close(o)
err := walk.ListR(f, dir, true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
_ = walk.Walk(f, dir, true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
if err == fs.ErrorDirNotFound {
return nil
}
err = errors.Errorf("Failed to list: %v", err)
fs.CountError(err)
fs.Errorf(nil, "%v", err)
return nil
}
entries.ForObject(func(obj fs.Object) {
o <- obj
})
return nil
})
if err != nil && err != fs.ErrorDirNotFound {
err = errors.Wrap(err, "failed to list")
fs.CountError(err)
fs.Errorf(nil, "%v", err)
}
}()
return o
}

View File

@@ -231,33 +231,6 @@ func TestHashSums(t *testing.T) {
}
}
func TestSuffixName(t *testing.T) {
origSuffix, origKeepExt := fs.Config.Suffix, fs.Config.SuffixKeepExtension
defer func() {
fs.Config.Suffix, fs.Config.SuffixKeepExtension = origSuffix, origKeepExt
}()
for _, test := range []struct {
remote string
suffix string
keepExt bool
want string
}{
{"test.txt", "", false, "test.txt"},
{"test.txt", "", true, "test.txt"},
{"test.txt", "-suffix", false, "test.txt-suffix"},
{"test.txt", "-suffix", true, "test-suffix.txt"},
{"test.txt.csv", "-suffix", false, "test.txt.csv-suffix"},
{"test.txt.csv", "-suffix", true, "test.txt-suffix.csv"},
{"test", "-suffix", false, "test-suffix"},
{"test", "-suffix", true, "test-suffix"},
} {
fs.Config.Suffix = test.suffix
fs.Config.SuffixKeepExtension = test.keepExt
got := operations.SuffixName(test.remote)
assert.Equal(t, test.want, got, fmt.Sprintf("%+v", test))
}
}
func TestCount(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()

View File

@@ -172,7 +172,7 @@ See the [` + op.name + ` command](/commands/rclone_` + op.name + `/) command for
}
}
// Run a single command, eg Mkdir
// Mkdir a directory
func rcSingleCommand(in rc.Params, name string, noRemote bool) (out rc.Params, err error) {
var (
f fs.Fs
@@ -240,7 +240,7 @@ See the [size command](/commands/rclone_size/) command for more information on t
})
}
// Size a directory
// Mkdir a directory
func rcSize(in rc.Params) (out rc.Params, err error) {
f, err := rc.GetFs(in)
if err != nil {
@@ -255,38 +255,3 @@ func rcSize(in rc.Params) (out rc.Params, err error) {
out["bytes"] = bytes
return out, nil
}
func init() {
rc.Add(rc.Call{
Path: "operations/publiclink",
AuthRequired: true,
Fn: rcPublicLink,
Title: "Create or retrieve a public link to the given file or folder.",
Help: `This takes the following parameters
- fs - a remote name string eg "drive:"
- remote - a path within that remote eg "dir"
Returns
- url - URL of the resource
See the [link command](/commands/rclone_link/) for more information on the above.
`,
})
}
// Make a public link
func rcPublicLink(in rc.Params) (out rc.Params, err error) {
f, remote, err := rc.GetFsAndRemote(in)
if err != nil {
return nil, err
}
url, err := PublicLink(f, remote)
if err != nil {
return nil, err
}
out = make(rc.Params)
out["url"] = url
return out, nil
}

View File

@@ -356,16 +356,3 @@ func TestRcSize(t *testing.T) {
"bytes": int64(120),
}, out)
}
// operations/publiclink: Create or retrieve a public link to the given file or folder.
func TestRcPublicLink(t *testing.T) {
r, call := rcNewRun(t, "operations/publiclink")
defer r.Finalise()
in := rc.Params{
"fs": r.FremoteName,
"remote": "",
}
_, err := call.Fn(in)
require.Error(t, err)
assert.Contains(t, err.Error(), "doesn't support public links")
}

View File

@@ -8,23 +8,13 @@ import (
"github.com/pkg/errors"
)
var (
optionBlock = map[string]interface{}{}
optionReload = map[string]func() error{}
)
var optionBlock = map[string]interface{}{}
// AddOption adds an option set
func AddOption(name string, option interface{}) {
optionBlock[name] = option
}
// AddOptionReload adds an option set with a reload function to be
// called when options are changed
func AddOptionReload(name string, option interface{}, reload func() error) {
optionBlock[name] = option
optionReload[name] = reload
}
func init() {
Add(Call{
Path: "options/blocks",
@@ -113,12 +103,7 @@ func rcOptionsSet(in Params) (out Params, err error) {
if err != nil {
return nil, errors.Wrapf(err, "failed to write options from block %q", name)
}
if reload := optionReload[name]; reload != nil {
err = reload()
if err != nil {
return nil, errors.Wrapf(err, "failed to reload options from block %q", name)
}
}
}
return out, nil
}
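A minimal usage sketch of the `AddOptionReload` hook shown above (the block name and options struct are made up for illustration):
```
package example

import (
	"fmt"

	"github.com/ncw/rclone/fs/rc"
)

type exampleOptions struct {
	Int    int
	String string
}

var opt = exampleOptions{Int: 42, String: "potato"}

func init() {
	// After options/set has rewritten opt, the hook runs so the package
	// can rebuild anything derived from the raw option values.
	rc.AddOptionReload("example", &opt, func() error {
		fmt.Printf("options changed: %+v\n", opt)
		return nil
	})
}
```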

View File

@@ -1,10 +1,8 @@
package rc
import (
"fmt"
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -26,21 +24,9 @@ func TestAddOption(t *testing.T) {
assert.Equal(t, len(optionBlock), 0)
AddOption("potato", &testOptions)
assert.Equal(t, len(optionBlock), 1)
assert.Equal(t, len(optionReload), 0)
assert.Equal(t, &testOptions, optionBlock["potato"])
}
func TestAddOptionReload(t *testing.T) {
defer clearOptionBlock()
assert.Equal(t, len(optionBlock), 0)
reload := func() error { return nil }
AddOptionReload("potato", &testOptions, reload)
assert.Equal(t, len(optionBlock), 1)
assert.Equal(t, len(optionReload), 1)
assert.Equal(t, &testOptions, optionBlock["potato"])
assert.Equal(t, fmt.Sprintf("%p", reload), fmt.Sprintf("%p", optionReload["potato"]))
}
func TestOptionsBlocks(t *testing.T) {
defer clearOptionBlock()
AddOption("potato", &testOptions)
@@ -67,14 +53,7 @@ func TestOptionsGet(t *testing.T) {
func TestOptionsSet(t *testing.T) {
defer clearOptionBlock()
var reloaded int
AddOptionReload("potato", &testOptions, func() error {
if reloaded > 0 {
return errors.New("error while reloading")
}
reloaded++
return nil
})
AddOption("potato", &testOptions)
call := Calls.Get("options/set")
require.NotNil(t, call)
@@ -88,12 +67,6 @@ func TestOptionsSet(t *testing.T) {
require.Nil(t, out)
assert.Equal(t, 50, testOptions.Int)
assert.Equal(t, "hello", testOptions.String)
assert.Equal(t, 1, reloaded)
// error from reload
_, err = call.Fn(in)
require.Error(t, err)
assert.Contains(t, err.Error(), "error while reloading")
// unknown option block
in = Params{
@@ -112,5 +85,4 @@ func TestOptionsSet(t *testing.T) {
_, err = call.Fn(in)
require.Error(t, err)
assert.Contains(t, err.Error(), "failed to write options")
}

View File

@@ -188,8 +188,8 @@ func rcJobStatus(in Params) (out Params, err error) {
defer job.mu.Unlock()
out = make(Params)
err = Reshape(&out, job)
if err != nil {
return nil, errors.Wrap(err, "reshape failed in job status")
if job == nil {
return nil, errors.New("Reshape failed in job status")
}
return out, nil
}

View File

@@ -39,21 +39,17 @@ func rcSyncCopyMove(in rc.Params, name string) (out rc.Params, err error) {
if err != nil {
return nil, err
}
createEmptySrcDirs, err := in.GetBool("createEmptySrcDirs")
if rc.NotErrParamNotFound(err) {
return nil, err
}
switch name {
case "sync":
return nil, Sync(dstFs, srcFs, createEmptySrcDirs)
return nil, Sync(dstFs, srcFs)
case "copy":
return nil, CopyDir(dstFs, srcFs, createEmptySrcDirs)
return nil, CopyDir(dstFs, srcFs)
case "move":
deleteEmptySrcDirs, err := in.GetBool("deleteEmptySrcDirs")
if rc.NotErrParamNotFound(err) {
return nil, err
}
return nil, MoveDir(dstFs, srcFs, deleteEmptySrcDirs, createEmptySrcDirs)
return nil, MoveDir(dstFs, srcFs, deleteEmptySrcDirs)
}
panic("unknown rcSyncCopyMove type")
}
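The `GetBool` / `NotErrParamNotFound` pattern above is the standard way to make an rc parameter optional: absence yields the zero value, while a present but malformed parameter still surfaces as an error. A minimal sketch with a hypothetical parameter name:
```
package example

import "github.com/ncw/rclone/fs/rc"

// optionalFlag reads a hypothetical optional boolean parameter. A missing
// "myFlag" yields false; a malformed one is still reported as an error.
func optionalFlag(in rc.Params) (bool, error) {
	flag, err := in.GetBool("myFlag")
	if rc.NotErrParamNotFound(err) {
		return false, err
	}
	return flag, nil
}
```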

View File

@@ -24,7 +24,6 @@ type syncCopyMove struct {
fsrc fs.Fs
deleteMode fs.DeleteMode // how we are doing deletions
DoMove bool
copyEmptySrcDirs bool
deleteEmptySrcDirs bool
dir string
// internal state
@@ -64,7 +63,7 @@ type syncCopyMove struct {
suffix string // suffix to add to files placed in backupDir
}
func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) (*syncCopyMove, error) {
func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool) (*syncCopyMove, error) {
if (deleteMode != fs.DeleteModeOff || DoMove) && operations.Overlapping(fdst, fsrc) {
return nil, fserrors.FatalError(fs.ErrorOverlapping)
}
@@ -73,7 +72,6 @@ func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
fsrc: fsrc,
deleteMode: deleteMode,
DoMove: DoMove,
copyEmptySrcDirs: copyEmptySrcDirs,
deleteEmptySrcDirs: deleteEmptySrcDirs,
dir: "",
srcFilesChan: make(chan fs.Object, fs.Config.Checkers+fs.Config.Transfers),
@@ -226,7 +224,7 @@ func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, wg *sync.WaitGroup) {
} else {
// If destination already exists, then we must move it into --backup-dir if required
if pair.Dst != nil && s.backupDir != nil {
remoteWithSuffix := operations.SuffixName(pair.Dst.Remote())
remoteWithSuffix := pair.Dst.Remote() + s.suffix
overwritten, _ := s.backupDir.NewObject(remoteWithSuffix)
_, err := operations.Move(s.backupDir, overwritten, remoteWithSuffix, pair.Dst)
if err != nil {
@@ -691,9 +689,7 @@ func (s *syncCopyMove) run() error {
s.stopTransfers()
s.stopDeleters()
if s.copyEmptySrcDirs {
s.processError(copyEmptyDirectories(s.fdst, s.srcEmptyDirs))
}
s.processError(copyEmptyDirectories(s.fdst, s.srcEmptyDirs))
// Delete files after
if s.deleteMode == fs.DeleteModeAfter {
@@ -856,7 +852,7 @@ func (s *syncCopyMove) Match(dst, src fs.DirEntry) (recurse bool) {
// If DoMove is true then files will be moved instead of copied
//
// dir is the start directory, "" for root
func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool) error {
if deleteMode != fs.DeleteModeOff && DoMove {
return fserrors.FatalError(errors.New("can't delete and move at the same time"))
}
@@ -866,7 +862,7 @@ func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
return fserrors.FatalError(errors.New("can't use --delete-before with --track-renames"))
}
// only delete stuff during in this pass
do, err := newSyncCopyMove(fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs, copyEmptySrcDirs)
do, err := newSyncCopyMove(fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs)
if err != nil {
return err
}
@@ -877,7 +873,7 @@ func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
// Next pass does a copy only
deleteMode = fs.DeleteModeOff
}
do, err := newSyncCopyMove(fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs, copyEmptySrcDirs)
do, err := newSyncCopyMove(fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs)
if err != nil {
return err
}
@@ -885,22 +881,22 @@ func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
}
// Sync fsrc into fdst
func Sync(fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
return runSyncCopyMove(fdst, fsrc, fs.Config.DeleteMode, false, false, copyEmptySrcDirs)
func Sync(fdst, fsrc fs.Fs) error {
return runSyncCopyMove(fdst, fsrc, fs.Config.DeleteMode, false, false)
}
// CopyDir copies fsrc into fdst
func CopyDir(fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, false, false, copyEmptySrcDirs)
func CopyDir(fdst, fsrc fs.Fs) error {
return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, false, false)
}
// moveDir moves fsrc into fdst
func moveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs, copyEmptySrcDirs)
func moveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool) error {
return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs)
}
// MoveDir moves fsrc into fdst
func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool) error {
if operations.Same(fdst, fsrc) {
fs.Errorf(fdst, "Nothing to do as source and destination are the same")
return nil
@@ -928,5 +924,5 @@ func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) e
}
// Otherwise move the files one by one
return moveDir(fdst, fsrc, deleteEmptySrcDirs, copyEmptySrcDirs)
return moveDir(fdst, fsrc, deleteEmptySrcDirs)
}

View File

@@ -40,7 +40,7 @@ func TestCopyWithDryRun(t *testing.T) {
r.Mkdir(r.Fremote)
fs.Config.DryRun = true
err := CopyDir(r.Fremote, r.Flocal, false)
err := CopyDir(r.Fremote, r.Flocal)
fs.Config.DryRun = false
require.NoError(t, err)
@@ -55,7 +55,7 @@ func TestCopy(t *testing.T) {
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
r.Mkdir(r.Fremote)
err := CopyDir(r.Fremote, r.Flocal, false)
err := CopyDir(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
@@ -72,7 +72,7 @@ func TestCopyNoTraverse(t *testing.T) {
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
err := CopyDir(r.Fremote, r.Flocal, false)
err := CopyDir(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
@@ -90,7 +90,7 @@ func TestSyncNoTraverse(t *testing.T) {
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
@@ -108,7 +108,7 @@ func TestCopyWithDepth(t *testing.T) {
fs.Config.MaxDepth = 1
defer func() { fs.Config.MaxDepth = -1 }()
err := CopyDir(r.Fremote, r.Flocal, false)
err := CopyDir(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1, file2)
@@ -136,7 +136,7 @@ func TestCopyWithFilesFrom(t *testing.T) {
}
defer unpatch()
err = CopyDir(r.Fremote, r.Flocal, false)
err = CopyDir(r.Fremote, r.Flocal)
require.NoError(t, err)
unpatch()
@@ -153,59 +153,7 @@ func TestCopyEmptyDirectories(t *testing.T) {
require.NoError(t, err)
r.Mkdir(r.Fremote)
err = CopyDir(r.Fremote, r.Flocal, true)
require.NoError(t, err)
fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{
file1,
},
[]string{
"sub dir",
"sub dir2",
},
fs.GetModifyWindow(r.Fremote),
)
}
// Test move empty directories
func TestMoveEmptyDirectories(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
err := operations.Mkdir(r.Flocal, "sub dir2")
require.NoError(t, err)
r.Mkdir(r.Fremote)
err = MoveDir(r.Fremote, r.Flocal, false, true)
require.NoError(t, err)
fstest.CheckListingWithPrecision(
t,
r.Fremote,
[]fstest.Item{
file1,
},
[]string{
"sub dir",
"sub dir2",
},
fs.GetModifyWindow(r.Fremote),
)
}
// Test sync empty directories
func TestSyncEmptyDirectories(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
err := operations.Mkdir(r.Flocal, "sub dir2")
require.NoError(t, err)
r.Mkdir(r.Fremote)
err = Sync(r.Fremote, r.Flocal, true)
err = CopyDir(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckListingWithPrecision(
@@ -234,7 +182,7 @@ func TestServerSideCopy(t *testing.T) {
defer finaliseCopy()
t.Logf("Server side copy (if possible) %v -> %v", r.Fremote, FremoteCopy)
err = CopyDir(FremoteCopy, r.Fremote, false)
err = CopyDir(FremoteCopy, r.Fremote)
require.NoError(t, err)
fstest.CheckItems(t, FremoteCopy, file1)
@@ -252,7 +200,7 @@ func TestCopyAfterDelete(t *testing.T) {
err := operations.Mkdir(r.Flocal, "")
require.NoError(t, err)
err = CopyDir(r.Fremote, r.Flocal, false)
err = CopyDir(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal)
@@ -266,7 +214,7 @@ func TestCopyRedownload(t *testing.T) {
file1 := r.WriteObject("sub dir/hello world", "hello world", t1)
fstest.CheckItems(t, r.Fremote, file1)
err := CopyDir(r.Flocal, r.Fremote, false)
err := CopyDir(r.Flocal, r.Fremote)
require.NoError(t, err)
// Test with combined precision of local and remote as we copied it there and back
@@ -286,7 +234,7 @@ func TestSyncBasedOnCheckSum(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file1)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
// We should have transferred exactly one file.
@@ -298,7 +246,7 @@ func TestSyncBasedOnCheckSum(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file2)
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal, false)
err = Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
// We should have transferred no files
@@ -320,7 +268,7 @@ func TestSyncSizeOnly(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file1)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
// We should have transferred exactly one file.
@@ -332,7 +280,7 @@ func TestSyncSizeOnly(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file2)
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal, false)
err = Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
// We should have transferred no files
@@ -354,7 +302,7 @@ func TestSyncIgnoreSize(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file1)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
// We should have transferred exactly one file.
@@ -366,7 +314,7 @@ func TestSyncIgnoreSize(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file2)
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal, false)
err = Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
// We should have transferred no files
@@ -382,7 +330,7 @@ func TestSyncIgnoreTimes(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
// We should have transferred exactly 0 files because the
@@ -393,7 +341,7 @@ func TestSyncIgnoreTimes(t *testing.T) {
defer func() { fs.Config.IgnoreTimes = false }()
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal, false)
err = Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
// We should have transferred exactly one file even though the
@@ -413,7 +361,7 @@ func TestSyncIgnoreExisting(t *testing.T) {
defer func() { fs.Config.IgnoreExisting = false }()
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote, file1)
@@ -421,7 +369,7 @@ func TestSyncIgnoreExisting(t *testing.T) {
// Change everything
r.WriteFile("existing", "newpotatoes", t2)
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal, false)
err = Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
// Items should not change
fstest.CheckItems(t, r.Fremote, file1)
@@ -469,7 +417,7 @@ func TestSyncIgnoreErrors(t *testing.T) {
accounting.Stats.ResetCounters()
fs.CountError(nil)
assert.NoError(t, Sync(r.Fremote, r.Flocal, false))
assert.NoError(t, Sync(r.Fremote, r.Flocal))
fstest.CheckListingWithPrecision(
t,
@@ -512,7 +460,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
defer func() { fs.Config.DryRun = false }()
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
@@ -521,7 +469,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
fs.Config.DryRun = false
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal, false)
err = Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
@@ -549,7 +497,7 @@ func TestSyncAfterChangingModtimeOnlyWithNoUpdateModTime(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file2)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
@@ -570,7 +518,7 @@ func TestSyncDoesntUpdateModtime(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file2)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
@@ -590,7 +538,7 @@ func TestSyncAfterAddingAFile(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1, file2)
fstest.CheckItems(t, r.Fremote, file1, file2)
@@ -605,7 +553,7 @@ func TestSyncAfterChangingFilesSizeOnly(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file2)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file2)
fstest.CheckItems(t, r.Fremote, file2)
@@ -628,7 +576,7 @@ func TestSyncAfterChangingContentsOnly(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file2)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file2)
fstest.CheckItems(t, r.Fremote, file2)
@@ -644,7 +592,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {
fs.Config.DryRun = true
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
fs.Config.DryRun = false
require.NoError(t, err)
@@ -663,7 +611,7 @@ func TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file1, file3)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1, file3)
fstest.CheckItems(t, r.Fremote, file1, file3)
@@ -709,7 +657,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDir(t *testing.T) {
)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckListingWithPrecision(
@@ -779,7 +727,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) {
accounting.Stats.ResetCounters()
fs.CountError(nil)
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
assert.Equal(t, fs.ErrorNotDeleting, err)
fstest.CheckListingWithPrecision(
@@ -856,7 +804,7 @@ func TestCopyDeleteBefore(t *testing.T) {
fstest.CheckItems(t, r.Flocal, file2)
accounting.Stats.ResetCounters()
err := CopyDir(r.Fremote, r.Flocal, false)
err := CopyDir(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Fremote, file1, file2)
@@ -879,14 +827,14 @@ func TestSyncWithExclude(t *testing.T) {
}()
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Fremote, file2, file1)
// Now sync the other way round and check enormous doesn't get
// deleted as it is excluded from the sync
accounting.Stats.ResetCounters()
err = Sync(r.Flocal, r.Fremote, false)
err = Sync(r.Flocal, r.Fremote)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file2, file1, file3)
}
@@ -909,14 +857,14 @@ func TestSyncWithExcludeAndDeleteExcluded(t *testing.T) {
}()
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Fremote, file2)
// Check sync the other way round to make sure enormous gets
// deleted even though it is excluded
accounting.Stats.ResetCounters()
err = Sync(r.Flocal, r.Fremote, false)
err = Sync(r.Flocal, r.Fremote)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file2)
}
@@ -951,7 +899,7 @@ func TestSyncWithUpdateOlder(t *testing.T) {
}()
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Fremote, oneO, twoF, threeO, fourF, fiveF)
}
@@ -975,7 +923,7 @@ func TestSyncWithTrackRenames(t *testing.T) {
f2 := r.WriteFile("yam", "Yam Content", t2)
accounting.Stats.ResetCounters()
require.NoError(t, Sync(r.Fremote, r.Flocal, false))
require.NoError(t, Sync(r.Fremote, r.Flocal))
fstest.CheckItems(t, r.Fremote, f1, f2)
fstest.CheckItems(t, r.Flocal, f1, f2)
@@ -984,7 +932,7 @@ func TestSyncWithTrackRenames(t *testing.T) {
f2 = r.RenameFile(f2, "yaml")
accounting.Stats.ResetCounters()
require.NoError(t, Sync(r.Fremote, r.Flocal, false))
require.NoError(t, Sync(r.Fremote, r.Flocal))
fstest.CheckItems(t, r.Fremote, f1, f2)
@@ -1021,7 +969,7 @@ func testServerSideMove(t *testing.T, r *fstest.Run, withFilter, testDeleteEmpty
// Do server side move
accounting.Stats.ResetCounters()
err = MoveDir(FremoteMove, r.Fremote, testDeleteEmptyDirs, false)
err = MoveDir(FremoteMove, r.Fremote, testDeleteEmptyDirs)
require.NoError(t, err)
if withFilter {
@@ -1048,7 +996,7 @@ func testServerSideMove(t *testing.T, r *fstest.Run, withFilter, testDeleteEmpty
// Move it back to a new empty remote, dst does not exist this time
accounting.Stats.ResetCounters()
err = MoveDir(FremoteMove2, FremoteMove, testDeleteEmptyDirs, false)
err = MoveDir(FremoteMove2, FremoteMove, testDeleteEmptyDirs)
require.NoError(t, err)
if withFilter {
@@ -1073,7 +1021,7 @@ func TestMoveWithDeleteEmptySrcDirs(t *testing.T) {
r.Mkdir(r.Fremote)
// run move with --delete-empty-src-dirs
err := MoveDir(r.Fremote, r.Flocal, true, false)
err := MoveDir(r.Fremote, r.Flocal, true)
require.NoError(t, err)
fstest.CheckListingWithPrecision(
@@ -1093,7 +1041,7 @@ func TestMoveWithoutDeleteEmptySrcDirs(t *testing.T) {
file2 := r.WriteFile("nested/sub dir/file", "nested", t1)
r.Mkdir(r.Fremote)
err := MoveDir(r.Fremote, r.Flocal, false, false)
err := MoveDir(r.Fremote, r.Flocal, false)
require.NoError(t, err)
fstest.CheckListingWithPrecision(
@@ -1154,7 +1102,7 @@ func TestServerSideMoveOverlap(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file1)
// Subdir move with no filters should return ErrorCantMoveOverlapping
err = MoveDir(FremoteMove, r.Fremote, false, false)
err = MoveDir(FremoteMove, r.Fremote, false)
assert.EqualError(t, err, fs.ErrorOverlapping.Error())
// Now try with a filter which should also fail with ErrorCantMoveOverlapping
@@ -1162,7 +1110,7 @@ func TestServerSideMoveOverlap(t *testing.T) {
defer func() {
filter.Active.Opt.MinSize = -1
}()
err = MoveDir(FremoteMove, r.Fremote, false, false)
err = MoveDir(FremoteMove, r.Fremote, false)
assert.EqualError(t, err, fs.ErrorOverlapping.Error())
}
@@ -1181,14 +1129,14 @@ func TestSyncOverlap(t *testing.T) {
assert.Equal(t, fs.ErrorOverlapping.Error(), err.Error())
}
checkErr(Sync(FremoteSync, r.Fremote, false))
checkErr(Sync(r.Fremote, FremoteSync, false))
checkErr(Sync(r.Fremote, r.Fremote, false))
checkErr(Sync(FremoteSync, FremoteSync, false))
checkErr(Sync(FremoteSync, r.Fremote))
checkErr(Sync(r.Fremote, FremoteSync))
checkErr(Sync(r.Fremote, r.Fremote))
checkErr(Sync(FremoteSync, FremoteSync))
}
// Test with BackupDir set
func testSyncBackupDir(t *testing.T, suffix string, suffixKeepExtension bool) {
func testSyncBackupDir(t *testing.T, suffix string) {
r := fstest.NewRun(t)
defer r.Finalise()
@@ -1199,18 +1147,16 @@ func testSyncBackupDir(t *testing.T, suffix string, suffixKeepExtension bool) {
fs.Config.BackupDir = r.FremoteName + "/backup"
fs.Config.Suffix = suffix
fs.Config.SuffixKeepExtension = suffixKeepExtension
defer func() {
fs.Config.BackupDir = ""
fs.Config.Suffix = ""
fs.Config.SuffixKeepExtension = false
}()
// Make the setup so we have one, two, three in the dest
// and one (different), two (same) in the source
file1 := r.WriteObject("dst/one", "one", t1)
file2 := r.WriteObject("dst/two", "two", t1)
file3 := r.WriteObject("dst/three.txt", "three", t1)
file3 := r.WriteObject("dst/three", "three", t1)
file2a := r.WriteFile("two", "two", t1)
file1a := r.WriteFile("one", "oneA", t2)
@@ -1221,7 +1167,7 @@ func testSyncBackupDir(t *testing.T, suffix string, suffixKeepExtension bool) {
require.NoError(t, err)
accounting.Stats.ResetCounters()
err = Sync(fdst, r.Flocal, false)
err = Sync(fdst, r.Flocal)
require.NoError(t, err)
// one should be moved to the backup dir and the new one installed
@@ -1229,24 +1175,20 @@ func testSyncBackupDir(t *testing.T, suffix string, suffixKeepExtension bool) {
file1a.Path = "dst/one"
// two should be unchanged
// three should be moved to the backup dir
if suffixKeepExtension {
file3.Path = "backup/three" + suffix + ".txt"
} else {
file3.Path = "backup/three.txt" + suffix
}
file3.Path = "backup/three" + suffix
fstest.CheckItems(t, r.Fremote, file1, file2, file3, file1a)
// Now check what happens if we do it again
// Restore a different three and update one in the source
file3a := r.WriteObject("dst/three.txt", "threeA", t2)
file3a := r.WriteObject("dst/three", "threeA", t2)
file1b := r.WriteFile("one", "oneBB", t3)
fstest.CheckItems(t, r.Fremote, file1, file2, file3, file1a, file3a)
// This should delete three and overwrite one again, checking
// the files got overwritten correctly in backup-dir
accounting.Stats.ResetCounters()
err = Sync(fdst, r.Flocal, false)
err = Sync(fdst, r.Flocal)
require.NoError(t, err)
// one should be moved to the backup dir and the new one installed
@@ -1254,17 +1196,12 @@ func testSyncBackupDir(t *testing.T, suffix string, suffixKeepExtension bool) {
file1b.Path = "dst/one"
// two should be unchanged
// three should be moved to the backup dir
if suffixKeepExtension {
file3a.Path = "backup/three" + suffix + ".txt"
} else {
file3a.Path = "backup/three.txt" + suffix
}
file3a.Path = "backup/three" + suffix
fstest.CheckItems(t, r.Fremote, file1b, file2, file3a, file1a)
}
func TestSyncBackupDir(t *testing.T) { testSyncBackupDir(t, "", false) }
func TestSyncBackupDirWithSuffix(t *testing.T) { testSyncBackupDir(t, ".bak", false) }
func TestSyncBackupDirWithSuffixKeepExtension(t *testing.T) { testSyncBackupDir(t, "-2019-01-01", true) }
func TestSyncBackupDir(t *testing.T) { testSyncBackupDir(t, "") }
func TestSyncBackupDirWithSuffix(t *testing.T) { testSyncBackupDir(t, ".bak") }
// Check we can sync two files with differing UTF-8 representations
func TestSyncUTFNorm(t *testing.T) {
@@ -1288,7 +1225,7 @@ func TestSyncUTFNorm(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file2)
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
// We should have transferred exactly one file, but kept the
@@ -1314,7 +1251,7 @@ func TestSyncImmutable(t *testing.T) {
// Should succeed
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
require.NoError(t, err)
fstest.CheckItems(t, r.Flocal, file1)
fstest.CheckItems(t, r.Fremote, file1)
@@ -1326,7 +1263,7 @@ func TestSyncImmutable(t *testing.T) {
// Should fail with ErrorImmutableModified and not modify local or remote files
accounting.Stats.ResetCounters()
err = Sync(r.Fremote, r.Flocal, false)
err = Sync(r.Fremote, r.Flocal)
assert.EqualError(t, err, fs.ErrorImmutableModified.Error())
fstest.CheckItems(t, r.Flocal, file2)
fstest.CheckItems(t, r.Fremote, file1)
@@ -1362,6 +1299,6 @@ func TestAbort(t *testing.T) {
accounting.Stats.ResetCounters()
err := Sync(r.Fremote, r.Flocal, false)
err := Sync(r.Fremote, r.Flocal)
assert.Equal(t, accounting.ErrorMaxTransferLimitReached, err)
}

View File

@@ -51,7 +51,7 @@ type Func func(path string, entries fs.DirEntries, err error) error
//
// Parent directories are always listed before their children
//
// This is implemented by WalkR if Config.UseListR is true
// This is implemented by WalkR if Config.UseRecursiveListing is true
// and f supports it and level > 1, or WalkN otherwise.
//
// If --files-from is set then a DirTree will be constructed with just
@@ -62,265 +62,12 @@ func Walk(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
if filter.Active.HaveFilesFrom() {
return walkR(f, path, includeAll, maxLevel, fn, filter.Active.MakeListR(f.NewObject))
}
// FIXME should this just be maxLevel < 0 - why the maxLevel > 1
if (maxLevel < 0 || maxLevel > 1) && fs.Config.UseListR && f.Features().ListR != nil {
return walkListR(f, path, includeAll, maxLevel, fn)
}
return walkListDirSorted(f, path, includeAll, maxLevel, fn)
}
// ListType is used to choose which combination of files or directories is required
type ListType byte
// Types of listing for ListR
const (
ListObjects ListType = 1 << iota // list objects only
ListDirs // list dirs only
ListAll = ListObjects | ListDirs // list files and dirs
)
// Objects returns true if the list type specifies objects
func (l ListType) Objects() bool {
return (l & ListObjects) != 0
}
// Dirs returns true if the list type specifies dirs
func (l ListType) Dirs() bool {
return (l & ListDirs) != 0
}
// Filter filters the entries (in place) so that they only contain the type of list entry required
func (l ListType) Filter(in *fs.DirEntries) {
if l == ListAll {
return
}
out := (*in)[:0]
for _, entry := range *in {
switch entry.(type) {
case fs.Object:
if l.Objects() {
out = append(out, entry)
}
case fs.Directory:
if l.Dirs() {
out = append(out, entry)
}
default:
fs.Errorf(nil, "Unknown object type %T", entry)
}
}
*in = out
}
// ListR lists the directory recursively.
//
// If includeAll is not set it will use the filters defined.
//
// If maxLevel is < 0 then it will recurse indefinitely, else it will
// only do maxLevel levels.
//
// If synthesizeDirs is set then for bucket based remotes it will
// synthesize directories from the file structure. This uses extra
// memory, so only set it if you actually need the directory entries.
//
// It calls fn for each tranche of DirEntries read. Note that these
// don't necessarily represent a directory
//
// Note that fn will not be called concurrently whereas the directory
// listing will proceed concurrently.
//
// Directories are not listed in any particular order so you can't
// rely on parents coming before children or alphabetical ordering
//
// This is implemented by using ListR on the backend if possible and
// efficient, otherwise by Walk.
//
// NB (f, path) to be replaced by fs.Dir at some point
func ListR(f fs.Fs, path string, includeAll bool, maxLevel int, listType ListType, fn fs.ListRCallback) error {
// FIXME disable this with --no-fast-list ??? `--disable ListR` will do it...
doListR := f.Features().ListR
// Can't use ListR if...
if doListR == nil || // ...no ListR
filter.Active.HaveFilesFrom() || // ...using --files-from
maxLevel >= 0 || // ...using bounded recursion
len(filter.Active.Opt.ExcludeFile) > 0 || // ...using --exclude-file
filter.Active.BoundedRecursion() { // ...filters imply bounded recursion
return listRwalk(f, path, includeAll, maxLevel, listType, fn)
}
return listR(f, path, includeAll, listType, fn, doListR, listType.Dirs() && f.Features().BucketBased)
}
// listRwalk walks the file tree for ListR using Walk
func listRwalk(f fs.Fs, path string, includeAll bool, maxLevel int, listType ListType, fn fs.ListRCallback) error {
var listErr error
walkErr := Walk(f, path, includeAll, maxLevel, func(path string, entries fs.DirEntries, err error) error {
// Carry on listing but return the error at the end
if err != nil {
listErr = err
fs.CountError(err)
fs.Errorf(path, "error listing: %v", err)
return nil
}
listType.Filter(&entries)
return fn(entries)
})
if listErr != nil {
return listErr
}
return walkErr
}
// dirMap keeps track of directories made for bucket based remotes.
// true => directory has been sent
// false => directory has been seen but not sent
type dirMap struct {
mu sync.Mutex
m map[string]bool
root string
}
// make a new dirMap
func newDirMap(root string) *dirMap {
return &dirMap{
m: make(map[string]bool),
root: root,
}
}
// add adds a directory with the given sent flag and its parents as unsent
func (dm *dirMap) add(dir string, sent bool) {
for {
if dir == dm.root || dir == "" {
return
}
currentSent, found := dm.m[dir]
if found {
// If it has been sent already then nothing more to do
if currentSent {
return
}
// If not sent already don't override
if !sent {
return
}
// currentSent == false && sent == true so needs overriding
}
dm.m[dir] = sent
// Add parents in as unsent
dir = parentDir(dir)
sent = false
}
}
// add all the directories in entries and their parents to the dirMap
func (dm *dirMap) addEntries(entries fs.DirEntries) error {
dm.mu.Lock()
defer dm.mu.Unlock()
for _, entry := range entries {
switch x := entry.(type) {
case fs.Object:
dm.add(parentDir(x.Remote()), false)
case fs.Directory:
dm.add(x.Remote(), true)
default:
return errors.Errorf("unknown object type %T", entry)
}
}
return nil
}
// send any missing parents to fn
func (dm *dirMap) sendEntries(fn fs.ListRCallback) (err error) {
// Count the strings first so we allocate the minimum memory
n := 0
for _, sent := range dm.m {
if !sent {
n++
}
}
if n == 0 {
return nil
}
dirs := make([]string, 0, n)
// Fill the dirs up then sort it
for dir, sent := range dm.m {
if !sent {
dirs = append(dirs, dir)
}
}
sort.Strings(dirs)
// Now convert the strings to fs.Dir objects in batches and send them
now := time.Now()
list := NewListRHelper(fn)
for _, dir := range dirs {
err = list.Add(fs.NewDir(dir, now))
if err != nil {
return err
}
}
return list.Flush()
}
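// Example sketch of sendEntries, mirroring TestDirMapSendEntries
// below: after adding entries under dir, dir1, dir2 and dir3 where
// only "dir" was listed by the backend, the callback receives just the
// unsent directories "dir1", "dir2", "dir3", in sorted order.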
// listR walks the file tree using ListR
func listR(f fs.Fs, path string, includeAll bool, listType ListType, fn fs.ListRCallback, doListR fs.ListRFn, synthesizeDirs bool) error {
includeDirectory := filter.Active.IncludeDirectory(f)
if !includeAll {
includeAll = filter.Active.InActive()
}
var dm *dirMap
if synthesizeDirs {
dm = newDirMap(path)
}
var mu sync.Mutex
err := doListR(path, func(entries fs.DirEntries) (err error) {
if synthesizeDirs {
err = dm.addEntries(entries)
if err != nil {
return err
}
}
listType.Filter(&entries)
if !includeAll {
filteredEntries := entries[:0]
for _, entry := range entries {
var include bool
switch x := entry.(type) {
case fs.Object:
include = filter.Active.IncludeObject(x)
case fs.Directory:
include, err = includeDirectory(x.Remote())
if err != nil {
return err
}
default:
return errors.Errorf("unknown object type %T", entry)
}
if include {
filteredEntries = append(filteredEntries, entry)
} else {
fs.Debugf(entry, "Excluded from sync (and deletion)")
}
}
entries = filteredEntries
}
mu.Lock()
defer mu.Unlock()
return fn(entries)
})
if err != nil {
return err
}
if synthesizeDirs {
err = dm.sendEntries(fn)
if err != nil {
return err
}
}
return nil
}
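// Example sketch of synthesizeDirs, taken from the bucket based cases
// in TestListR below: listing "a", "b", "dir/a", "dir/b" and
// "dir/subdir/c" yields those objects, the backend's own "dir/subdir"
// entry, and finally the synthesized parent "dir", which the backend
// itself never returned.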
// walkListDirSorted lists the directory.
//
// It implements Walk using non-recursive directory listing.
@@ -759,9 +506,12 @@ func walkR(f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listR f
return nil
}
// GetAll runs ListR getting all the results
// GetAll runs Walk getting all the results
func GetAll(f fs.Fs, path string, includeAll bool, maxLevel int) (objs []fs.Object, dirs []fs.Directory, err error) {
err = ListR(f, path, includeAll, maxLevel, ListAll, func(entries fs.DirEntries) error {
err = Walk(f, path, includeAll, maxLevel, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
return err
}
for _, entry := range entries {
switch x := entry.(type) {
case fs.Object:

@@ -2,15 +2,12 @@ package walk
import (
"fmt"
"io"
"strings"
"sync"
"testing"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/filter"
"github.com/ncw/rclone/fstest/mockdir"
"github.com/ncw/rclone/fstest/mockfs"
"github.com/ncw/rclone/fstest/mockobject"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
@@ -637,307 +634,3 @@ b/c/d/
// Set to default value, to avoid side effects
filter.Active.Opt.ExcludeFile = ""
}
func TestListType(t *testing.T) {
assert.Equal(t, true, ListObjects.Objects())
assert.Equal(t, false, ListObjects.Dirs())
assert.Equal(t, false, ListDirs.Objects())
assert.Equal(t, true, ListDirs.Dirs())
assert.Equal(t, true, ListAll.Objects())
assert.Equal(t, true, ListAll.Dirs())
var (
a = mockobject.Object("a")
b = mockobject.Object("b")
dir = mockdir.New("dir")
adir = mockobject.Object("dir/a")
dir2 = mockdir.New("dir2")
origEntries = fs.DirEntries{
a, b, dir, adir, dir2,
}
dirEntries = fs.DirEntries{
dir, dir2,
}
objEntries = fs.DirEntries{
a, b, adir,
}
)
copyOrigEntries := func() (out fs.DirEntries) {
out = make(fs.DirEntries, len(origEntries))
copy(out, origEntries)
return out
}
got := copyOrigEntries()
ListAll.Filter(&got)
assert.Equal(t, origEntries, got)
got = copyOrigEntries()
ListObjects.Filter(&got)
assert.Equal(t, objEntries, got)
got = copyOrigEntries()
ListDirs.Filter(&got)
assert.Equal(t, dirEntries, got)
}
func TestListR(t *testing.T) {
objects := fs.DirEntries{
mockobject.Object("a"),
mockobject.Object("b"),
mockdir.New("dir"),
mockobject.Object("dir/a"),
mockobject.Object("dir/b"),
mockobject.Object("dir/c"),
}
f := mockfs.NewFs("mock", "/")
var got []string
clearCallback := func() {
got = nil
}
callback := func(entries fs.DirEntries) error {
for _, entry := range entries {
got = append(got, entry.Remote())
}
return nil
}
doListR := func(dir string, callback fs.ListRCallback) error {
var os fs.DirEntries
for _, o := range objects {
if dir == "" || strings.HasPrefix(o.Remote(), dir+"/") {
os = append(os, o)
}
}
return callback(os)
}
// Setup filter
oldFilter := filter.Active
defer func() {
filter.Active = oldFilter
}()
var err error
filter.Active, err = filter.NewFilter(nil)
require.NoError(t, err)
require.NoError(t, filter.Active.AddRule("+ b"))
require.NoError(t, filter.Active.AddRule("- *"))
// Base case
clearCallback()
err = listR(f, "", true, ListAll, callback, doListR, false)
require.NoError(t, err)
require.Equal(t, []string{"a", "b", "dir", "dir/a", "dir/b", "dir/c"}, got)
// Base case - with Objects
clearCallback()
err = listR(f, "", true, ListObjects, callback, doListR, false)
require.NoError(t, err)
require.Equal(t, []string{"a", "b", "dir/a", "dir/b", "dir/c"}, got)
// Base case - with Dirs
clearCallback()
err = listR(f, "", true, ListDirs, callback, doListR, false)
require.NoError(t, err)
require.Equal(t, []string{"dir"}, got)
// With filter
clearCallback()
err = listR(f, "", false, ListAll, callback, doListR, false)
require.NoError(t, err)
require.Equal(t, []string{"b", "dir", "dir/b"}, got)
// With filter - with Objects
clearCallback()
err = listR(f, "", false, ListObjects, callback, doListR, false)
require.NoError(t, err)
require.Equal(t, []string{"b", "dir/b"}, got)
// With filter - with Dir
clearCallback()
err = listR(f, "", false, ListDirs, callback, doListR, false)
require.NoError(t, err)
require.Equal(t, []string{"dir"}, got)
// With filter and subdir
clearCallback()
err = listR(f, "dir", false, ListAll, callback, doListR, false)
require.NoError(t, err)
require.Equal(t, []string{"dir/b"}, got)
// Now bucket based
objects = fs.DirEntries{
mockobject.Object("a"),
mockobject.Object("b"),
mockobject.Object("dir/a"),
mockobject.Object("dir/b"),
mockobject.Object("dir/subdir/c"),
mockdir.New("dir/subdir"),
}
// Base case
clearCallback()
err = listR(f, "", true, ListAll, callback, doListR, true)
require.NoError(t, err)
require.Equal(t, []string{"a", "b", "dir/a", "dir/b", "dir/subdir/c", "dir/subdir", "dir"}, got)
// With filter
clearCallback()
err = listR(f, "", false, ListAll, callback, doListR, true)
require.NoError(t, err)
require.Equal(t, []string{"b", "dir/b", "dir/subdir", "dir"}, got)
// With filter and subdir
clearCallback()
err = listR(f, "dir", false, ListAll, callback, doListR, true)
require.NoError(t, err)
require.Equal(t, []string{"dir/b", "dir/subdir"}, got)
// With filter and subdir - with Objects
clearCallback()
err = listR(f, "dir", false, ListObjects, callback, doListR, true)
require.NoError(t, err)
require.Equal(t, []string{"dir/b"}, got)
// With filter and subdir - with Dirs
clearCallback()
err = listR(f, "dir", false, ListDirs, callback, doListR, true)
require.NoError(t, err)
require.Equal(t, []string{"dir/subdir"}, got)
}
func TestDirMapAdd(t *testing.T) {
type add struct {
dir string
sent bool
}
for i, test := range []struct {
root string
in []add
want map[string]bool
}{
{
root: "",
in: []add{
{"", true},
},
want: map[string]bool{},
},
{
root: "",
in: []add{
{"a/b/c", true},
},
want: map[string]bool{
"a/b/c": true,
"a/b": false,
"a": false,
},
},
{
root: "",
in: []add{
{"a/b/c", true},
{"a/b", true},
},
want: map[string]bool{
"a/b/c": true,
"a/b": true,
"a": false,
},
},
{
root: "",
in: []add{
{"a/b", true},
{"a/b/c", false},
},
want: map[string]bool{
"a/b/c": false,
"a/b": true,
"a": false,
},
},
{
root: "root",
in: []add{
{"root/a/b", true},
{"root/a/b/c", false},
},
want: map[string]bool{
"root/a/b/c": false,
"root/a/b": true,
"root/a": false,
},
},
} {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
dm := newDirMap(test.root)
for _, item := range test.in {
dm.add(item.dir, item.sent)
}
assert.Equal(t, test.want, dm.m)
})
}
}
func TestDirMapAddEntries(t *testing.T) {
dm := newDirMap("")
entries := fs.DirEntries{
mockobject.Object("dir/a"),
mockobject.Object("dir/b"),
mockdir.New("dir"),
mockobject.Object("dir2/a"),
mockobject.Object("dir2/b"),
}
require.NoError(t, dm.addEntries(entries))
assert.Equal(t, map[string]bool{"dir": true, "dir2": false}, dm.m)
}
func TestDirMapSendEntries(t *testing.T) {
var got []string
clearCallback := func() {
got = nil
}
callback := func(entries fs.DirEntries) error {
for _, entry := range entries {
got = append(got, entry.Remote())
}
return nil
}
// general test
dm := newDirMap("")
entries := fs.DirEntries{
mockobject.Object("dir/a"),
mockobject.Object("dir/b"),
mockdir.New("dir"),
mockobject.Object("dir2/a"),
mockobject.Object("dir2/b"),
mockobject.Object("dir1/a"),
mockobject.Object("dir3/b"),
}
require.NoError(t, dm.addEntries(entries))
clearCallback()
err := dm.sendEntries(callback)
require.NoError(t, err)
assert.Equal(t, []string{
"dir1",
"dir2",
"dir3",
}, got)
// return error from callback
callback2 := func(entries fs.DirEntries) error {
return io.EOF
}
err = dm.sendEntries(callback2)
require.Equal(t, io.EOF, err)
// empty
dm = newDirMap("")
clearCallback()
err = dm.sendEntries(callback)
require.NoError(t, err)
assert.Equal(t, []string(nil), got)
}

@@ -468,8 +468,11 @@ func Purge(f fs.Fs) {
}
if doFallbackPurge {
dirs := []string{""}
err = walk.ListR(f, "", true, -1, walk.ListAll, func(entries fs.DirEntries) error {
var err error
err = walk.Walk(f, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
log.Printf("purge walk returned error: %v", err)
return nil
}
entries.ForObject(func(obj fs.Object) {
fs.Debugf(f, "Purge object %q", obj.Remote())
err = obj.Remove()

@@ -782,39 +782,6 @@ func Run(t *testing.T, opt *Opt) {
TestFsListDirFile2(t)
})
// Test the files are all there with walk.ListR recursive listings
t.Run("FsListR", func(t *testing.T) {
skipIfNotOk(t)
objs, dirs, err := walk.GetAll(remote, "", true, -1)
require.NoError(t, err)
assert.Equal(t, []string{
"hello_ sausage",
"hello_ sausage/êé",
"hello_ sausage/êé/Hello, 世界",
"hello_ sausage/êé/Hello, 世界/ _ ' @ _ _ & _ + ≠",
}, dirsToNames(dirs))
assert.Equal(t, []string{
"file name.txt",
"hello_ sausage/êé/Hello, 世界/ _ ' @ _ _ & _ + ≠/z.txt",
}, objsToNames(objs))
})
// Test the files are all there with
// walk.ListR recursive listings on a sub dir
t.Run("FsListRSubdir", func(t *testing.T) {
skipIfNotOk(t)
objs, dirs, err := walk.GetAll(remote, path.Dir(path.Dir(path.Dir(path.Dir(file2.Path)))), true, -1)
require.NoError(t, err)
assert.Equal(t, []string{
"hello_ sausage/êé",
"hello_ sausage/êé/Hello, 世界",
"hello_ sausage/êé/Hello, 世界/ _ ' @ _ _ & _ + ≠",
}, dirsToNames(dirs))
assert.Equal(t, []string{
"hello_ sausage/êé/Hello, 世界/ _ ' @ _ _ & _ + ≠/z.txt",
}, objsToNames(objs))
})
// TestFsListDirRoot tests that DirList works in the root
TestFsListDirRoot := func(t *testing.T) {
skipIfNotOk(t)

@@ -139,7 +139,13 @@ func newRunIndividual(t *testing.T, individual bool) *Run {
*r = *oneRun
r.cleanRemote = func() {
var toDelete []string
err := walk.ListR(r.Fremote, "", true, -1, walk.ListAll, func(entries fs.DirEntries) error {
err := walk.Walk(r.Fremote, "", true, -1, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
if err == fs.ErrorDirNotFound {
return nil
}
t.Fatalf("Error listing: %v", err)
}
for _, entry := range entries {
switch x := entry.(type) {
case fs.Object:

@@ -8,7 +8,6 @@ tests:
- path: fs/sync
subdir: true
fastlist: true
- path: vfs
backends:
# - backend: "amazonclouddrive"
# remote: "TestAmazonCloudDrive:"
@@ -139,7 +138,3 @@ backends:
remote: "TestUnion:"
subdir: false
fastlist: false
- backend: "koofr"
remote: "TestKoofr:"
subdir: false
fastlist: false

go.mod

@@ -17,18 +17,16 @@ require (
github.com/djherbis/times v1.2.0
github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible
github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9 // indirect
github.com/goftp/server v0.0.0-20190304020633-eabccc535b5a
github.com/goftp/server v0.0.0-20190111142836-88de73f463af
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jlaffaye/ftp v0.0.0-20190126081051-8019e6774408
github.com/jtolds/gls v4.2.1+incompatible // indirect
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 // indirect
github.com/koofr/go-httpclient v0.0.0-20180104120329-03786175608a
github.com/koofr/go-koofrclient v0.0.0-20190131164641-7f327592caff
github.com/kr/fs v0.1.0 // indirect
github.com/mattn/go-runewidth v0.0.4
github.com/mattn/go-runewidth v0.0.4 // indirect
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2
github.com/ncw/swift v1.0.46
github.com/ncw/swift v1.0.44
github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd
github.com/patrickmn/go-cache v2.1.0+incompatible

go.sum

@@ -54,8 +54,6 @@ github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9 h1:cC0Hbb+18DJ4i
github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9/go.mod h1:GpOj6zuVBG3Inr9qjEnuVTgBlk2lZ1S9DcoFiXWyKss=
github.com/goftp/server v0.0.0-20190111142836-88de73f463af h1:PJxb1aA1z+Ohy2j28L92+ng9phXpZVFRFbPkfmJcRGo=
github.com/goftp/server v0.0.0-20190111142836-88de73f463af/go.mod h1:k/SS6VWkxY7dHPhoMQ8IdRu8L4lQtmGbhyXGg+vCnXE=
github.com/goftp/server v0.0.0-20190304020633-eabccc535b5a h1:XTJuuzIub3zu2FgPqdFM9XFYYisXWu2hN/rFwayAIcY=
github.com/goftp/server v0.0.0-20190304020633-eabccc535b5a/go.mod h1:k/SS6VWkxY7dHPhoMQ8IdRu8L4lQtmGbhyXGg+vCnXE=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
@@ -89,10 +87,6 @@ github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVY
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 h1:PJPDf8OUfOK1bb/NeTKd4f1QXZItOX389VN3B6qC8ro=
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/koofr/go-httpclient v0.0.0-20180104120329-03786175608a h1:W+gnfphB7WpRj0rbTF40e3edULfri4fou2kUFw6AF3A=
github.com/koofr/go-httpclient v0.0.0-20180104120329-03786175608a/go.mod h1:3xszwh+rNrYk1r9SStc4iJ326gne1OaBcrdB1ACsbzI=
github.com/koofr/go-koofrclient v0.0.0-20190131164641-7f327592caff h1:GlfzG8bgyoJYz+5sMvGpYnHrg4veNVNnDGuE9hTEMHk=
github.com/koofr/go-koofrclient v0.0.0-20190131164641-7f327592caff/go.mod h1:MRAz4Gsxd+OzrZ0owwrUHc0zLESL+1Y5syqK/sJxK2A=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
@@ -109,8 +103,6 @@ github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2 h1:VlXvEx6JbFp7F9iz92zX
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2/go.mod h1:MLIrzg7gp/kzVBxRE1olT7CWYMCklcUWU+ekoxOD9x0=
github.com/ncw/swift v1.0.44 h1:EKvOTvUxElbpDWqxsyVaVGvc2IfuOqQnRmjnR2AGhQ4=
github.com/ncw/swift v1.0.44/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/ncw/swift v1.0.46 h1:ewnoFKEI9f2LT+gqmeeiJ1SCzOBDTcK3JF1XziR85QQ=
github.com/ncw/swift v1.0.46/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d h1:x3S6kxmy49zXVVyhcnrFqxvNVCBPb2KZ9hV2RBdS840=

@@ -31,10 +31,11 @@ func Register(fn func()) FnHandle {
fns[&fn] = true
fnsMutex.Unlock()
// Run AtExit handlers on exitSignals so everything gets tidied up properly
// Run AtExit handlers on SIGINT or SIGTERM so everything gets
// tidied up properly
registerOnce.Do(func() {
exitChan = make(chan os.Signal, 1)
signal.Notify(exitChan, exitSignals...)
signal.Notify(exitChan, os.Interrupt) // syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT
go func() {
sig := <-exitChan
if sig == nil {

@@ -1,9 +0,0 @@
//+build windows plan9
package atexit
import (
"os"
)
var exitSignals = []os.Signal{os.Interrupt}

@@ -1,10 +0,0 @@
//+build !windows,!plan9
package atexit
import (
"os"
"syscall"
)
var exitSignals = []os.Signal{syscall.SIGINT, syscall.SIGTERM} // Not syscall.SIGQUIT as we want the default behaviour

@@ -698,9 +698,6 @@ func (cmd commandRetr) Execute(conn *Conn, param string) {
defer data.Close()
conn.writeMessage(150, fmt.Sprintf("Data transfer starting %v bytes", bytes))
err = conn.sendOutofBandDataWriter(data)
if err != nil {
conn.writeMessage(551, "Error reading file")
}
} else {
conn.writeMessage(551, "File not available")
}

@@ -1,11 +0,0 @@
/bin
/pkg
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# developer specific
*.sublime-workspace
*.sublime-project

@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Koofr
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@@ -1,15 +0,0 @@
go-httpclient
=============
Go HTTP client.
[![GoDoc](https://godoc.org/github.com/koofr/go-httpclient?status.png)](https://godoc.org/github.com/koofr/go-httpclient)
## Install
go get github.com/koofr/go-httpclient
## Testing
go get -t
go test

@@ -1,38 +0,0 @@
package httpclient
import (
"errors"
"fmt"
"net/http"
)
type InvalidStatusError struct {
Expected []int
Got int
Headers http.Header
Content string
}
func (e InvalidStatusError) Error() string {
return fmt.Sprintf("Invalid response status! Got %d, expected %d; headers: %s, content: %s", e.Got, e.Expected, e.Headers, e.Content)
}
func IsInvalidStatusError(err error) (invalidStatusError *InvalidStatusError, ok bool) {
if ise, ok := err.(InvalidStatusError); ok {
return &ise, true
} else if ise, ok := err.(*InvalidStatusError); ok {
return ise, true
} else {
return nil, false
}
}
func IsInvalidStatusCode(err error, statusCode int) bool {
if ise, ok := IsInvalidStatusError(err); ok {
return ise.Got == statusCode
} else {
return false
}
}
var RateLimitTimeoutError = errors.New("HTTPClient rate limit timeout")

@@ -1,351 +0,0 @@
package httpclient
import (
"bytes"
"encoding/json"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httputil"
"net/url"
"os"
"strings"
"time"
)
var XmlHeaderBytes []byte = []byte(xml.Header)
type HTTPClient struct {
BaseURL *url.URL
Headers http.Header
Client *http.Client
PostHooks map[int]func(*http.Request, *http.Response) error
rateLimited bool
rateLimitChan chan struct{}
rateLimitTimeout time.Duration
}
func New() (httpClient *HTTPClient) {
return &HTTPClient{
Client: HttpClient,
Headers: make(http.Header),
PostHooks: make(map[int]func(*http.Request, *http.Response) error),
}
}
func Insecure() (httpClient *HTTPClient) {
httpClient = New()
httpClient.Client = InsecureHttpClient
return httpClient
}
var DefaultClient = New()
func (c *HTTPClient) SetPostHook(onStatus int, hook func(*http.Request, *http.Response) error) {
c.PostHooks[onStatus] = hook
}
func (c *HTTPClient) SetRateLimit(limit int, timeout time.Duration) {
c.rateLimited = true
c.rateLimitChan = make(chan struct{}, limit)
for i := 0; i < limit; i++ {
c.rateLimitChan <- struct{}{}
}
c.rateLimitTimeout = timeout
}
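// Example sketch (hypothetical values): allow at most 4 in-flight
// requests and give up with RateLimitTimeoutError after 30 seconds.
//
// c := New()
// c.SetRateLimit(4, 30*time.Second)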
func (c *HTTPClient) buildURL(req *RequestData) *url.URL {
bu := c.BaseURL
rpath := req.Path
if strings.HasSuffix(bu.Path, "/") && strings.HasPrefix(rpath, "/") {
rpath = rpath[1:]
}
opaque := EscapePath(bu.Path + rpath)
u := &url.URL{
Scheme: bu.Scheme,
Host: bu.Host,
Opaque: opaque,
}
if req.Params != nil {
u.RawQuery = req.Params.Encode()
}
return u
}
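// Example sketch (hypothetical URLs): with BaseURL
// "https://api.example.com/v1/" and req.Path "/files/a b", buildURL
// joins the two paths and stores the escaped result in Opaque, so the
// request URI on the wire is "/v1/files/a%20b" against host
// api.example.com, with req.Params encoded into the query string.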
func (c *HTTPClient) setHeaders(req *RequestData, httpReq *http.Request) {
switch req.RespEncoding {
case EncodingJSON:
httpReq.Header.Set("Accept", "application/json")
case EncodingXML:
httpReq.Header.Set("Accept", "application/xml")
}
if c.Headers != nil {
for key, values := range c.Headers {
for _, value := range values {
httpReq.Header.Set(key, value)
}
}
}
if req.Headers != nil {
for key, values := range req.Headers {
for _, value := range values {
httpReq.Header.Set(key, value)
}
}
}
}
func (c *HTTPClient) checkStatus(req *RequestData, response *http.Response) (err error) {
if req.ExpectedStatus != nil {
statusOk := false
for _, status := range req.ExpectedStatus {
if response.StatusCode == status {
statusOk = true
}
}
if !statusOk {
lr := io.LimitReader(response.Body, 10*1024)
contentBytes, _ := ioutil.ReadAll(lr)
content := string(contentBytes)
err = InvalidStatusError{
Expected: req.ExpectedStatus,
Got: response.StatusCode,
Headers: response.Header,
Content: content,
}
return err
}
}
return nil
}
func (c *HTTPClient) unmarshalResponse(req *RequestData, response *http.Response) (err error) {
var buf []byte
switch req.RespEncoding {
case EncodingJSON:
defer response.Body.Close()
if buf, err = ioutil.ReadAll(response.Body); err != nil {
return err
}
err = json.Unmarshal(buf, req.RespValue)
if err != nil {
return err
}
return nil
case EncodingXML:
defer response.Body.Close()
if buf, err = ioutil.ReadAll(response.Body); err != nil {
return err
}
err = xml.Unmarshal(buf, req.RespValue)
if err != nil {
return err
}
return nil
}
switch req.RespValue.(type) {
case *[]byte:
defer response.Body.Close()
if buf, err = ioutil.ReadAll(response.Body); err != nil {
return err
}
respVal := req.RespValue.(*[]byte)
*respVal = buf
return nil
}
if req.RespConsume {
defer response.Body.Close()
ioutil.ReadAll(response.Body)
}
return nil
}
func (c *HTTPClient) marshalRequest(req *RequestData) (err error) {
if req.ReqReader != nil || req.ReqValue == nil {
return nil
}
if req.Headers == nil {
req.Headers = make(http.Header)
}
var buf []byte
switch req.ReqEncoding {
case EncodingJSON:
buf, err = json.Marshal(req.ReqValue)
if err != nil {
return err
}
req.ReqReader = bytes.NewReader(buf)
req.Headers.Set("Content-Type", "application/json")
req.Headers.Set("Content-Length", fmt.Sprintf("%d", len(buf)))
req.ReqContentLength = int64(len(buf))
return nil
case EncodingXML:
buf, err = xml.Marshal(req.ReqValue)
if err != nil {
return err
}
buf = append(XmlHeaderBytes, buf...)
req.ReqReader = bytes.NewReader(buf)
req.Headers.Set("Content-Type", "application/xml")
req.Headers.Set("Content-Length", fmt.Sprintf("%d", len(buf)))
req.ReqContentLength = int64(len(buf))
return nil
case EncodingForm:
if data, ok := req.ReqValue.(url.Values); ok {
formStr := data.Encode()
req.ReqReader = strings.NewReader(formStr)
req.Headers.Set("Content-Type", "application/x-www-form-urlencoded")
req.Headers.Set("Content-Length", fmt.Sprintf("%d", len(formStr)))
req.ReqContentLength = int64(len(formStr))
return nil
} else {
return fmt.Errorf("HTTPClient: invalid ReqValue type %T", req.ReqValue)
}
}
return fmt.Errorf("HTTPClient: invalid ReqEncoding: %s", req.ReqEncoding)
}
func (c *HTTPClient) runPostHook(req *http.Request, response *http.Response) (err error) {
hook, ok := c.PostHooks[response.StatusCode]
if ok {
err = hook(req, response)
}
return err
}
func (c *HTTPClient) Request(req *RequestData) (response *http.Response, err error) {
err = c.marshalRequest(req)
if err != nil {
return nil, err
}
r, err := http.NewRequest(req.Method, req.FullURL, req.ReqReader)
if err != nil {
return nil, err
}
r.ContentLength = req.ReqContentLength
if req.FullURL == "" {
r.URL = c.buildURL(req)
r.Host = r.URL.Host
}
c.setHeaders(req, r)
if c.rateLimited {
if c.rateLimitTimeout > 0 {
select {
case t := <-c.rateLimitChan:
defer func() {
c.rateLimitChan <- t
}()
case <-time.After(c.rateLimitTimeout):
return nil, RateLimitTimeoutError
}
} else {
t := <-c.rateLimitChan
defer func() {
c.rateLimitChan <- t
}()
}
}
isTraceEnabled := os.Getenv("HTTPCLIENT_TRACE") != ""
if isTraceEnabled {
requestBytes, _ := httputil.DumpRequestOut(r, true)
fmt.Println(string(requestBytes))
}
if req.IgnoreRedirects {
transport := c.Client.Transport
if transport == nil {
transport = http.DefaultTransport
}
response, err = transport.RoundTrip(r)
} else {
response, err = c.Client.Do(r)
}
if isTraceEnabled && response != nil {
responseBytes, _ := httputil.DumpResponse(response, true)
fmt.Println(string(responseBytes))
}
if err != nil {
return response, err
}
if err = c.runPostHook(r, response); err != nil {
return response, err
}
if err = c.checkStatus(req, response); err != nil {
defer response.Body.Close()
return response, err
}
if err = c.unmarshalResponse(req, response); err != nil {
return response, err
}
return response, nil
}

@@ -1,96 +0,0 @@
package httpclient
import (
"io"
"net/http"
"net/url"
)
type Encoding string
const (
EncodingJSON = "JSON"
EncodingXML = "XML"
EncodingForm = "Form"
)
type RequestData struct {
Method string
Path string
Params url.Values
FullURL string // client.BaseURL + Path or FullURL
Headers http.Header
ReqReader io.Reader
ReqEncoding Encoding
ReqValue interface{}
ReqContentLength int64
ExpectedStatus []int
IgnoreRedirects bool
RespEncoding Encoding
RespValue interface{}
RespConsume bool
}
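// Example sketch (hypothetical endpoint, assuming the client's BaseURL
// is set): a JSON GET which fails with InvalidStatusError unless the
// server answers 200.
//
// var out map[string]interface{}
// _, err := DefaultClient.Request(&RequestData{
//     Method:         "GET",
//     Path:           "/api/status",
//     ExpectedStatus: []int{http.StatusOK},
//     RespEncoding:   EncodingJSON,
//     RespValue:      &out,
// })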
func (r *RequestData) CanCopy() bool {
return r.ReqReader == nil
}
func (r *RequestData) Copy() (ok bool, nr *RequestData) {
if !r.CanCopy() {
return false, nil
}
nr = &RequestData{
Method: r.Method,
Path: r.Path,
FullURL: r.FullURL,
ReqEncoding: r.ReqEncoding,
ReqValue: r.ReqValue,
IgnoreRedirects: r.IgnoreRedirects,
RespEncoding: r.RespEncoding,
RespValue: r.RespValue,
RespConsume: r.RespConsume,
}
if r.Params != nil {
nr.Params = make(url.Values)
for k, vs := range r.Params {
nvs := make([]string, len(vs))
for i, v := range vs {
nvs[i] = v
}
nr.Params[k] = nvs
}
}
if r.Headers != nil {
nr.Headers = make(http.Header)
for k, vs := range r.Headers {
nvs := make([]string, len(vs))
for i, v := range vs {
nvs[i] = v
}
nr.Headers[k] = nvs
}
}
if r.ExpectedStatus != nil {
nr.ExpectedStatus = make([]int, len(r.ExpectedStatus))
for i, v := range r.ExpectedStatus {
nr.ExpectedStatus[i] = v
}
}
return true, nr
}

@@ -1,62 +0,0 @@
package httpclient
import (
"io"
"mime/multipart"
"net/http"
)
func (req *RequestData) UploadFile(fieldName string, fileName string, reader io.Reader) (err error) {
return req.UploadFileExtra(fieldName, fileName, reader, nil)
}
func (req *RequestData) UploadFileExtra(fieldName string, fileName string, reader io.Reader, extra map[string]string) (err error) {
r, w := io.Pipe()
writer := multipart.NewWriter(w)
go func() {
var err error
defer func() {
if err == nil {
w.Close()
}
}()
for k, v := range extra {
err = writer.WriteField(k, v)
if err != nil {
w.CloseWithError(err)
return
}
}
part, err := writer.CreateFormFile(fieldName, fileName)
if err != nil {
w.CloseWithError(err)
return
}
defer writer.Close()
_, err = io.Copy(part, reader)
if err != nil {
w.CloseWithError(err)
return
}
}()
req.ReqReader = r
if req.Headers == nil {
req.Headers = make(http.Header)
}
req.Headers.Set("Content-Type", writer.FormDataContentType())
return
}
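// Example sketch (hypothetical names): streaming a multipart upload,
// as the koofr client does below with field name "file".
//
// err := req.UploadFile("file", "report.txt", fileReader)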

@@ -1,29 +0,0 @@
package httpclient
import (
"crypto/tls"
"net/http"
)
var HttpTransport = &http.Transport{
DisableCompression: true,
Proxy: http.ProxyFromEnvironment,
}
var HttpClient = &http.Client{
Transport: HttpTransport,
}
var InsecureTlsConfig = &tls.Config{
InsecureSkipVerify: true,
}
var InsecureHttpTransport = &http.Transport{
TLSClientConfig: InsecureTlsConfig,
DisableCompression: true,
Proxy: http.ProxyFromEnvironment,
}
var InsecureHttpClient = &http.Client{
Transport: InsecureHttpTransport,
}

@@ -1,14 +0,0 @@
package httpclient
import (
"net/url"
"strings"
)
func EscapePath(path string) string {
u := url.URL{
Path: path,
}
return strings.Replace(u.String(), "+", "%2b", -1)
}
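// Example sketch: EscapePath("/files/a+b c") first escapes the path to
// "/files/a+b%20c" and then replaces the literal "+" to give
// "/files/a%2bb%20c", so a server cannot misread the plus as a space.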

@@ -1,11 +0,0 @@
/bin
/pkg
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# developer specific
*.sublime-workspace
*.sublime-project

@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Koofr
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@@ -1,15 +0,0 @@
go-koofrclient
===========
Go Koofr client.
[![GoDoc](https://godoc.org/github.com/koofr/go-koofrclient?status.png)](https://godoc.org/github.com/koofr/go-koofrclient)
## Install
go get github.com/koofr/go-koofrclient
## Testing
go get -t
KOOFR_APIBASE="https://app.koofr.net" KOOFR_EMAIL="email@example.com" KOOFR_PASSWORD="yourpassword" go test

@@ -1,217 +0,0 @@
package koofrclient
import (
"path"
)
type TokenRequest struct {
Email string `json:"email"`
Password string `json:"password"`
}
type Token struct {
Token string
}
type MountType string
const (
MountDeviceType = "device"
MountExportType = "export"
MountImportType = "import"
)
type Mount struct {
Id string `json:"id"`
Name string `json:"name"`
Type MountType `json:"type"`
Origin string `json:"origin"`
SpaceTotal int64 `json:"spaceTotal"`
SpaceUsed int64 `json:"spaceUsed"`
Online bool `json:"online"`
Owner MountUser `json:"owner"`
Users []MountUser `json:"users"`
Groups []MountGroup `json:"groups"`
Version int `json:"version"`
Permissions MountPermissions `json:"permissions"`
IsPrimary bool `json:"isPrimary"`
IsShared bool `json:"isShared"`
}
type MountUser struct {
Id string `json:"id"`
Name string `json:"name"`
Email string `json:"email"`
Permissions MountPermissions `json:"permissions"`
}
type MountGroup struct {
Id string `json:"id"`
Name string `json:"name"`
Permissions MountPermissions `json:"permissions"`
}
type MountPermissions struct {
Read bool `json:"READ"`
Write bool `json:"write"`
Owner bool `json:"OWNER"`
Mount bool `json:"MOUNT"`
CreateReceiver bool `json:"CREATE_RECEIVER"`
CreateLink bool `json:"CREATE_LINK"`
CreateAction bool `json:"CREATE_ACTION"`
Comment bool `json:"COMMENT"`
}
type DeviceProvider string
const (
StorageHubProvider = "storagehub"
StorageBlobProvider = "storageblob"
)
type Device struct {
Id string `json:"id"`
ApiKey string `json:"apiKey"`
Name string `json:"name"`
Status string `json:"status"`
SpaceTotal int64 `json:"spaceTotal"`
SpaceUsed int64 `json:"spaceUsed"`
SpaceFree int64 `json:"spaceFree"`
Version int `json:"version"`
Provider struct {
Name string `json:"name"`
Data interface{} `json:"data"`
} `json:"provider"`
ReadOnly bool `json:"readonly"`
RootMountId string `json:"rootMountId"`
}
type DeviceCreate struct {
Name string `json:"name"`
ProviderName DeviceProvider `json:"providerName"`
}
type DeviceUpdate struct {
Name string `json:"name"`
}
type FolderCreate struct {
Name string `json:"name"`
}
type FileCopy struct {
ToMountId string `json:"toMountId"`
TPath string `json:"toPath"`
}
type FileMove struct {
ToMountId string `json:"toMountId"`
TPath string `json:"toPath"`
}
type FileSpan struct {
Start int64
End int64
}
type FileUpload struct {
Name string `json:"name"`
}
type PutFilter struct {
Modified *int64
Size *int64
Hash *string
IgnoreNonExisting bool
NoRename bool
ForceOverwrite bool
}
type DeleteFilter struct {
Modified *int64
Size *int64
Hash *string
IfEmpty bool
}
type FileInfo struct {
Name string `json:"name"`
Type string `json:"type"`
Modified int64 `json:"modified"`
Size int64 `json:"size"`
ContentType string `json:"contentType"`
Path string `json:"path"`
Hash string `json:"hash"`
}
type FileTree struct {
FileInfo
Children []*FileTree `json:"children"`
}
func (tree *FileTree) Flatten() []FileInfo {
trees := []*FileTree{tree}
for i := 0; i < len(trees); i++ {
tree := trees[i]
for _, child := range tree.Children {
child.Name = path.Join(tree.Name, child.Name)
trees = append(trees, child)
}
}
infos := make([]FileInfo, len(trees))
for i, tree := range trees {
infos[i] = tree.FileInfo
}
return infos
}
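// Example sketch: Flatten joins each child's name onto its parent's,
// so a tree root -> a -> b flattens to FileInfos named "root",
// "root/a" and "root/a/b".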
type User struct {
Id string `json:"id"`
FirstName string `json:"firstName"`
LastName string `json:"lastName"`
Email string `json:"email"`
}
type Shared struct {
Name string `json:"name"`
Type MountType `json:"type"`
Modified int64 `json:"modified"`
Size int64 `json:"size"`
ContentType string `json:"contentType"`
Hash string `json:"hash"`
Mount Mount `json:"mount"`
Link Link `json:"link"`
Receiver Receiver `json:"receiver"`
}
type Link struct {
Id string `json:"id"`
Name string `json:"name"`
Path string `json:"path"`
Counter int64 `json:"counter"`
Url string `json:"url"`
ShortUrl string `json:"shortUrl"`
Hash string `json:"hash"`
Host string `json:"host"`
HasPassword bool `json:"hasPassword"`
Password string `json:"password"`
ValidFrom int64 `json:"validFrom"`
ValidTo int64 `json:"validTo"`
PasswordRequired bool `json:"passwordRequired"`
}
type Receiver struct {
Id string `json:"id"`
Name string `json:"name"`
Path string `json:"path"`
Counter int64 `json:"counter"`
Url string `json:"url"`
ShortUrl string `json:"shortUrl"`
Hash string `json:"hash"`
Host string `json:"host"`
HasPassword bool `json:"hasPassword"`
Password string `json:"password"`
ValidFrom int64 `json:"validFrom"`
ValidTo int64 `json:"validTo"`
Alert bool `json:"alert"`
}

@@ -1,89 +0,0 @@
package koofrclient
import (
"fmt"
"net/http"
"net/url"
"github.com/koofr/go-httpclient"
)
type KoofrClient struct {
*httpclient.HTTPClient
token string
userID string
}
func NewKoofrClient(baseUrl string, disableSecurity bool) *KoofrClient {
var httpClient *httpclient.HTTPClient
if disableSecurity {
httpClient = httpclient.Insecure()
} else {
httpClient = httpclient.New()
}
apiBaseUrl, _ := url.Parse(baseUrl)
httpClient.BaseURL = apiBaseUrl
httpClient.Headers.Set("User-Agent", "go koofrclient")
return &KoofrClient{
HTTPClient: httpClient,
token: "",
userID: "",
}
}
func (c *KoofrClient) SetUserAgent(ua string) {
c.Headers.Set("User-Agent", ua)
}
func (c *KoofrClient) SetToken(token string) {
c.token = token
c.HTTPClient.Headers.Set("Authorization", fmt.Sprintf("Token token=%s", token))
}
func (c *KoofrClient) GetToken() string {
return c.token
}
func (c *KoofrClient) SetUserID(userID string) {
c.userID = userID
}
func (c *KoofrClient) GetUserID() string {
return c.userID
}
func (c *KoofrClient) Authenticate(email string, password string) (err error) {
var tokenResponse Token
tokenRequest := TokenRequest{
Email: email,
Password: password,
}
request := httpclient.RequestData{
Method: "POST",
Path: "/token",
Headers: make(http.Header),
ExpectedStatus: []int{http.StatusOK},
ReqEncoding: httpclient.EncodingJSON,
ReqValue: tokenRequest,
RespEncoding: httpclient.EncodingJSON,
RespValue: &tokenResponse,
}
res, err := c.Request(&request)
if err != nil {
return
}
c.SetToken(tokenResponse.Token)
c.SetUserID(res.Header.Get("X-User-ID"))
return
}
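// Example sketch (placeholder credentials, matching the README above):
//
// c := NewKoofrClient("https://app.koofr.net", false)
// err := c.Authenticate("email@example.com", "yourpassword")
//
// On success the token and the X-User-ID header are stored on the
// client for subsequent requests.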

@@ -1,84 +0,0 @@
package koofrclient
import (
"github.com/koofr/go-httpclient"
"net/http"
)
func (c *KoofrClient) Devices() (devices []Device, err error) {
d := &struct {
Devices *[]Device
}{&devices}
request := httpclient.RequestData{
Method: "GET",
Path: "/api/v2/devices",
ExpectedStatus: []int{http.StatusOK},
RespEncoding: httpclient.EncodingJSON,
RespValue: &d,
}
_, err = c.Request(&request)
return
}
func (c *KoofrClient) DevicesCreate(name string, provider DeviceProvider) (device Device, err error) {
deviceCreate := DeviceCreate{name, provider}
request := httpclient.RequestData{
Method: "POST",
Path: "/api/v2/devices",
ExpectedStatus: []int{http.StatusCreated},
ReqEncoding: httpclient.EncodingJSON,
ReqValue: deviceCreate,
RespEncoding: httpclient.EncodingJSON,
RespValue: &device,
}
_, err = c.Request(&request)
return
}
func (c *KoofrClient) DevicesDetails(deviceId string) (device Device, err error) {
request := httpclient.RequestData{
Method: "GET",
Path: "/api/v2/devices/" + deviceId,
ExpectedStatus: []int{http.StatusOK},
RespEncoding: httpclient.EncodingJSON,
RespValue: &device,
}
_, err = c.Request(&request)
return
}
func (c *KoofrClient) DevicesUpdate(deviceId string, deviceUpdate DeviceUpdate) (err error) {
request := httpclient.RequestData{
Method: "PUT",
Path: "/api/v2/devices/" + deviceId,
ExpectedStatus: []int{http.StatusNoContent},
ReqEncoding: httpclient.EncodingJSON,
ReqValue: deviceUpdate,
RespConsume: true,
}
_, err = c.Request(&request)
return
}
func (c *KoofrClient) DevicesDelete(deviceId string) (err error) {
request := httpclient.RequestData{
Method: "DELETE",
Path: "/api/v2/devices/" + deviceId,
ExpectedStatus: []int{http.StatusNoContent},
RespConsume: true,
}
_, err = c.Request(&request)
return
}

@@ -1,294 +0,0 @@
package koofrclient
import (
"fmt"
"io"
"net/http"
"net/url"
"path"
"github.com/koofr/go-httpclient"
)
var ErrCannotOverwrite = fmt.Errorf("Can not overwrite (filter constraint fails)")
var ErrCannotRemove = fmt.Errorf("Can not remove (filter constraint fails)")
func (c *KoofrClient) FilesInfo(mountId string, path string) (info FileInfo, err error) {
params := url.Values{}
params.Set("path", path)
request := httpclient.RequestData{
Method: "GET",
Path: "/api/v2/mounts/" + mountId + "/files/info",
Params: params,
ExpectedStatus: []int{http.StatusOK},
RespEncoding: httpclient.EncodingJSON,
RespValue: &info,
}
_, err = c.Request(&request)
return
}
func (c *KoofrClient) FilesList(mountId string, basePath string) (files []FileInfo, err error) {
f := &struct {
Files *[]FileInfo
}{&files}
params := url.Values{}
params.Set("path", basePath)
request := httpclient.RequestData{
Method: "GET",
Path: "/api/v2/mounts/" + mountId + "/files/list",
Params: params,
ExpectedStatus: []int{http.StatusOK},
RespEncoding: httpclient.EncodingJSON,
RespValue: &f,
}
_, err = c.Request(&request)
if err != nil {
return
}
for i := range files {
files[i].Path = path.Join(basePath, files[i].Name)
}
return
}
func (c *KoofrClient) FilesTree(mountId string, path string) (tree FileTree, err error) {
params := url.Values{}
params.Set("path", path)
request := httpclient.RequestData{
Method: "GET",
Path: "/api/v2/mounts/" + mountId + "/files/tree",
Params: params,
ExpectedStatus: []int{http.StatusOK},
RespEncoding: httpclient.EncodingJSON,
RespValue: &tree,
}
_, err = c.Request(&request)
return
}
func (c *KoofrClient) FilesDelete(mountId string, path string) (err error) {
return c.filesDelete(mountId, path, nil)
}
func (c *KoofrClient) FilesDeleteIf(mountId string, path string, deleteFilter *DeleteFilter) (err error) {
return c.filesDelete(mountId, path, deleteFilter)
}
func (c *KoofrClient) filesDelete(mountId string, path string, deleteFilter *DeleteFilter) (err error) {
params := url.Values{}
params.Set("path", path)
if deleteFilter != nil {
if deleteFilter.Size != nil {
params.Set("removeIfSize", fmt.Sprintf("%d", *deleteFilter.Size))
}
if deleteFilter.Modified != nil {
params.Set("removeIfModified", fmt.Sprintf("%d", *deleteFilter.Modified))
}
if deleteFilter.Hash != nil {
params.Set("removeIfHash", fmt.Sprintf("%s", *deleteFilter.Hash))
}
if deleteFilter.IfEmpty {
params.Set("removeIfEmpty", "")
}
}
request := httpclient.RequestData{
Method: "DELETE",
Path: "/api/v2/mounts/" + mountId + "/files/remove",
Params: params,
ExpectedStatus: []int{http.StatusOK},
RespConsume: true,
}
_, err = c.Request(&request)
if err != nil {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusConflict {
return ErrCannotRemove
}
default:
return err
}
}
return
}
func (c *KoofrClient) FilesNewFolder(mountId string, path string, name string) (err error) {
reqData := FolderCreate{name}
params := url.Values{}
params.Set("path", path)
request := httpclient.RequestData{
Method: "POST",
Path: "/api/v2/mounts/" + mountId + "/files/folder",
Params: params,
ExpectedStatus: []int{http.StatusOK, http.StatusCreated},
ReqEncoding: httpclient.EncodingJSON,
ReqValue: reqData,
RespConsume: true,
}
_, err = c.Request(&request)
return
}
func (c *KoofrClient) FilesCopy(mountId string, path string, toMountId string, toPath string) (err error) {
reqData := FileCopy{toMountId, toPath}
params := url.Values{}
params.Set("path", path)
request := httpclient.RequestData{
Method: "PUT",
Path: "/api/v2/mounts/" + mountId + "/files/copy",
Params: params,
ExpectedStatus: []int{http.StatusOK},
ReqEncoding: httpclient.EncodingJSON,
ReqValue: reqData,
RespConsume: true,
}
_, err = c.Request(&request)
return
}
func (c *KoofrClient) FilesMove(mountId string, path string, toMountId string, toPath string) (err error) {
reqData := FileMove{toMountId, toPath}
params := url.Values{}
params.Set("path", path)
request := httpclient.RequestData{
Method: "PUT",
Path: "/api/v2/mounts/" + mountId + "/files/move",
Params: params,
ExpectedStatus: []int{http.StatusOK},
ReqEncoding: httpclient.EncodingJSON,
ReqValue: reqData,
RespConsume: true,
}
_, err = c.Request(&request)
return
}
func (c *KoofrClient) FilesGetRange(mountId string, path string, span *FileSpan) (reader io.ReadCloser, err error) {
params := url.Values{}
params.Set("path", path)
request := httpclient.RequestData{
Method: "GET",
Path: "/content/api/v2/mounts/" + mountId + "/files/get",
Params: params,
Headers: make(http.Header),
ExpectedStatus: []int{http.StatusOK, http.StatusPartialContent},
}
if span != nil {
if span.End == -1 {
request.Headers.Set("Range", fmt.Sprintf("bytes=%d-", span.Start))
} else {
request.Headers.Set("Range", fmt.Sprintf("bytes=%d-%d", span.Start, span.End))
}
}
res, err := c.Request(&request)
if err != nil {
return
}
reader = res.Body
return
}
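// Example sketch (hypothetical mount and path): read the first KiB of
// a file; a FileSpan with End == -1 requests everything from Start on.
//
// r, err := c.FilesGetRange(mountId, "/big.bin", &FileSpan{Start: 0, End: 1023})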
func (c *KoofrClient) FilesGet(mountId string, path string) (reader io.ReadCloser, err error) {
return c.FilesGetRange(mountId, path, nil)
}
func (c *KoofrClient) FilesPut(mountId string, path string, name string, reader io.Reader) (newName string, err error) {
info, err := c.FilesPutOptions(mountId, path, name, reader, nil)
return info.Name, err
}
func (c *KoofrClient) FilesPutOptions(mountId string, path string, name string, reader io.Reader, putFilter *PutFilter) (fileInfo *FileInfo, err error) {
params := url.Values{}
params.Set("path", path)
params.Set("filename", name)
params.Set("info", "true")
if putFilter != nil {
if putFilter.Size != nil {
params.Set("overwriteIfSize", fmt.Sprintf("%d", *putFilter.Size))
}
if putFilter.Modified != nil {
params.Set("overwriteIfModified", fmt.Sprintf("%d", *putFilter.Modified))
}
if putFilter.Hash != nil {
params.Set("overwriteIfHash", fmt.Sprintf("%s", *putFilter.Hash))
}
if putFilter.IgnoreNonExisting {
params.Set("overwriteIgnoreNonexisting", "")
}
if putFilter.NoRename {
params.Set("autorename", "false")
}
if putFilter.ForceOverwrite {
params.Set("overwrite", "true")
}
}
request := httpclient.RequestData{
Method: "POST",
Path: "/content/api/v2/mounts/" + mountId + "/files/put",
Params: params,
ExpectedStatus: []int{http.StatusOK},
RespEncoding: httpclient.EncodingJSON,
RespValue: &fileInfo,
}
err = request.UploadFile("file", "dummy", reader)
if err != nil {
return
}
_, err = c.Request(&request)
if err != nil {
switch err := err.(type) {
case httpclient.InvalidStatusError:
if err.Got == http.StatusConflict {
return nil, ErrCannotOverwrite
}
default:
return nil, err
}
}
return
}
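// Example sketch (hypothetical values): an upload which only
// overwrites if the existing file still has the expected size, via the
// overwriteIfSize parameter above; a 409 from the server surfaces as
// ErrCannotOverwrite.
//
// size := int64(123)
// info, err := c.FilesPutOptions(mountId, "/docs", "a.txt", reader,
//     &PutFilter{Size: &size})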

@@ -1,38 +0,0 @@
package koofrclient
import (
"github.com/koofr/go-httpclient"
"net/http"
)
func (c *KoofrClient) Mounts() (mounts []Mount, err error) {
d := &struct {
Mounts *[]Mount
}{&mounts}
request := httpclient.RequestData{
Method: "GET",
Path: "/api/v2/mounts",
ExpectedStatus: []int{http.StatusOK},
RespEncoding: httpclient.EncodingJSON,
RespValue: &d,
}
_, err = c.Request(&request)
return
}
func (c *KoofrClient) MountsDetails(mountId string) (mount Mount, err error) {
request := httpclient.RequestData{
Method: "GET",
Path: "/api/v2/mounts/" + mountId,
ExpectedStatus: []int{http.StatusOK},
RespEncoding: httpclient.EncodingJSON,
RespValue: &mount,
}
_, err = c.Request(&request)
return
}

@@ -1,25 +0,0 @@
package koofrclient
import (
"net/http"
"github.com/koofr/go-httpclient"
)
func (c *KoofrClient) Shared() (shared []Shared, err error) {
d := &struct {
Files *[]Shared
}{&shared}
request := httpclient.RequestData{
Method: "GET",
Path: "/api/v2/shared",
ExpectedStatus: []int{http.StatusOK},
RespEncoding: httpclient.EncodingJSON,
RespValue: &d,
}
_, err = c.Request(&request)
return
}

@@ -1,20 +0,0 @@
package koofrclient
import (
"github.com/koofr/go-httpclient"
"net/http"
)
func (c *KoofrClient) UserInfo() (user User, err error) {
request := httpclient.RequestData{
Method: "GET",
Path: "/api/v2/user",
ExpectedStatus: []int{http.StatusOK},
RespEncoding: httpclient.EncodingJSON,
RespValue: &user,
}
_, err = c.Request(&request)
return
}

@@ -2,6 +2,7 @@ language: go
sudo: false
go:
- 1.1.x
- 1.2.x
- 1.3.x
- 1.4.x
@@ -12,19 +13,18 @@ go:
- 1.9.x
- 1.10.x
- 1.11.x
- 1.12.x
- master
matrix:
include:
- go: 1.12.x
- go: 1.11.x
env: TEST_REAL_SERVER=rackspace
- go: 1.12.x
- go: 1.11.x
env: TEST_REAL_SERVER=memset
allow_failures:
- go: 1.12.x
- go: 1.11.x
env: TEST_REAL_SERVER=rackspace
- go: 1.12.x
- go: 1.11.x
env: TEST_REAL_SERVER=memset
install: go test -i ./...
script:

@@ -153,4 +153,3 @@ Contributors
- Omar Ali <omarali@users.noreply.github.com>
- Andreas Andersen <andreas@softwaredesign.se>
- kayrus <kay.diam@gmail.com>
- CodeLingo Bot <bot@codelingo.io>

vendor/github.com/ncw/swift/auth.go

@@ -6,7 +6,6 @@ import (
"net/http"
"net/url"
"strings"
"time"
)
// Auth defines the operations needed to authenticate with swift
@@ -26,11 +25,6 @@ type Authenticator interface {
CdnUrl() string
}
// Expireser is an optional interface to read the expiration time of the token
type Expireser interface {
Expires() time.Time
}
type CustomEndpointAuthenticator interface {
StorageUrlForEndpoint(endpointType EndpointType) string
}
@@ -246,15 +240,6 @@ func (auth *v2Auth) Token() string {
return auth.Auth.Access.Token.Id
}
// v2 Authentication - read expires
func (auth *v2Auth) Expires() time.Time {
t, err := time.Parse(time.RFC3339, auth.Auth.Access.Token.Expires)
if err != nil {
return time.Time{} // return Zero if not parsed
}
return t
}
// v2 Authentication - read cdn url
func (auth *v2Auth) CdnUrl() string {
return auth.endpointUrl("rax:object-cdn", EndpointTypePublic)

Some files were not shown because too many files have changed in this diff.