mirror of
https://github.com/rclone/rclone.git
synced 2026-02-18 02:19:07 +00:00
Compare commits
1 Commits
fix-mega-d
...
fix-3010-i
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5a8a6fbd3d |
@@ -36,7 +36,6 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
|
||||
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
|
||||
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
|
||||
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
|
||||
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
|
||||
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
|
||||
* Mega [:page_facing_up:](https://rclone.org/mega/)
|
||||
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
|
||||
|
||||
@@ -16,7 +16,6 @@ import (
|
||||
_ "github.com/ncw/rclone/backend/http"
|
||||
_ "github.com/ncw/rclone/backend/hubic"
|
||||
_ "github.com/ncw/rclone/backend/jottacloud"
|
||||
_ "github.com/ncw/rclone/backend/koofr"
|
||||
_ "github.com/ncw/rclone/backend/local"
|
||||
_ "github.com/ncw/rclone/backend/mega"
|
||||
_ "github.com/ncw/rclone/backend/onedrive"
|
||||
|
||||
@@ -1386,16 +1386,16 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
blob := o.getBlobReference()
|
||||
httpHeaders := azblob.BlobHTTPHeaders{}
|
||||
httpHeaders.ContentType = fs.MimeType(o)
|
||||
// Compute the Content-MD5 of the file, for multiparts uploads it
|
||||
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
|
||||
// Note: If multipart, a MD5 checksum will also be computed for each uploaded block
|
||||
// in order to validate its integrity during transport
|
||||
if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
|
||||
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
|
||||
if err == nil {
|
||||
httpHeaders.ContentMD5 = sourceMD5bytes
|
||||
} else {
|
||||
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
|
||||
// Multipart upload doesn't support MD5 checksums at put block calls, hence calculate
|
||||
// MD5 only for PutBlob requests
|
||||
if size < int64(o.fs.opt.UploadCutoff) {
|
||||
if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
|
||||
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
|
||||
if err == nil {
|
||||
httpHeaders.ContentMD5 = sourceMD5bytes
|
||||
} else {
|
||||
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -128,29 +128,6 @@ var (
|
||||
_linkTemplates map[string]*template.Template // available link types
|
||||
)
|
||||
|
||||
// Parse the scopes option returning a slice of scopes
|
||||
func driveScopes(scopesString string) (scopes []string) {
|
||||
if scopesString == "" {
|
||||
scopesString = defaultScope
|
||||
}
|
||||
for _, scope := range strings.Split(scopesString, ",") {
|
||||
scope = strings.TrimSpace(scope)
|
||||
scopes = append(scopes, scopePrefix+scope)
|
||||
}
|
||||
return scopes
|
||||
}
|
||||
|
||||
// Returns true if one of the scopes was "drive.appfolder"
|
||||
func driveScopesContainsAppFolder(scopes []string) bool {
|
||||
for _, scope := range scopes {
|
||||
if scope == scopePrefix+"drive.appfolder" {
|
||||
return true
|
||||
}
|
||||
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
@@ -165,14 +142,18 @@ func init() {
|
||||
fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Fill in the scopes
|
||||
driveConfig.Scopes = driveScopes(opt.Scope)
|
||||
// Set the root_folder_id if using drive.appfolder
|
||||
if driveScopesContainsAppFolder(driveConfig.Scopes) {
|
||||
m.Set("root_folder_id", "appDataFolder")
|
||||
if opt.Scope == "" {
|
||||
opt.Scope = defaultScope
|
||||
}
|
||||
driveConfig.Scopes = nil
|
||||
for _, scope := range strings.Split(opt.Scope, ",") {
|
||||
driveConfig.Scopes = append(driveConfig.Scopes, scopePrefix+strings.TrimSpace(scope))
|
||||
// Set the root_folder_id if using drive.appfolder
|
||||
if scope == "drive.appfolder" {
|
||||
m.Set("root_folder_id", "appDataFolder")
|
||||
}
|
||||
}
|
||||
|
||||
if opt.ServiceAccountFile == "" {
|
||||
err = oauthutil.Config("drive", name, m, driveConfig)
|
||||
if err != nil {
|
||||
@@ -676,33 +657,28 @@ func isPowerOfTwo(x int64) bool {
|
||||
}
|
||||
|
||||
// add a charset parameter to all text/* MIME types
|
||||
func fixMimeType(mimeTypeIn string) string {
|
||||
if mimeTypeIn == "" {
|
||||
return ""
|
||||
}
|
||||
mediaType, param, err := mime.ParseMediaType(mimeTypeIn)
|
||||
func fixMimeType(mimeType string) string {
|
||||
mediaType, param, err := mime.ParseMediaType(mimeType)
|
||||
if err != nil {
|
||||
return mimeTypeIn
|
||||
return mimeType
|
||||
}
|
||||
mimeTypeOut := mimeTypeIn
|
||||
if strings.HasPrefix(mediaType, "text/") && param["charset"] == "" {
|
||||
if strings.HasPrefix(mimeType, "text/") && param["charset"] == "" {
|
||||
param["charset"] = "utf-8"
|
||||
mimeTypeOut = mime.FormatMediaType(mediaType, param)
|
||||
mimeType = mime.FormatMediaType(mediaType, param)
|
||||
}
|
||||
if mimeTypeOut == "" {
|
||||
panic(errors.Errorf("unable to fix MIME type %q", mimeTypeIn))
|
||||
}
|
||||
return mimeTypeOut
|
||||
return mimeType
|
||||
}
|
||||
func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
|
||||
out = make(map[string][]string, len(in))
|
||||
for k, v := range in {
|
||||
func fixMimeTypeMap(m map[string][]string) map[string][]string {
|
||||
for _, v := range m {
|
||||
for i, mt := range v {
|
||||
v[i] = fixMimeType(mt)
|
||||
fixed := fixMimeType(mt)
|
||||
if fixed == "" {
|
||||
panic(errors.Errorf("unable to fix MIME type %q", mt))
|
||||
}
|
||||
v[i] = fixed
|
||||
}
|
||||
out[fixMimeType(k)] = v
|
||||
}
|
||||
return out
|
||||
return m
|
||||
}
|
||||
func isInternalMimeType(mimeType string) bool {
|
||||
return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
|
||||
@@ -799,8 +775,7 @@ func newPacer(opt *Options) *fs.Pacer {
|
||||
}
|
||||
|
||||
func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
|
||||
scopes := driveScopes(opt.Scope)
|
||||
conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
|
||||
conf, err := google.JWTConfigFromJSON(credentialsData, driveConfig.Scopes...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error processing credentials")
|
||||
}
|
||||
@@ -907,7 +882,6 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
ServerSideAcrossConfigs: true,
|
||||
}).Fill(f)
|
||||
|
||||
// Create a new authorized Drive client.
|
||||
@@ -2436,10 +2410,6 @@ func (o *baseObject) httpResponse(url, method string, options []fs.OpenOption) (
|
||||
return req, nil, err
|
||||
}
|
||||
fs.OpenOptionAddHTTPHeaders(req.Header, options)
|
||||
if o.bytes == 0 {
|
||||
// Don't supply range requests for 0 length objects as they always fail
|
||||
delete(req.Header, "Range")
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
res, err = o.fs.client.Do(req)
|
||||
if err == nil {
|
||||
|
||||
@@ -22,31 +22,6 @@ import (
|
||||
"google.golang.org/api/drive/v3"
|
||||
)
|
||||
|
||||
// TestDriveScopes checks that driveScopes expands the comma separated
// scopes option (including the empty/default case and whitespace
// trimming) into full scope URLs, and that
// driveScopesContainsAppFolder detects the appfolder scope.
func TestDriveScopes(t *testing.T) {
	for _, test := range []struct {
		in       string
		want     []string
		wantFlag bool // expected driveScopesContainsAppFolder result
	}{
		{"", []string{
			"https://www.googleapis.com/auth/drive",
		}, false},
		{" drive.file , drive.readonly", []string{
			"https://www.googleapis.com/auth/drive.file",
			"https://www.googleapis.com/auth/drive.readonly",
		}, false},
		{" drive.file , drive.appfolder", []string{
			"https://www.googleapis.com/auth/drive.file",
			"https://www.googleapis.com/auth/drive.appfolder",
		}, true},
	} {
		got := driveScopes(test.in)
		assert.Equal(t, test.want, got, test.in)
		gotFlag := driveScopesContainsAppFolder(got)
		assert.Equal(t, test.wantFlag, gotFlag, test.in)
	}
}
|
||||
|
||||
/*
|
||||
var additionalMimeTypes = map[string]string{
|
||||
"application/vnd.ms-excel.sheet.macroenabled.12": ".xlsm",
|
||||
|
||||
@@ -16,7 +16,6 @@ FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 erro
|
||||
*/
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
@@ -46,8 +45,6 @@ import (
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
"google.golang.org/api/googleapi"
|
||||
|
||||
// NOTE: This API is deprecated
|
||||
storage "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
@@ -147,22 +144,6 @@ func init() {
|
||||
Value: "publicReadWrite",
|
||||
Help: "Project team owners get OWNER access, and all Users get WRITER access.",
|
||||
}},
|
||||
}, {
|
||||
Name: "bucket_policy_only",
|
||||
Help: `Access checks should use bucket-level IAM policies.
|
||||
|
||||
If you want to upload objects to a bucket with Bucket Policy Only set
|
||||
then you will need to set this.
|
||||
|
||||
When it is set, rclone:
|
||||
|
||||
- ignores ACLs set on buckets
|
||||
- ignores ACLs set on objects
|
||||
- creates buckets with Bucket Policy Only set
|
||||
|
||||
Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
||||
`,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "location",
|
||||
Help: "Location for the newly created buckets.",
|
||||
@@ -260,7 +241,6 @@ type Options struct {
|
||||
ServiceAccountCredentials string `config:"service_account_credentials"`
|
||||
ObjectACL string `config:"object_acl"`
|
||||
BucketACL string `config:"bucket_acl"`
|
||||
BucketPolicyOnly bool `config:"bucket_policy_only"`
|
||||
Location string `config:"location"`
|
||||
StorageClass string `config:"storage_class"`
|
||||
}
|
||||
@@ -401,11 +381,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
} else {
|
||||
oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
|
||||
if err != nil {
|
||||
ctx := context.Background()
|
||||
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
|
||||
}
|
||||
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -733,19 +709,8 @@ func (f *Fs) Mkdir(dir string) (err error) {
|
||||
Location: f.opt.Location,
|
||||
StorageClass: f.opt.StorageClass,
|
||||
}
|
||||
if f.opt.BucketPolicyOnly {
|
||||
bucket.IamConfiguration = &storage.BucketIamConfiguration{
|
||||
BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
|
||||
Enabled: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
|
||||
if !f.opt.BucketPolicyOnly {
|
||||
insertBucket.PredefinedAcl(f.opt.BucketACL)
|
||||
}
|
||||
_, err = insertBucket.Do()
|
||||
_, err = f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket).PredefinedAcl(f.opt.BucketACL).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
@@ -1011,11 +976,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
}
|
||||
var newObject *storage.Object
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
insertObject := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
|
||||
if !o.fs.opt.BucketPolicyOnly {
|
||||
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
|
||||
}
|
||||
newObject, err = insertObject.Do()
|
||||
newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.opt.ObjectACL).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -1,589 +0,0 @@
|
||||
package koofr
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
|
||||
httpclient "github.com/koofr/go-httpclient"
|
||||
koofrclient "github.com/koofr/go-koofrclient"
|
||||
)
|
||||
|
||||
// Register Fs with rclone
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "koofr",
		Description: "Koofr",
		NewFs:       NewFs,
		Options: []fs.Option{
			{
				// API base URL; overridable for self-hosted/whitelabel deployments.
				Name:     "endpoint",
				Help:     "The Koofr API endpoint to use",
				Default:  "https://app.koofr.net",
				Required: true,
				Advanced: true,
			}, {
				// Empty means "use the primary mount" (resolved in NewFs).
				Name:     "mountid",
				Help:     "Mount ID of the mount to use. If omitted, the primary mount is used.",
				Required: false,
				Default:  "",
				Advanced: true,
			}, {
				Name:     "user",
				Help:     "Your Koofr user name",
				Required: true,
			}, {
				// Stored obscured; revealed with obscure.Reveal in NewFs.
				Name:       "password",
				Help:       "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
				IsPassword: true,
				Required:   true,
			},
		},
	})
}
|
||||
|
||||
// Options represent the configuration of the Koofr backend
type Options struct {
	Endpoint string `config:"endpoint"` // Koofr API base URL
	MountID  string `config:"mountid"`  // mount to use; empty selects the primary mount
	User     string `config:"user"`     // Koofr user name
	Password string `config:"password"` // obscured app password (revealed in NewFs)
}
|
||||
|
||||
// A Fs is a representation of a remote Koofr Fs
type Fs struct {
	name     string                   // name of this remote
	mountID  string                   // ID of the Koofr mount being used
	root     string                   // root path within the mount
	opt      Options                  // parsed configuration options
	features *fs.Features             // optional features
	client   *koofrclient.KoofrClient // authenticated Koofr API client
}
|
||||
|
||||
// An Object on the remote Koofr Fs
type Object struct {
	fs     *Fs                  // parent Fs
	remote string               // path of the object relative to the Fs root
	info   koofrclient.FileInfo // file metadata returned by the Koofr API
}
|
||||
|
||||
// base returns the final element of pth, normalising the degenerate
// results of path.Base ("" and ".") to "/".
func base(pth string) string {
	name := path.Base(pth)
	switch name {
	case "", ".":
		return "/"
	}
	return name
}
|
||||
|
||||
// dir returns all but the last element of pth, normalising the
// degenerate results of path.Dir ("" and ".") to "/".
func dir(pth string) string {
	parent := path.Dir(pth)
	switch parent {
	case "", ".":
		return "/"
	}
	return parent
}
|
||||
|
||||
// String returns a string representation of the remote Object (its
// remote path).
func (o *Object) String() string {
	return o.remote
}
|
||||
|
||||
// Remote returns the remote path of the Object, relative to Fs root.
func (o *Object) Remote() string {
	return o.remote
}
|
||||
|
||||
// ModTime returns the modification time of the Object.
//
// o.info.Modified is in milliseconds since the Unix epoch, so it is
// split here into whole seconds and the nanosecond remainder.
func (o *Object) ModTime() time.Time {
	return time.Unix(o.info.Modified/1000, (o.info.Modified%1000)*1000*1000)
}
|
||||
|
||||
// Size returns the size of the Object in bytes.
func (o *Object) Size() int64 {
	return o.info.Size
}
|
||||
|
||||
// Fs returns a reference to the Koofr Fs containing the Object.
func (o *Object) Fs() fs.Info {
	return o.fs
}
|
||||
|
||||
// Hash returns an MD5 hash of the Object
|
||||
func (o *Object) Hash(typ hash.Type) (string, error) {
|
||||
if typ == hash.MD5 {
|
||||
return o.info.Hash, nil
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// fullPath returns the full path of the remote Object (including the
// Fs root).
func (o *Object) fullPath() string {
	return o.fs.fullPath(o.remote)
}
|
||||
|
||||
// Storable returns true if the Object is storable (always true for
// Koofr).
func (o *Object) Storable() bool {
	return true
}
|
||||
|
||||
// SetModTime is not supported.
//
// Koofr cannot change modification times, so this is a deliberate
// no-op; Precision() reports fs.ModTimeNotSupported accordingly.
func (o *Object) SetModTime(mtime time.Time) error {
	return nil
}
|
||||
|
||||
// Open opens the Object for reading, honouring fs.SeekOption and
// fs.RangeOption to request a sub-span of the file.
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
	// sOff/eOff are the requested start/end offsets; eOff == -1 means
	// "to the end of the file".
	var sOff, eOff int64 = 0, -1

	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			sOff = x.Offset
		case *fs.RangeOption:
			sOff = x.Start
			eOff = x.End
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	// Whole-file read: no range request needed.
	if sOff == 0 && eOff < 0 {
		return o.fs.client.FilesGet(o.fs.mountID, o.fullPath())
	}
	// A negative start presumably encodes a suffix range ("last eOff
	// bytes" as per fs.RangeOption semantics) — TODO confirm against
	// the callers that build these options.
	if sOff < 0 {
		sOff = o.Size() - eOff
		eOff = o.Size()
	}
	// Clamp the end of the span to the file size.
	if eOff > o.Size() {
		eOff = o.Size()
	}
	span := &koofrclient.FileSpan{
		Start: sOff,
		End:   eOff,
	}
	return o.fs.client.FilesGetRange(o.fs.mountID, o.fullPath(), span)
}
|
||||
|
||||
// Update updates the Object contents
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
putopts := &koofrclient.PutFilter{
|
||||
ForceOverwrite: true,
|
||||
NoRename: true,
|
||||
IgnoreNonExisting: true,
|
||||
}
|
||||
fullPath := o.fullPath()
|
||||
dirPath := dir(fullPath)
|
||||
name := base(fullPath)
|
||||
err := o.fs.mkdir(dirPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
info, err := o.fs.client.FilesPutOptions(o.fs.mountID, dirPath, name, in, putopts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.info = *info
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove deletes the remote Object via the Koofr delete API.
func (o *Object) Remove() error {
	return o.fs.client.FilesDelete(o.fs.mountID, o.fullPath())
}
|
||||
|
||||
// Name returns the name of the Fs (the remote's config name).
func (f *Fs) Name() string {
	return f.name
}
|
||||
|
||||
// Root returns the root path of the Fs within the mount.
func (f *Fs) Root() string {
	return f.root
}
|
||||
|
||||
// String returns a string representation of the Fs in the form
// "koofr:<mountID>:<root>".
func (f *Fs) String() string {
	return "koofr:" + f.mountID + ":" + f.root
}
|
||||
|
||||
// Features returns the optional features supported by this Fs.
func (f *Fs) Features() *fs.Features {
	return f.features
}
|
||||
|
||||
// Precision denotes that setting modification times is not supported.
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}
|
||||
|
||||
// Hashes returns the set of hash types provided by the Fs (MD5 only).
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}
|
||||
|
||||
// fullPath constructs a full, absolute path from an Fs root relative
// path.
func (f *Fs) fullPath(part string) string {
	return path.Join("/", f.root, part)
}
|
||||
|
||||
// NewFs constructs a new filesystem given a root path and configuration options
|
||||
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
||||
opt := new(Options)
|
||||
err = configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pass, err := obscure.Reveal(opt.Password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := koofrclient.NewKoofrClient(opt.Endpoint, false)
|
||||
basicAuth := fmt.Sprintf("Basic %s",
|
||||
base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
|
||||
client.HTTPClient.Headers.Set("Authorization", basicAuth)
|
||||
mounts, err := client.Mounts()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
client: client,
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
DuplicateFiles: false,
|
||||
BucketBased: false,
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(f)
|
||||
for _, m := range mounts {
|
||||
if opt.MountID != "" {
|
||||
if m.Id == opt.MountID {
|
||||
f.mountID = m.Id
|
||||
break
|
||||
}
|
||||
} else if m.IsPrimary {
|
||||
f.mountID = m.Id
|
||||
break
|
||||
}
|
||||
}
|
||||
if f.mountID == "" {
|
||||
if opt.MountID == "" {
|
||||
return nil, errors.New("Failed to find primary mount")
|
||||
}
|
||||
return nil, errors.New("Failed to find mount " + opt.MountID)
|
||||
}
|
||||
rootFile, err := f.client.FilesInfo(f.mountID, "/"+f.root)
|
||||
if err == nil && rootFile.Type != "dir" {
|
||||
f.root = dir(f.root)
|
||||
err = fs.ErrorIsFile
|
||||
} else {
|
||||
err = nil
|
||||
}
|
||||
return f, err
|
||||
}
|
||||
|
||||
// List returns a list of items in a directory
|
||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
|
||||
if err != nil {
|
||||
return nil, translateErrorsDir(err)
|
||||
}
|
||||
entries = make([]fs.DirEntry, len(files))
|
||||
for i, file := range files {
|
||||
if file.Type == "dir" {
|
||||
entries[i] = fs.NewDir(path.Join(dir, file.Name), time.Unix(0, 0))
|
||||
} else {
|
||||
entries[i] = &Object{
|
||||
fs: f,
|
||||
info: file,
|
||||
remote: path.Join(dir, file.Name),
|
||||
}
|
||||
}
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// NewObject creates a new remote Object for a given remote path
|
||||
func (f *Fs) NewObject(remote string) (obj fs.Object, err error) {
|
||||
info, err := f.client.FilesInfo(f.mountID, f.fullPath(remote))
|
||||
if err != nil {
|
||||
return nil, translateErrorsObject(err)
|
||||
}
|
||||
if info.Type == "dir" {
|
||||
return nil, fs.ErrorNotAFile
|
||||
}
|
||||
return &Object{
|
||||
fs: f,
|
||||
info: info,
|
||||
remote: remote,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Put updates a remote Object
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) {
|
||||
putopts := &koofrclient.PutFilter{
|
||||
ForceOverwrite: true,
|
||||
NoRename: true,
|
||||
IgnoreNonExisting: true,
|
||||
}
|
||||
fullPath := f.fullPath(src.Remote())
|
||||
dirPath := dir(fullPath)
|
||||
name := base(fullPath)
|
||||
err = f.mkdir(dirPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info, err := f.client.FilesPutOptions(f.mountID, dirPath, name, in, putopts)
|
||||
if err != nil {
|
||||
return nil, translateErrorsObject(err)
|
||||
}
|
||||
return &Object{
|
||||
fs: f,
|
||||
info: *info,
|
||||
remote: src.Remote(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// PutStream updates a remote Object with a stream of unknown size
|
||||
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return f.Put(in, src, options...)
|
||||
}
|
||||
|
||||
// isBadRequest is a predicate which holds true iff the error returned was
|
||||
// HTTP status 400
|
||||
func isBadRequest(err error) bool {
|
||||
switch err := err.(type) {
|
||||
case httpclient.InvalidStatusError:
|
||||
if err.Got == http.StatusBadRequest {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// translateErrorsDir translates koofr errors to rclone errors (for a dir
|
||||
// operation)
|
||||
func translateErrorsDir(err error) error {
|
||||
switch err := err.(type) {
|
||||
case httpclient.InvalidStatusError:
|
||||
if err.Got == http.StatusNotFound {
|
||||
return fs.ErrorDirNotFound
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// translatesErrorsObject translates Koofr errors to rclone errors (for an object operation)
|
||||
func translateErrorsObject(err error) error {
|
||||
switch err := err.(type) {
|
||||
case httpclient.InvalidStatusError:
|
||||
if err.Got == http.StatusNotFound {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// mkdir creates a directory at the given absolute path. Creates
// ancestors if necessary.
func (f *Fs) mkdir(fullPath string) error {
	if fullPath == "/" {
		return nil
	}
	// Fast path: nothing to do when the directory already exists.
	info, err := f.client.FilesInfo(f.mountID, fullPath)
	if err == nil && info.Type == "dir" {
		return nil
	}
	err = translateErrorsDir(err)
	// Only "not found" is recoverable here; anything else aborts.
	if err != nil && err != fs.ErrorDirNotFound {
		return err
	}
	// Walk down from the root, creating each missing path component.
	dirs := strings.Split(fullPath, "/")
	parent := "/"
	for _, part := range dirs {
		if part == "" {
			// Skip empty components from leading or doubled slashes.
			continue
		}
		info, err = f.client.FilesInfo(f.mountID, path.Join(parent, part))
		if err != nil || info.Type != "dir" {
			err = translateErrorsDir(err)
			if err != nil && err != fs.ErrorDirNotFound {
				return err
			}
			err = f.client.FilesNewFolder(f.mountID, parent, part)
			// A 400 response is tolerated — presumably it means the
			// folder already exists (e.g. created concurrently); TODO
			// confirm against the Koofr API.
			if err != nil && !isBadRequest(err) {
				return err
			}
		}
		parent = path.Join(parent, part)
	}
	return nil
}
|
||||
|
||||
// Mkdir creates a directory at the given remote path. Creates ancestors if
|
||||
// necessary
|
||||
func (f *Fs) Mkdir(dir string) error {
|
||||
fullPath := f.fullPath(dir)
|
||||
return f.mkdir(fullPath)
|
||||
}
|
||||
|
||||
// Rmdir removes an (empty) directory at the given remote path
|
||||
func (f *Fs) Rmdir(dir string) error {
|
||||
files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
|
||||
if err != nil {
|
||||
return translateErrorsDir(err)
|
||||
}
|
||||
if len(files) > 0 {
|
||||
return fs.ErrorDirectoryNotEmpty
|
||||
}
|
||||
err = f.client.FilesDelete(f.mountID, f.fullPath(dir))
|
||||
if err != nil {
|
||||
return translateErrorsDir(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Copy copies a remote Object to the given path
|
||||
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
dstFullPath := f.fullPath(remote)
|
||||
dstDir := dir(dstFullPath)
|
||||
err := f.mkdir(dstDir)
|
||||
if err != nil {
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
err = f.client.FilesCopy((src.(*Object)).fs.mountID,
|
||||
(src.(*Object)).fs.fullPath((src.(*Object)).remote),
|
||||
f.mountID, dstFullPath)
|
||||
if err != nil {
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
return f.NewObject(remote)
|
||||
}
|
||||
|
||||
// Move moves a remote Object to the given path
|
||||
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj := src.(*Object)
|
||||
dstFullPath := f.fullPath(remote)
|
||||
dstDir := dir(dstFullPath)
|
||||
err := f.mkdir(dstDir)
|
||||
if err != nil {
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
err = f.client.FilesMove(srcObj.fs.mountID,
|
||||
srcObj.fs.fullPath(srcObj.remote), f.mountID, dstFullPath)
|
||||
if err != nil {
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
return f.NewObject(remote)
|
||||
}
|
||||
|
||||
// DirMove moves a remote directory to the given path
|
||||
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
||||
srcFs := src.(*Fs)
|
||||
srcFullPath := srcFs.fullPath(srcRemote)
|
||||
dstFullPath := f.fullPath(dstRemote)
|
||||
if srcFs.mountID == f.mountID && srcFullPath == dstFullPath {
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
dstDir := dir(dstFullPath)
|
||||
err := f.mkdir(dstDir)
|
||||
if err != nil {
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
err = f.client.FilesMove(srcFs.mountID, srcFullPath, f.mountID, dstFullPath)
|
||||
if err != nil {
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// About reports space usage (with a MB precision)
//
// The mount's SpaceTotal/SpaceUsed values are scaled by 1024*1024 to
// bytes here; trashed, other and object counts are not available from
// the API.
func (f *Fs) About() (*fs.Usage, error) {
	mount, err := f.client.MountsDetails(f.mountID)
	if err != nil {
		return nil, err
	}
	return &fs.Usage{
		Total:   fs.NewUsageValue(mount.SpaceTotal * 1024 * 1024),
		Used:    fs.NewUsageValue(mount.SpaceUsed * 1024 * 1024),
		Trashed: nil,
		Other:   nil,
		Free:    fs.NewUsageValue((mount.SpaceTotal - mount.SpaceUsed) * 1024 * 1024),
		Objects: nil,
	}, nil
}
|
||||
|
||||
// Purge purges the complete Fs
|
||||
func (f *Fs) Purge() error {
|
||||
err := translateErrorsDir(f.client.FilesDelete(f.mountID, f.fullPath("")))
|
||||
return err
|
||||
}
|
||||
|
||||
// linkCreate is a Koofr API request for creating a public link
type linkCreate struct {
	Path string `json:"path"` // mount-absolute path to share
}
|
||||
|
||||
// link is a Koofr API response to creating a public link
//
// The fields mirror the JSON body returned by the links endpoint;
// only ShortURL is consumed by this backend (see PublicLink).
type link struct {
	ID               string `json:"id"`
	Name             string `json:"name"`
	Path             string `json:"path"`
	Counter          int64  `json:"counter"`
	URL              string `json:"url"`
	ShortURL         string `json:"shortUrl"`
	Hash             string `json:"hash"`
	Host             string `json:"host"`
	HasPassword      bool   `json:"hasPassword"`
	Password         string `json:"password"`
	ValidFrom        int64  `json:"validFrom"`
	ValidTo          int64  `json:"validTo"`
	PasswordRequired bool   `json:"passwordRequired"`
}
|
||||
|
||||
// createLink makes a Koofr API call to create a public link
//
// It POSTs the path as JSON to /api/v2/mounts/<mountID>/links and
// decodes the JSON response; both 200 OK and 201 Created count as
// success.
func createLink(c *koofrclient.KoofrClient, mountID string, path string) (*link, error) {
	linkCreate := linkCreate{
		Path: path,
	}
	linkData := link{}

	request := httpclient.RequestData{
		Method:         "POST",
		Path:           "/api/v2/mounts/" + mountID + "/links",
		ExpectedStatus: []int{http.StatusOK, http.StatusCreated},
		ReqEncoding:    httpclient.EncodingJSON,
		ReqValue:       linkCreate,
		RespEncoding:   httpclient.EncodingJSON,
		RespValue:      &linkData,
	}

	_, err := c.Request(&request)
	if err != nil {
		return nil, err
	}
	return &linkData, nil
}
|
||||
|
||||
// PublicLink creates a public link to the remote path
|
||||
func (f *Fs) PublicLink(remote string) (string, error) {
|
||||
linkData, err := createLink(f.client, f.mountID, f.fullPath(remote))
|
||||
if err != nil {
|
||||
return "", translateErrorsDir(err)
|
||||
}
|
||||
return linkData.ShortURL, nil
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
package koofr_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
// configured as "TestKoofr:" using the shared fstests harness.
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestKoofr:",
	})
}
|
||||
@@ -36,7 +36,6 @@ docs = [
|
||||
"http.md",
|
||||
"hubic.md",
|
||||
"jottacloud.md",
|
||||
"koofr.md",
|
||||
"mega.md",
|
||||
"azureblob.md",
|
||||
"onedrive.md",
|
||||
|
||||
@@ -341,7 +341,8 @@ func initConfig() {
|
||||
configflags.SetFlags()
|
||||
|
||||
// Load filters
|
||||
err := filterflags.Reload()
|
||||
var err error
|
||||
filter.Active, err = filter.NewFilter(&filterflags.Opt)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to load filters: %v", err)
|
||||
}
|
||||
|
||||
@@ -7,13 +7,8 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
createEmptySrcDirs = false
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after copy")
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
@@ -74,7 +69,7 @@ changed recently very efficiently like this:
|
||||
fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
|
||||
cmd.Run(true, true, command, func() error {
|
||||
if srcFileName == "" {
|
||||
return sync.CopyDir(fdst, fsrc, createEmptySrcDirs)
|
||||
return sync.CopyDir(fdst, fsrc)
|
||||
}
|
||||
return operations.CopyFile(fdst, fsrc, srcFileName, srcFileName)
|
||||
})
|
||||
|
||||
@@ -48,7 +48,7 @@ destination.
|
||||
fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args)
|
||||
cmd.Run(true, true, command, func() error {
|
||||
if srcFileName == "" {
|
||||
return sync.CopyDir(fdst, fsrc, false)
|
||||
return sync.CopyDir(fdst, fsrc)
|
||||
}
|
||||
return operations.CopyFile(fdst, fsrc, dstFileName, srcFileName)
|
||||
})
|
||||
|
||||
@@ -10,13 +10,11 @@ import (
|
||||
// Globals
|
||||
var (
|
||||
deleteEmptySrcDirs = false
|
||||
createEmptySrcDirs = false
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
commandDefintion.Flags().BoolVarP(&deleteEmptySrcDirs, "delete-empty-src-dirs", "", deleteEmptySrcDirs, "Delete empty source dirs after move")
|
||||
commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after move")
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
@@ -54,7 +52,7 @@ can speed transfers up greatly.
|
||||
fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
|
||||
cmd.Run(true, true, command, func() error {
|
||||
if srcFileName == "" {
|
||||
return sync.MoveDir(fdst, fsrc, deleteEmptySrcDirs, createEmptySrcDirs)
|
||||
return sync.MoveDir(fdst, fsrc, deleteEmptySrcDirs)
|
||||
}
|
||||
return operations.MoveFile(fdst, fsrc, srcFileName, srcFileName)
|
||||
})
|
||||
|
||||
@@ -52,7 +52,7 @@ transfer.
|
||||
|
||||
cmd.Run(true, true, command, func() error {
|
||||
if srcFileName == "" {
|
||||
return sync.MoveDir(fdst, fsrc, false, false)
|
||||
return sync.MoveDir(fdst, fsrc, false)
|
||||
}
|
||||
return operations.MoveFile(fdst, fsrc, dstFileName, srcFileName)
|
||||
})
|
||||
|
||||
@@ -6,13 +6,8 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
createEmptySrcDirs = false
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefintion)
|
||||
commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after sync")
|
||||
}
|
||||
|
||||
var commandDefintion = &cobra.Command{
|
||||
@@ -44,7 +39,7 @@ go there.
|
||||
cmd.CheckArgs(2, 2, command, args)
|
||||
fsrc, fdst := cmd.NewFsSrcDst(args)
|
||||
cmd.Run(true, true, command, func() error {
|
||||
return sync.Sync(fdst, fsrc, createEmptySrcDirs)
|
||||
return sync.Sync(fdst, fsrc)
|
||||
})
|
||||
},
|
||||
}
|
||||
|
||||
@@ -29,7 +29,6 @@ Rclone is a command line program to sync files and directories to and from:
|
||||
* {{< provider name="Hubic" home="https://hubic.com/" config="/hubic/" >}}
|
||||
* {{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
|
||||
* {{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
|
||||
* {{< provider name="Koofr" home="https://koofr.eu/" config="/koofr/" >}}
|
||||
* {{< provider name="Memset Memstore" home="https://www.memset.com/cloud/storage/" config="/swift/" >}}
|
||||
* {{< provider name="Mega" home="https://mega.nz/" config="/mega/" >}}
|
||||
* {{< provider name="Microsoft Azure Blob Storage" home="https://azure.microsoft.com/en-us/services/storage/blobs/" config="/azureblob/" >}}
|
||||
|
||||
@@ -241,7 +241,3 @@ Contributors
|
||||
* Six <brbsix@gmail.com>
|
||||
* Alexandru Bumbacea <alexandru.bumbacea@booking.com>
|
||||
* calisro <robert.calistri@gmail.com>
|
||||
* Dr.Rx <david.rey@nventive.com>
|
||||
* marcintustin <marcintustin@users.noreply.github.com>
|
||||
* jaKa Močnik <jaka@koofr.net>
|
||||
* Fionera <fionera@fionera.de>
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
title: "Documentation"
|
||||
description: "Rclone Usage"
|
||||
date: "2019-02-25"
|
||||
date: "2015-06-06"
|
||||
---
|
||||
|
||||
Configure
|
||||
@@ -34,7 +34,6 @@ See the following for detailed instructions for
|
||||
* [HTTP](/http/)
|
||||
* [Hubic](/hubic/)
|
||||
* [Jottacloud](/jottacloud/)
|
||||
* [Koofr](/koofr/)
|
||||
* [Mega](/mega/)
|
||||
* [Microsoft Azure Blob Storage](/azureblob/)
|
||||
* [Microsoft OneDrive](/onedrive/)
|
||||
|
||||
@@ -188,10 +188,3 @@ causes not all domains to be resolved properly.
|
||||
Additionally with the `GODEBUG=netdns=` environment variable the Go
|
||||
resolver decision can be influenced. This also allows to resolve certain
|
||||
issues with DNS resolution. See the [name resolution section in the go docs](https://golang.org/pkg/net/#hdr-Name_Resolution).
|
||||
|
||||
### The total size reported in the stats for a sync is wrong and keeps changing
|
||||
|
||||
It is likely you have more than 10,000 files that need to be
|
||||
synced. By default rclone only gets 10,000 files ahead in a sync so as
|
||||
not to use up too much memory. You can change this default with the
|
||||
[--max-backlog](/docs/#max-backlog-n) flag.
|
||||
|
||||
@@ -217,20 +217,6 @@ the rclone config file, you can set `service_account_credentials` with
|
||||
the actual contents of the file instead, or set the equivalent
|
||||
environment variable.
|
||||
|
||||
### Application Default Credentials ###
|
||||
|
||||
If no other source of credentials is provided, rclone will fall back
|
||||
to
|
||||
[Application Default Credentials](https://cloud.google.com/video-intelligence/docs/common/auth#authenticating_with_application_default_credentials)
|
||||
this is useful both when you already have configured authentication
|
||||
for your developer account, or in production when running on a google
|
||||
compute host. Note that if running in docker, you may need to run
|
||||
additional commands on your google compute machine -
|
||||
[see this page](https://cloud.google.com/container-registry/docs/advanced-authentication#gcloud_as_a_docker_credential_helper).
|
||||
|
||||
Note that in the case application default credentials are used, there
|
||||
is no need to explicitly configure a project number.
|
||||
|
||||
### --fast-list ###
|
||||
|
||||
This remote supports `--fast-list` which allows you to use fewer
|
||||
@@ -342,27 +328,6 @@ Access Control List for new buckets.
|
||||
- "publicReadWrite"
|
||||
- Project team owners get OWNER access, and all Users get WRITER access.
|
||||
|
||||
#### --gcs-bucket-policy-only
|
||||
|
||||
Access checks should use bucket-level IAM policies.
|
||||
|
||||
If you want to upload objects to a bucket with Bucket Policy Only set
|
||||
then you will need to set this.
|
||||
|
||||
When it is set, rclone:
|
||||
|
||||
- ignores ACLs set on buckets
|
||||
- ignores ACLs set on objects
|
||||
- creates buckets with Bucket Policy Only set
|
||||
|
||||
Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
||||
|
||||
|
||||
- Config: bucket_policy_only
|
||||
- Env Var: RCLONE_GCS_BUCKET_POLICY_ONLY
|
||||
- Type: bool
|
||||
- Default: false
|
||||
|
||||
#### --gcs-location
|
||||
|
||||
Location for the newly created buckets.
|
||||
|
||||
@@ -1,189 +0,0 @@
|
||||
---
|
||||
title: "Koofr"
|
||||
description: "Rclone docs for Koofr"
|
||||
date: "2019-02-25"
|
||||
---
|
||||
|
||||
<i class="fa fa-suitcase"></i> Koofr
|
||||
-----------------------------------------
|
||||
|
||||
Paths are specified as `remote:path`
|
||||
|
||||
Paths may be as deep as required, eg `remote:directory/subdirectory`.
|
||||
|
||||
The initial setup for Koofr involves creating an application password for
|
||||
rclone. You can do that by opening the Koofr
|
||||
[web application](https://app.koofr.net/app/admin/preferences/password),
|
||||
giving the password a nice name like `rclone` and clicking on generate.
|
||||
|
||||
Here is an example of how to make a remote called `koofr`. First run:
|
||||
|
||||
rclone config
|
||||
|
||||
This will guide you through an interactive setup process:
|
||||
|
||||
```
|
||||
No remotes found - make a new one
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
q) Quit config
|
||||
n/s/q> n
|
||||
name> koofr
|
||||
Type of storage to configure.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
1 / A stackable unification remote, which can appear to merge the contents of several remotes
|
||||
\ "union"
|
||||
2 / Alias for a existing remote
|
||||
\ "alias"
|
||||
3 / Amazon Drive
|
||||
\ "amazon cloud drive"
|
||||
4 / Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)
|
||||
\ "s3"
|
||||
5 / Backblaze B2
|
||||
\ "b2"
|
||||
6 / Box
|
||||
\ "box"
|
||||
7 / Cache a remote
|
||||
\ "cache"
|
||||
8 / Dropbox
|
||||
\ "dropbox"
|
||||
9 / Encrypt/Decrypt a remote
|
||||
\ "crypt"
|
||||
10 / FTP Connection
|
||||
\ "ftp"
|
||||
11 / Google Cloud Storage (this is not Google Drive)
|
||||
\ "google cloud storage"
|
||||
12 / Google Drive
|
||||
\ "drive"
|
||||
13 / Hubic
|
||||
\ "hubic"
|
||||
14 / JottaCloud
|
||||
\ "jottacloud"
|
||||
15 / Koofr
|
||||
\ "koofr"
|
||||
16 / Local Disk
|
||||
\ "local"
|
||||
17 / Mega
|
||||
\ "mega"
|
||||
18 / Microsoft Azure Blob Storage
|
||||
\ "azureblob"
|
||||
19 / Microsoft OneDrive
|
||||
\ "onedrive"
|
||||
20 / OpenDrive
|
||||
\ "opendrive"
|
||||
21 / Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)
|
||||
\ "swift"
|
||||
22 / Pcloud
|
||||
\ "pcloud"
|
||||
23 / QingCloud Object Storage
|
||||
\ "qingstor"
|
||||
24 / SSH/SFTP Connection
|
||||
\ "sftp"
|
||||
25 / Webdav
|
||||
\ "webdav"
|
||||
26 / Yandex Disk
|
||||
\ "yandex"
|
||||
27 / http Connection
|
||||
\ "http"
|
||||
Storage> koofr
|
||||
** See help for koofr backend at: https://rclone.org/koofr/ **
|
||||
|
||||
Your Koofr user name
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
user> USER@NAME
|
||||
Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)
|
||||
y) Yes type in my own password
|
||||
g) Generate random password
|
||||
y/g> y
|
||||
Enter the password:
|
||||
password:
|
||||
Confirm the password:
|
||||
password:
|
||||
Edit advanced config? (y/n)
|
||||
y) Yes
|
||||
n) No
|
||||
y/n> n
|
||||
Remote config
|
||||
--------------------
|
||||
[koofr]
|
||||
type = koofr
|
||||
baseurl = https://app.koofr.net
|
||||
user = USER@NAME
|
||||
password = *** ENCRYPTED ***
|
||||
--------------------
|
||||
y) Yes this is OK
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d> y
|
||||
```
|
||||
|
||||
You can choose to edit advanced config in order to enter your own service URL
|
||||
if you use an on-premise or white label Koofr instance, or choose an alternative
|
||||
mount instead of your primary storage.
|
||||
|
||||
Once configured you can then use `rclone` like this,
|
||||
|
||||
List directories in top level of your Koofr
|
||||
|
||||
rclone lsd koofr:
|
||||
|
||||
List all the files in your Koofr
|
||||
|
||||
rclone ls koofr:
|
||||
|
||||
To copy a local directory to an Koofr directory called backup
|
||||
|
||||
rclone copy /home/source remote:backup
|
||||
|
||||
<!--- autogenerated options start - DO NOT EDIT, instead edit fs.RegInfo in backend/koofr/koofr.go then run make backenddocs -->
|
||||
### Standard Options
|
||||
|
||||
Here are the standard options specific to koofr (Koofr).
|
||||
|
||||
#### --koofr-user
|
||||
|
||||
Your Koofr user name
|
||||
|
||||
- Config: user
|
||||
- Env Var: RCLONE_KOOFR_USER
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
#### --koofr-password
|
||||
|
||||
Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)
|
||||
|
||||
- Config: password
|
||||
- Env Var: RCLONE_KOOFR_PASSWORD
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
### Advanced Options
|
||||
|
||||
Here are the advanced options specific to koofr (Koofr).
|
||||
|
||||
#### --koofr-baseurl
|
||||
|
||||
Base URL of the Koofr API to connect to
|
||||
|
||||
- Config: baseurl
|
||||
- Env Var: RCLONE_KOOFR_BASEURL
|
||||
- Type: string
|
||||
- Default: "https://app.koofr.net"
|
||||
|
||||
#### --koofr-mountid
|
||||
|
||||
Mount ID of the mount to use. If omitted, the primary mount is used.
|
||||
|
||||
- Config: mountid
|
||||
- Env Var: RCLONE_KOOFR_MOUNTID
|
||||
- Type: string
|
||||
- Default: ""
|
||||
|
||||
<!--- autogenerated options stop -->
|
||||
|
||||
### Limitations ###
|
||||
|
||||
Note that Koofr is case insensitive so you can't have a file called
|
||||
"Hello.doc" and one called "hello.doc".
|
||||
@@ -2,7 +2,7 @@
|
||||
title: "Overview of cloud storage systems"
|
||||
description: "Overview of cloud storage systems"
|
||||
type: page
|
||||
date: "2019-02-25"
|
||||
date: "2015-09-06"
|
||||
---
|
||||
|
||||
# Overview of cloud storage systems #
|
||||
@@ -28,7 +28,6 @@ Here is an overview of the major features of each cloud storage system.
|
||||
| HTTP | - | No | No | No | R |
|
||||
| Hubic | MD5 | Yes | No | No | R/W |
|
||||
| Jottacloud | MD5 | Yes | Yes | No | R/W |
|
||||
| Koofr | MD5 | No | Yes | No | - |
|
||||
| Mega | - | No | No | Yes | - |
|
||||
| Microsoft Azure Blob Storage | MD5 | Yes | No | No | R/W |
|
||||
| Microsoft OneDrive | SHA1 ‡‡ | Yes | Yes | No | R |
|
||||
|
||||
@@ -1112,11 +1112,6 @@ server_side_encryption =
|
||||
storage_class =
|
||||
```
|
||||
|
||||
If you are using an older version of CEPH, eg 10.2.x Jewel, then you
|
||||
may need to supply the parameter `--s3-upload-cutoff 0` or put this in
|
||||
the config file as `upload_cutoff 0` to work around a bug which causes
|
||||
uploading of small files to fail.
|
||||
|
||||
Note also that Ceph sometimes puts `/` in the passwords it gives
|
||||
users. If you read the secret access key using the command line tools
|
||||
you will get a JSON blob with the `/` escaped as `\/`. Make sure you
|
||||
|
||||
@@ -67,7 +67,6 @@
|
||||
<li><a href="/http/"><i class="fa fa-globe"></i> HTTP</a></li>
|
||||
<li><a href="/hubic/"><i class="fa fa-space-shuttle"></i> Hubic</a></li>
|
||||
<li><a href="/jottacloud/"><i class="fa fa-cloud"></i> Jottacloud</a></li>
|
||||
<li><a href="/koofr/"><i class="fa fa-suitcase"></i> Koofr</a></li>
|
||||
<li><a href="/mega/"><i class="fa fa-archive"></i> Mega</a></li>
|
||||
<li><a href="/azureblob/"><i class="fa fa-windows"></i> Microsoft Azure Blob Storage</a></li>
|
||||
<li><a href="/onedrive/"><i class="fa fa-windows"></i> Microsoft OneDrive</a></li>
|
||||
|
||||
@@ -13,15 +13,9 @@ var (
|
||||
Opt = filter.DefaultOpt
|
||||
)
|
||||
|
||||
// Reload the filters from the flags
|
||||
func Reload() (err error) {
|
||||
filter.Active, err = filter.NewFilter(&Opt)
|
||||
return err
|
||||
}
|
||||
|
||||
// AddFlags adds the non filing system specific flags to the command
|
||||
func AddFlags(flagSet *pflag.FlagSet) {
|
||||
rc.AddOptionReload("filter", &Opt, Reload)
|
||||
rc.AddOption("filter", &Opt)
|
||||
flags.BoolVarP(flagSet, &Opt.DeleteExcluded, "delete-excluded", "", false, "Delete files on dest excluded from sync")
|
||||
flags.StringArrayVarP(flagSet, &Opt.FilterRule, "filter", "f", nil, "Add a file-filtering rule")
|
||||
flags.StringArrayVarP(flagSet, &Opt.FilterFrom, "filter-from", "", nil, "Read filtering patterns from a file")
|
||||
|
||||
1
fs/fs.go
1
fs/fs.go
@@ -409,7 +409,6 @@ type Features struct {
|
||||
BucketBased bool // is bucket based (like s3, swift etc)
|
||||
SetTier bool // allows set tier functionality on objects
|
||||
GetTier bool // allows to retrieve storage tier of objects
|
||||
ServerSideAcrossConfigs bool // can server side copy between different remotes of the same type
|
||||
|
||||
// Purge all files in the root and the root directory
|
||||
//
|
||||
|
||||
@@ -273,7 +273,7 @@ func Copy(f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Objec
|
||||
// Try server side copy first - if has optional interface and
|
||||
// is same underlying remote
|
||||
actionTaken = "Copied (server side copy)"
|
||||
if doCopy := f.Features().Copy; doCopy != nil && (SameConfig(src.Fs(), f) || (SameRemoteType(src.Fs(), f) && f.Features().ServerSideAcrossConfigs)) {
|
||||
if doCopy := f.Features().Copy; doCopy != nil && SameConfig(src.Fs(), f) {
|
||||
newDst, err = doCopy(src, remote)
|
||||
if err == nil {
|
||||
dst = newDst
|
||||
@@ -392,7 +392,7 @@ func Move(fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Ob
|
||||
return newDst, nil
|
||||
}
|
||||
// See if we have Move available
|
||||
if doMove := fdst.Features().Move; doMove != nil && (SameConfig(src.Fs(), fdst) || (SameRemoteType(src.Fs(), fdst) && fdst.Features().ServerSideAcrossConfigs)) {
|
||||
if doMove := fdst.Features().Move; doMove != nil && SameConfig(src.Fs(), fdst) {
|
||||
// Delete destination if it exists
|
||||
if dst != nil {
|
||||
err = DeleteFile(dst)
|
||||
@@ -525,11 +525,6 @@ func DeleteFiles(toBeDeleted fs.ObjectsChan) error {
|
||||
return DeleteFilesWithBackupDir(toBeDeleted, nil)
|
||||
}
|
||||
|
||||
// SameRemoteType returns true if fdst and fsrc are the same type
|
||||
func SameRemoteType(fdst, fsrc fs.Info) bool {
|
||||
return fmt.Sprintf("%T", fdst) == fmt.Sprintf("%T", fsrc)
|
||||
}
|
||||
|
||||
// SameConfig returns true if fdst and fsrc are using the same config
|
||||
// file entry
|
||||
func SameConfig(fdst, fsrc fs.Info) bool {
|
||||
|
||||
@@ -8,23 +8,13 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
optionBlock = map[string]interface{}{}
|
||||
optionReload = map[string]func() error{}
|
||||
)
|
||||
var optionBlock = map[string]interface{}{}
|
||||
|
||||
// AddOption adds an option set
|
||||
func AddOption(name string, option interface{}) {
|
||||
optionBlock[name] = option
|
||||
}
|
||||
|
||||
// AddOptionReload adds an option set with a reload function to be
|
||||
// called when options are changed
|
||||
func AddOptionReload(name string, option interface{}, reload func() error) {
|
||||
optionBlock[name] = option
|
||||
optionReload[name] = reload
|
||||
}
|
||||
|
||||
func init() {
|
||||
Add(Call{
|
||||
Path: "options/blocks",
|
||||
@@ -113,12 +103,7 @@ func rcOptionsSet(in Params) (out Params, err error) {
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to write options from block %q", name)
|
||||
}
|
||||
if reload := optionReload[name]; reload != nil {
|
||||
err = reload()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to reload options from block %q", name)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
@@ -1,10 +1,8 @@
|
||||
package rc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
@@ -26,21 +24,9 @@ func TestAddOption(t *testing.T) {
|
||||
assert.Equal(t, len(optionBlock), 0)
|
||||
AddOption("potato", &testOptions)
|
||||
assert.Equal(t, len(optionBlock), 1)
|
||||
assert.Equal(t, len(optionReload), 0)
|
||||
assert.Equal(t, &testOptions, optionBlock["potato"])
|
||||
}
|
||||
|
||||
func TestAddOptionReload(t *testing.T) {
|
||||
defer clearOptionBlock()
|
||||
assert.Equal(t, len(optionBlock), 0)
|
||||
reload := func() error { return nil }
|
||||
AddOptionReload("potato", &testOptions, reload)
|
||||
assert.Equal(t, len(optionBlock), 1)
|
||||
assert.Equal(t, len(optionReload), 1)
|
||||
assert.Equal(t, &testOptions, optionBlock["potato"])
|
||||
assert.Equal(t, fmt.Sprintf("%p", reload), fmt.Sprintf("%p", optionReload["potato"]))
|
||||
}
|
||||
|
||||
func TestOptionsBlocks(t *testing.T) {
|
||||
defer clearOptionBlock()
|
||||
AddOption("potato", &testOptions)
|
||||
@@ -67,14 +53,7 @@ func TestOptionsGet(t *testing.T) {
|
||||
|
||||
func TestOptionsSet(t *testing.T) {
|
||||
defer clearOptionBlock()
|
||||
var reloaded int
|
||||
AddOptionReload("potato", &testOptions, func() error {
|
||||
if reloaded > 0 {
|
||||
return errors.New("error while reloading")
|
||||
}
|
||||
reloaded++
|
||||
return nil
|
||||
})
|
||||
AddOption("potato", &testOptions)
|
||||
call := Calls.Get("options/set")
|
||||
require.NotNil(t, call)
|
||||
|
||||
@@ -88,12 +67,6 @@ func TestOptionsSet(t *testing.T) {
|
||||
require.Nil(t, out)
|
||||
assert.Equal(t, 50, testOptions.Int)
|
||||
assert.Equal(t, "hello", testOptions.String)
|
||||
assert.Equal(t, 1, reloaded)
|
||||
|
||||
// error from reload
|
||||
_, err = call.Fn(in)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "error while reloading")
|
||||
|
||||
// unknown option block
|
||||
in = Params{
|
||||
@@ -112,5 +85,4 @@ func TestOptionsSet(t *testing.T) {
|
||||
_, err = call.Fn(in)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "failed to write options")
|
||||
|
||||
}
|
||||
|
||||
@@ -39,21 +39,17 @@ func rcSyncCopyMove(in rc.Params, name string) (out rc.Params, err error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
createEmptySrcDirs, err := in.GetBool("createEmptySrcDirs")
|
||||
if rc.NotErrParamNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
switch name {
|
||||
case "sync":
|
||||
return nil, Sync(dstFs, srcFs, createEmptySrcDirs)
|
||||
return nil, Sync(dstFs, srcFs)
|
||||
case "copy":
|
||||
return nil, CopyDir(dstFs, srcFs, createEmptySrcDirs)
|
||||
return nil, CopyDir(dstFs, srcFs)
|
||||
case "move":
|
||||
deleteEmptySrcDirs, err := in.GetBool("deleteEmptySrcDirs")
|
||||
if rc.NotErrParamNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
return nil, MoveDir(dstFs, srcFs, deleteEmptySrcDirs, createEmptySrcDirs)
|
||||
return nil, MoveDir(dstFs, srcFs, deleteEmptySrcDirs)
|
||||
}
|
||||
panic("unknown rcSyncCopyMove type")
|
||||
}
|
||||
|
||||
@@ -24,7 +24,6 @@ type syncCopyMove struct {
|
||||
fsrc fs.Fs
|
||||
deleteMode fs.DeleteMode // how we are doing deletions
|
||||
DoMove bool
|
||||
copyEmptySrcDirs bool
|
||||
deleteEmptySrcDirs bool
|
||||
dir string
|
||||
// internal state
|
||||
@@ -64,7 +63,7 @@ type syncCopyMove struct {
|
||||
suffix string // suffix to add to files placed in backupDir
|
||||
}
|
||||
|
||||
func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) (*syncCopyMove, error) {
|
||||
func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool) (*syncCopyMove, error) {
|
||||
if (deleteMode != fs.DeleteModeOff || DoMove) && operations.Overlapping(fdst, fsrc) {
|
||||
return nil, fserrors.FatalError(fs.ErrorOverlapping)
|
||||
}
|
||||
@@ -73,7 +72,6 @@ func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
|
||||
fsrc: fsrc,
|
||||
deleteMode: deleteMode,
|
||||
DoMove: DoMove,
|
||||
copyEmptySrcDirs: copyEmptySrcDirs,
|
||||
deleteEmptySrcDirs: deleteEmptySrcDirs,
|
||||
dir: "",
|
||||
srcFilesChan: make(chan fs.Object, fs.Config.Checkers+fs.Config.Transfers),
|
||||
@@ -691,9 +689,7 @@ func (s *syncCopyMove) run() error {
|
||||
s.stopTransfers()
|
||||
s.stopDeleters()
|
||||
|
||||
if s.copyEmptySrcDirs {
|
||||
s.processError(copyEmptyDirectories(s.fdst, s.srcEmptyDirs))
|
||||
}
|
||||
s.processError(copyEmptyDirectories(s.fdst, s.srcEmptyDirs))
|
||||
|
||||
// Delete files after
|
||||
if s.deleteMode == fs.DeleteModeAfter {
|
||||
@@ -856,7 +852,7 @@ func (s *syncCopyMove) Match(dst, src fs.DirEntry) (recurse bool) {
|
||||
// If DoMove is true then files will be moved instead of copied
|
||||
//
|
||||
// dir is the start directory, "" for root
|
||||
func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
|
||||
func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool) error {
|
||||
if deleteMode != fs.DeleteModeOff && DoMove {
|
||||
return fserrors.FatalError(errors.New("can't delete and move at the same time"))
|
||||
}
|
||||
@@ -866,7 +862,7 @@ func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
|
||||
return fserrors.FatalError(errors.New("can't use --delete-before with --track-renames"))
|
||||
}
|
||||
// only delete stuff during in this pass
|
||||
do, err := newSyncCopyMove(fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs, copyEmptySrcDirs)
|
||||
do, err := newSyncCopyMove(fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -877,7 +873,7 @@ func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
|
||||
// Next pass does a copy only
|
||||
deleteMode = fs.DeleteModeOff
|
||||
}
|
||||
do, err := newSyncCopyMove(fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs, copyEmptySrcDirs)
|
||||
do, err := newSyncCopyMove(fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -885,22 +881,22 @@ func runSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
|
||||
}
|
||||
|
||||
// Sync fsrc into fdst
|
||||
func Sync(fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
|
||||
return runSyncCopyMove(fdst, fsrc, fs.Config.DeleteMode, false, false, copyEmptySrcDirs)
|
||||
func Sync(fdst, fsrc fs.Fs) error {
|
||||
return runSyncCopyMove(fdst, fsrc, fs.Config.DeleteMode, false, false)
|
||||
}
|
||||
|
||||
// CopyDir copies fsrc into fdst
|
||||
func CopyDir(fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
|
||||
return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, false, false, copyEmptySrcDirs)
|
||||
func CopyDir(fdst, fsrc fs.Fs) error {
|
||||
return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, false, false)
|
||||
}
|
||||
|
||||
// moveDir moves fsrc into fdst
|
||||
func moveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
|
||||
return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs, copyEmptySrcDirs)
|
||||
func moveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool) error {
|
||||
return runSyncCopyMove(fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs)
|
||||
}
|
||||
|
||||
// MoveDir moves fsrc into fdst
|
||||
func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
|
||||
func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool) error {
|
||||
if operations.Same(fdst, fsrc) {
|
||||
fs.Errorf(fdst, "Nothing to do as source and destination are the same")
|
||||
return nil
|
||||
@@ -928,5 +924,5 @@ func MoveDir(fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) e
|
||||
}
|
||||
|
||||
// Otherwise move the files one by one
|
||||
return moveDir(fdst, fsrc, deleteEmptySrcDirs, copyEmptySrcDirs)
|
||||
return moveDir(fdst, fsrc, deleteEmptySrcDirs)
|
||||
}
|
||||
|
||||
@@ -40,7 +40,7 @@ func TestCopyWithDryRun(t *testing.T) {
|
||||
r.Mkdir(r.Fremote)
|
||||
|
||||
fs.Config.DryRun = true
|
||||
err := CopyDir(r.Fremote, r.Flocal, false)
|
||||
err := CopyDir(r.Fremote, r.Flocal)
|
||||
fs.Config.DryRun = false
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -55,7 +55,7 @@ func TestCopy(t *testing.T) {
|
||||
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||
r.Mkdir(r.Fremote)
|
||||
|
||||
err := CopyDir(r.Fremote, r.Flocal, false)
|
||||
err := CopyDir(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
@@ -72,7 +72,7 @@ func TestCopyNoTraverse(t *testing.T) {
|
||||
|
||||
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||
|
||||
err := CopyDir(r.Fremote, r.Flocal, false)
|
||||
err := CopyDir(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
@@ -90,7 +90,7 @@ func TestSyncNoTraverse(t *testing.T) {
|
||||
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
@@ -108,7 +108,7 @@ func TestCopyWithDepth(t *testing.T) {
|
||||
fs.Config.MaxDepth = 1
|
||||
defer func() { fs.Config.MaxDepth = -1 }()
|
||||
|
||||
err := CopyDir(r.Fremote, r.Flocal, false)
|
||||
err := CopyDir(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Flocal, file1, file2)
|
||||
@@ -136,7 +136,7 @@ func TestCopyWithFilesFrom(t *testing.T) {
|
||||
}
|
||||
defer unpatch()
|
||||
|
||||
err = CopyDir(r.Fremote, r.Flocal, false)
|
||||
err = CopyDir(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
unpatch()
|
||||
|
||||
@@ -153,59 +153,7 @@ func TestCopyEmptyDirectories(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
r.Mkdir(r.Fremote)
|
||||
|
||||
err = CopyDir(r.Fremote, r.Flocal, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckListingWithPrecision(
|
||||
t,
|
||||
r.Fremote,
|
||||
[]fstest.Item{
|
||||
file1,
|
||||
},
|
||||
[]string{
|
||||
"sub dir",
|
||||
"sub dir2",
|
||||
},
|
||||
fs.GetModifyWindow(r.Fremote),
|
||||
)
|
||||
}
|
||||
|
||||
// Test move empty directories
|
||||
func TestMoveEmptyDirectories(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||
err := operations.Mkdir(r.Flocal, "sub dir2")
|
||||
require.NoError(t, err)
|
||||
r.Mkdir(r.Fremote)
|
||||
|
||||
err = MoveDir(r.Fremote, r.Flocal, false, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckListingWithPrecision(
|
||||
t,
|
||||
r.Fremote,
|
||||
[]fstest.Item{
|
||||
file1,
|
||||
},
|
||||
[]string{
|
||||
"sub dir",
|
||||
"sub dir2",
|
||||
},
|
||||
fs.GetModifyWindow(r.Fremote),
|
||||
)
|
||||
}
|
||||
|
||||
// Test sync empty directories
|
||||
func TestSyncEmptyDirectories(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
file1 := r.WriteFile("sub dir/hello world", "hello world", t1)
|
||||
err := operations.Mkdir(r.Flocal, "sub dir2")
|
||||
require.NoError(t, err)
|
||||
r.Mkdir(r.Fremote)
|
||||
|
||||
err = Sync(r.Fremote, r.Flocal, true)
|
||||
err = CopyDir(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckListingWithPrecision(
|
||||
@@ -234,7 +182,7 @@ func TestServerSideCopy(t *testing.T) {
|
||||
defer finaliseCopy()
|
||||
t.Logf("Server side copy (if possible) %v -> %v", r.Fremote, FremoteCopy)
|
||||
|
||||
err = CopyDir(FremoteCopy, r.Fremote, false)
|
||||
err = CopyDir(FremoteCopy, r.Fremote)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, FremoteCopy, file1)
|
||||
@@ -252,7 +200,7 @@ func TestCopyAfterDelete(t *testing.T) {
|
||||
err := operations.Mkdir(r.Flocal, "")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = CopyDir(r.Fremote, r.Flocal, false)
|
||||
err = CopyDir(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Flocal)
|
||||
@@ -266,7 +214,7 @@ func TestCopyRedownload(t *testing.T) {
|
||||
file1 := r.WriteObject("sub dir/hello world", "hello world", t1)
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
|
||||
err := CopyDir(r.Flocal, r.Fremote, false)
|
||||
err := CopyDir(r.Flocal, r.Fremote)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test with combined precision of local and remote as we copied it there and back
|
||||
@@ -286,7 +234,7 @@ func TestSyncBasedOnCheckSum(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly one file.
|
||||
@@ -298,7 +246,7 @@ func TestSyncBasedOnCheckSum(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Flocal, file2)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err = Sync(r.Fremote, r.Flocal, false)
|
||||
err = Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred no files
|
||||
@@ -320,7 +268,7 @@ func TestSyncSizeOnly(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly one file.
|
||||
@@ -332,7 +280,7 @@ func TestSyncSizeOnly(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Flocal, file2)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err = Sync(r.Fremote, r.Flocal, false)
|
||||
err = Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred no files
|
||||
@@ -354,7 +302,7 @@ func TestSyncIgnoreSize(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly one file.
|
||||
@@ -366,7 +314,7 @@ func TestSyncIgnoreSize(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Flocal, file2)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err = Sync(r.Fremote, r.Flocal, false)
|
||||
err = Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred no files
|
||||
@@ -382,7 +330,7 @@ func TestSyncIgnoreTimes(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly 0 files because the
|
||||
@@ -393,7 +341,7 @@ func TestSyncIgnoreTimes(t *testing.T) {
|
||||
defer func() { fs.Config.IgnoreTimes = false }()
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err = Sync(r.Fremote, r.Flocal, false)
|
||||
err = Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly one file even though the
|
||||
@@ -413,7 +361,7 @@ func TestSyncIgnoreExisting(t *testing.T) {
|
||||
defer func() { fs.Config.IgnoreExisting = false }()
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
@@ -421,7 +369,7 @@ func TestSyncIgnoreExisting(t *testing.T) {
|
||||
// Change everything
|
||||
r.WriteFile("existing", "newpotatoes", t2)
|
||||
accounting.Stats.ResetCounters()
|
||||
err = Sync(r.Fremote, r.Flocal, false)
|
||||
err = Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
// Items should not change
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
@@ -469,7 +417,7 @@ func TestSyncIgnoreErrors(t *testing.T) {
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
fs.CountError(nil)
|
||||
assert.NoError(t, Sync(r.Fremote, r.Flocal, false))
|
||||
assert.NoError(t, Sync(r.Fremote, r.Flocal))
|
||||
|
||||
fstest.CheckListingWithPrecision(
|
||||
t,
|
||||
@@ -512,7 +460,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
|
||||
defer func() { fs.Config.DryRun = false }()
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
@@ -521,7 +469,7 @@ func TestSyncAfterChangingModtimeOnly(t *testing.T) {
|
||||
fs.Config.DryRun = false
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err = Sync(r.Fremote, r.Flocal, false)
|
||||
err = Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
@@ -549,7 +497,7 @@ func TestSyncAfterChangingModtimeOnlyWithNoUpdateModTime(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Fremote, file2)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
@@ -570,7 +518,7 @@ func TestSyncDoesntUpdateModtime(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Fremote, file2)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
@@ -590,7 +538,7 @@ func TestSyncAfterAddingAFile(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Flocal, file1, file2)
|
||||
fstest.CheckItems(t, r.Fremote, file1, file2)
|
||||
@@ -605,7 +553,7 @@ func TestSyncAfterChangingFilesSizeOnly(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Flocal, file2)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Flocal, file2)
|
||||
fstest.CheckItems(t, r.Fremote, file2)
|
||||
@@ -628,7 +576,7 @@ func TestSyncAfterChangingContentsOnly(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Flocal, file2)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Flocal, file2)
|
||||
fstest.CheckItems(t, r.Fremote, file2)
|
||||
@@ -644,7 +592,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileDryRun(t *testing.T) {
|
||||
|
||||
fs.Config.DryRun = true
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
fs.Config.DryRun = false
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -663,7 +611,7 @@ func TestSyncAfterRemovingAFileAndAddingAFile(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Flocal, file1, file3)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Flocal, file1, file3)
|
||||
fstest.CheckItems(t, r.Fremote, file1, file3)
|
||||
@@ -709,7 +657,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDir(t *testing.T) {
|
||||
)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckListingWithPrecision(
|
||||
@@ -779,7 +727,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) {
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
fs.CountError(nil)
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
assert.Equal(t, fs.ErrorNotDeleting, err)
|
||||
|
||||
fstest.CheckListingWithPrecision(
|
||||
@@ -856,7 +804,7 @@ func TestCopyDeleteBefore(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Flocal, file2)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := CopyDir(r.Fremote, r.Flocal, false)
|
||||
err := CopyDir(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckItems(t, r.Fremote, file1, file2)
|
||||
@@ -879,14 +827,14 @@ func TestSyncWithExclude(t *testing.T) {
|
||||
}()
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Fremote, file2, file1)
|
||||
|
||||
// Now sync the other way round and check enormous doesn't get
|
||||
// deleted as it is excluded from the sync
|
||||
accounting.Stats.ResetCounters()
|
||||
err = Sync(r.Flocal, r.Fremote, false)
|
||||
err = Sync(r.Flocal, r.Fremote)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Flocal, file2, file1, file3)
|
||||
}
|
||||
@@ -909,14 +857,14 @@ func TestSyncWithExcludeAndDeleteExcluded(t *testing.T) {
|
||||
}()
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Fremote, file2)
|
||||
|
||||
// Check sync the other way round to make sure enormous gets
|
||||
// deleted even though it is excluded
|
||||
accounting.Stats.ResetCounters()
|
||||
err = Sync(r.Flocal, r.Fremote, false)
|
||||
err = Sync(r.Flocal, r.Fremote)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Flocal, file2)
|
||||
}
|
||||
@@ -951,7 +899,7 @@ func TestSyncWithUpdateOlder(t *testing.T) {
|
||||
}()
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Fremote, oneO, twoF, threeO, fourF, fiveF)
|
||||
}
|
||||
@@ -975,7 +923,7 @@ func TestSyncWithTrackRenames(t *testing.T) {
|
||||
f2 := r.WriteFile("yam", "Yam Content", t2)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
require.NoError(t, Sync(r.Fremote, r.Flocal, false))
|
||||
require.NoError(t, Sync(r.Fremote, r.Flocal))
|
||||
|
||||
fstest.CheckItems(t, r.Fremote, f1, f2)
|
||||
fstest.CheckItems(t, r.Flocal, f1, f2)
|
||||
@@ -984,7 +932,7 @@ func TestSyncWithTrackRenames(t *testing.T) {
|
||||
f2 = r.RenameFile(f2, "yaml")
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
require.NoError(t, Sync(r.Fremote, r.Flocal, false))
|
||||
require.NoError(t, Sync(r.Fremote, r.Flocal))
|
||||
|
||||
fstest.CheckItems(t, r.Fremote, f1, f2)
|
||||
|
||||
@@ -1021,7 +969,7 @@ func testServerSideMove(t *testing.T, r *fstest.Run, withFilter, testDeleteEmpty
|
||||
|
||||
// Do server side move
|
||||
accounting.Stats.ResetCounters()
|
||||
err = MoveDir(FremoteMove, r.Fremote, testDeleteEmptyDirs, false)
|
||||
err = MoveDir(FremoteMove, r.Fremote, testDeleteEmptyDirs)
|
||||
require.NoError(t, err)
|
||||
|
||||
if withFilter {
|
||||
@@ -1048,7 +996,7 @@ func testServerSideMove(t *testing.T, r *fstest.Run, withFilter, testDeleteEmpty
|
||||
|
||||
// Move it back to a new empty remote, dst does not exist this time
|
||||
accounting.Stats.ResetCounters()
|
||||
err = MoveDir(FremoteMove2, FremoteMove, testDeleteEmptyDirs, false)
|
||||
err = MoveDir(FremoteMove2, FremoteMove, testDeleteEmptyDirs)
|
||||
require.NoError(t, err)
|
||||
|
||||
if withFilter {
|
||||
@@ -1073,7 +1021,7 @@ func TestMoveWithDeleteEmptySrcDirs(t *testing.T) {
|
||||
r.Mkdir(r.Fremote)
|
||||
|
||||
// run move with --delete-empty-src-dirs
|
||||
err := MoveDir(r.Fremote, r.Flocal, true, false)
|
||||
err := MoveDir(r.Fremote, r.Flocal, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckListingWithPrecision(
|
||||
@@ -1093,7 +1041,7 @@ func TestMoveWithoutDeleteEmptySrcDirs(t *testing.T) {
|
||||
file2 := r.WriteFile("nested/sub dir/file", "nested", t1)
|
||||
r.Mkdir(r.Fremote)
|
||||
|
||||
err := MoveDir(r.Fremote, r.Flocal, false, false)
|
||||
err := MoveDir(r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
fstest.CheckListingWithPrecision(
|
||||
@@ -1154,7 +1102,7 @@ func TestServerSideMoveOverlap(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
|
||||
// Subdir move with no filters should return ErrorCantMoveOverlapping
|
||||
err = MoveDir(FremoteMove, r.Fremote, false, false)
|
||||
err = MoveDir(FremoteMove, r.Fremote, false)
|
||||
assert.EqualError(t, err, fs.ErrorOverlapping.Error())
|
||||
|
||||
// Now try with a filter which should also fail with ErrorCantMoveOverlapping
|
||||
@@ -1162,7 +1110,7 @@ func TestServerSideMoveOverlap(t *testing.T) {
|
||||
defer func() {
|
||||
filter.Active.Opt.MinSize = -1
|
||||
}()
|
||||
err = MoveDir(FremoteMove, r.Fremote, false, false)
|
||||
err = MoveDir(FremoteMove, r.Fremote, false)
|
||||
assert.EqualError(t, err, fs.ErrorOverlapping.Error())
|
||||
}
|
||||
|
||||
@@ -1181,10 +1129,10 @@ func TestSyncOverlap(t *testing.T) {
|
||||
assert.Equal(t, fs.ErrorOverlapping.Error(), err.Error())
|
||||
}
|
||||
|
||||
checkErr(Sync(FremoteSync, r.Fremote, false))
|
||||
checkErr(Sync(r.Fremote, FremoteSync, false))
|
||||
checkErr(Sync(r.Fremote, r.Fremote, false))
|
||||
checkErr(Sync(FremoteSync, FremoteSync, false))
|
||||
checkErr(Sync(FremoteSync, r.Fremote))
|
||||
checkErr(Sync(r.Fremote, FremoteSync))
|
||||
checkErr(Sync(r.Fremote, r.Fremote))
|
||||
checkErr(Sync(FremoteSync, FremoteSync))
|
||||
}
|
||||
|
||||
// Test with BackupDir set
|
||||
@@ -1219,7 +1167,7 @@ func testSyncBackupDir(t *testing.T, suffix string) {
|
||||
require.NoError(t, err)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err = Sync(fdst, r.Flocal, false)
|
||||
err = Sync(fdst, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// one should be moved to the backup dir and the new one installed
|
||||
@@ -1240,7 +1188,7 @@ func testSyncBackupDir(t *testing.T, suffix string) {
|
||||
// This should delete three and overwrite one again, checking
|
||||
// the files got overwritten correctly in backup-dir
|
||||
accounting.Stats.ResetCounters()
|
||||
err = Sync(fdst, r.Flocal, false)
|
||||
err = Sync(fdst, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// one should be moved to the backup dir and the new one installed
|
||||
@@ -1277,7 +1225,7 @@ func TestSyncUTFNorm(t *testing.T) {
|
||||
fstest.CheckItems(t, r.Fremote, file2)
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We should have transferred exactly one file, but kept the
|
||||
@@ -1303,7 +1251,7 @@ func TestSyncImmutable(t *testing.T) {
|
||||
|
||||
// Should succeed
|
||||
accounting.Stats.ResetCounters()
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
require.NoError(t, err)
|
||||
fstest.CheckItems(t, r.Flocal, file1)
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
@@ -1315,7 +1263,7 @@ func TestSyncImmutable(t *testing.T) {
|
||||
|
||||
// Should fail with ErrorImmutableModified and not modify local or remote files
|
||||
accounting.Stats.ResetCounters()
|
||||
err = Sync(r.Fremote, r.Flocal, false)
|
||||
err = Sync(r.Fremote, r.Flocal)
|
||||
assert.EqualError(t, err, fs.ErrorImmutableModified.Error())
|
||||
fstest.CheckItems(t, r.Flocal, file2)
|
||||
fstest.CheckItems(t, r.Fremote, file1)
|
||||
@@ -1351,6 +1299,6 @@ func TestAbort(t *testing.T) {
|
||||
|
||||
accounting.Stats.ResetCounters()
|
||||
|
||||
err := Sync(r.Fremote, r.Flocal, false)
|
||||
err := Sync(r.Fremote, r.Flocal)
|
||||
assert.Equal(t, accounting.ErrorMaxTransferLimitReached, err)
|
||||
}
|
||||
|
||||
@@ -138,7 +138,3 @@ backends:
|
||||
remote: "TestUnion:"
|
||||
subdir: false
|
||||
fastlist: false
|
||||
- backend: "koofr"
|
||||
remote: "TestKoofr:"
|
||||
subdir: false
|
||||
fastlist: false
|
||||
|
||||
2
go.mod
2
go.mod
@@ -23,8 +23,6 @@ require (
|
||||
github.com/jlaffaye/ftp v0.0.0-20190126081051-8019e6774408
|
||||
github.com/jtolds/gls v4.2.1+incompatible // indirect
|
||||
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 // indirect
|
||||
github.com/koofr/go-httpclient v0.0.0-20180104120329-03786175608a // indirect
|
||||
github.com/koofr/go-koofrclient v0.0.0-20190131164641-7f327592caff
|
||||
github.com/kr/fs v0.1.0 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.4 // indirect
|
||||
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2
|
||||
|
||||
4
go.sum
4
go.sum
@@ -87,10 +87,6 @@ github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVY
|
||||
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 h1:PJPDf8OUfOK1bb/NeTKd4f1QXZItOX389VN3B6qC8ro=
|
||||
github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/koofr/go-httpclient v0.0.0-20180104120329-03786175608a h1:W+gnfphB7WpRj0rbTF40e3edULfri4fou2kUFw6AF3A=
|
||||
github.com/koofr/go-httpclient v0.0.0-20180104120329-03786175608a/go.mod h1:3xszwh+rNrYk1r9SStc4iJ326gne1OaBcrdB1ACsbzI=
|
||||
github.com/koofr/go-koofrclient v0.0.0-20190131164641-7f327592caff h1:GlfzG8bgyoJYz+5sMvGpYnHrg4veNVNnDGuE9hTEMHk=
|
||||
github.com/koofr/go-koofrclient v0.0.0-20190131164641-7f327592caff/go.mod h1:MRAz4Gsxd+OzrZ0owwrUHc0zLESL+1Y5syqK/sJxK2A=
|
||||
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
|
||||
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
|
||||
@@ -237,6 +237,7 @@ func TestGoogleDrivePacer(t *testing.T) {
|
||||
count := 0
|
||||
for i := 0; i < test.calls; i++ {
|
||||
sleep := c.Calculate(State{})
|
||||
time.Sleep(sleep)
|
||||
if sleep != 0 {
|
||||
count++
|
||||
}
|
||||
|
||||
11
vendor/github.com/koofr/go-httpclient/.gitignore
generated
vendored
11
vendor/github.com/koofr/go-httpclient/.gitignore
generated
vendored
@@ -1,11 +0,0 @@
|
||||
/bin
|
||||
/pkg
|
||||
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# developer specific
|
||||
*.sublime-workspace
|
||||
*.sublime-project
|
||||
21
vendor/github.com/koofr/go-httpclient/LICENSE
generated
vendored
21
vendor/github.com/koofr/go-httpclient/LICENSE
generated
vendored
@@ -1,21 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Koofr
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
15
vendor/github.com/koofr/go-httpclient/README.md
generated
vendored
15
vendor/github.com/koofr/go-httpclient/README.md
generated
vendored
@@ -1,15 +0,0 @@
|
||||
go-httpclient
|
||||
=============
|
||||
|
||||
Go HTTP client.
|
||||
|
||||
[](https://godoc.org/github.com/koofr/go-httpclient)
|
||||
|
||||
## Install
|
||||
|
||||
go get github.com/koofr/go-httpclient
|
||||
|
||||
## Testing
|
||||
|
||||
go get -t
|
||||
go test
|
||||
38
vendor/github.com/koofr/go-httpclient/errors.go
generated
vendored
38
vendor/github.com/koofr/go-httpclient/errors.go
generated
vendored
@@ -1,38 +0,0 @@
|
||||
package httpclient
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
type InvalidStatusError struct {
|
||||
Expected []int
|
||||
Got int
|
||||
Headers http.Header
|
||||
Content string
|
||||
}
|
||||
|
||||
func (e InvalidStatusError) Error() string {
|
||||
return fmt.Sprintf("Invalid response status! Got %d, expected %d; headers: %s, content: %s", e.Got, e.Expected, e.Headers, e.Content)
|
||||
}
|
||||
|
||||
func IsInvalidStatusError(err error) (invalidStatusError *InvalidStatusError, ok bool) {
|
||||
if ise, ok := err.(InvalidStatusError); ok {
|
||||
return &ise, true
|
||||
} else if ise, ok := err.(*InvalidStatusError); ok {
|
||||
return ise, true
|
||||
} else {
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
|
||||
func IsInvalidStatusCode(err error, statusCode int) bool {
|
||||
if ise, ok := IsInvalidStatusError(err); ok {
|
||||
return ise.Got == statusCode
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
var RateLimitTimeoutError = errors.New("HTTPClient rate limit timeout")
|
||||
351
vendor/github.com/koofr/go-httpclient/httpclient.go
generated
vendored
351
vendor/github.com/koofr/go-httpclient/httpclient.go
generated
vendored
@@ -1,351 +0,0 @@
|
||||
package httpclient
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var XmlHeaderBytes []byte = []byte(xml.Header)
|
||||
|
||||
type HTTPClient struct {
|
||||
BaseURL *url.URL
|
||||
Headers http.Header
|
||||
Client *http.Client
|
||||
PostHooks map[int]func(*http.Request, *http.Response) error
|
||||
rateLimited bool
|
||||
rateLimitChan chan struct{}
|
||||
rateLimitTimeout time.Duration
|
||||
}
|
||||
|
||||
func New() (httpClient *HTTPClient) {
|
||||
return &HTTPClient{
|
||||
Client: HttpClient,
|
||||
Headers: make(http.Header),
|
||||
PostHooks: make(map[int]func(*http.Request, *http.Response) error),
|
||||
}
|
||||
}
|
||||
|
||||
func Insecure() (httpClient *HTTPClient) {
|
||||
httpClient = New()
|
||||
httpClient.Client = InsecureHttpClient
|
||||
return httpClient
|
||||
}
|
||||
|
||||
var DefaultClient = New()
|
||||
|
||||
func (c *HTTPClient) SetPostHook(onStatus int, hook func(*http.Request, *http.Response) error) {
|
||||
c.PostHooks[onStatus] = hook
|
||||
}
|
||||
|
||||
func (c *HTTPClient) SetRateLimit(limit int, timeout time.Duration) {
|
||||
c.rateLimited = true
|
||||
c.rateLimitChan = make(chan struct{}, limit)
|
||||
|
||||
for i := 0; i < limit; i++ {
|
||||
c.rateLimitChan <- struct{}{}
|
||||
}
|
||||
|
||||
c.rateLimitTimeout = timeout
|
||||
}
|
||||
|
||||
func (c *HTTPClient) buildURL(req *RequestData) *url.URL {
|
||||
bu := c.BaseURL
|
||||
|
||||
rpath := req.Path
|
||||
|
||||
if strings.HasSuffix(bu.Path, "/") && strings.HasPrefix(rpath, "/") {
|
||||
rpath = rpath[1:]
|
||||
}
|
||||
|
||||
opaque := EscapePath(bu.Path + rpath)
|
||||
|
||||
u := &url.URL{
|
||||
Scheme: bu.Scheme,
|
||||
Host: bu.Host,
|
||||
Opaque: opaque,
|
||||
}
|
||||
|
||||
if req.Params != nil {
|
||||
u.RawQuery = req.Params.Encode()
|
||||
}
|
||||
|
||||
return u
|
||||
}
|
||||
|
||||
func (c *HTTPClient) setHeaders(req *RequestData, httpReq *http.Request) {
|
||||
switch req.RespEncoding {
|
||||
case EncodingJSON:
|
||||
httpReq.Header.Set("Accept", "application/json")
|
||||
case EncodingXML:
|
||||
httpReq.Header.Set("Accept", "application/xml")
|
||||
}
|
||||
|
||||
if c.Headers != nil {
|
||||
for key, values := range c.Headers {
|
||||
for _, value := range values {
|
||||
httpReq.Header.Set(key, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if req.Headers != nil {
|
||||
for key, values := range req.Headers {
|
||||
for _, value := range values {
|
||||
httpReq.Header.Set(key, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *HTTPClient) checkStatus(req *RequestData, response *http.Response) (err error) {
|
||||
if req.ExpectedStatus != nil {
|
||||
statusOk := false
|
||||
|
||||
for _, status := range req.ExpectedStatus {
|
||||
if response.StatusCode == status {
|
||||
statusOk = true
|
||||
}
|
||||
}
|
||||
|
||||
if !statusOk {
|
||||
lr := io.LimitReader(response.Body, 10*1024)
|
||||
contentBytes, _ := ioutil.ReadAll(lr)
|
||||
content := string(contentBytes)
|
||||
|
||||
err = InvalidStatusError{
|
||||
Expected: req.ExpectedStatus,
|
||||
Got: response.StatusCode,
|
||||
Headers: response.Header,
|
||||
Content: content,
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *HTTPClient) unmarshalResponse(req *RequestData, response *http.Response) (err error) {
|
||||
var buf []byte
|
||||
|
||||
switch req.RespEncoding {
|
||||
case EncodingJSON:
|
||||
defer response.Body.Close()
|
||||
|
||||
if buf, err = ioutil.ReadAll(response.Body); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(buf, req.RespValue)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
case EncodingXML:
|
||||
defer response.Body.Close()
|
||||
|
||||
if buf, err = ioutil.ReadAll(response.Body); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = xml.Unmarshal(buf, req.RespValue)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
switch req.RespValue.(type) {
|
||||
case *[]byte:
|
||||
defer response.Body.Close()
|
||||
|
||||
if buf, err = ioutil.ReadAll(response.Body); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
respVal := req.RespValue.(*[]byte)
|
||||
*respVal = buf
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if req.RespConsume {
|
||||
defer response.Body.Close()
|
||||
ioutil.ReadAll(response.Body)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *HTTPClient) marshalRequest(req *RequestData) (err error) {
|
||||
if req.ReqReader != nil || req.ReqValue == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if req.Headers == nil {
|
||||
req.Headers = make(http.Header)
|
||||
}
|
||||
|
||||
var buf []byte
|
||||
|
||||
switch req.ReqEncoding {
|
||||
case EncodingJSON:
|
||||
buf, err = json.Marshal(req.ReqValue)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req.ReqReader = bytes.NewReader(buf)
|
||||
req.Headers.Set("Content-Type", "application/json")
|
||||
req.Headers.Set("Content-Length", fmt.Sprintf("%d", len(buf)))
|
||||
|
||||
req.ReqContentLength = int64(len(buf))
|
||||
|
||||
return nil
|
||||
|
||||
case EncodingXML:
|
||||
buf, err = xml.Marshal(req.ReqValue)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buf = append(XmlHeaderBytes, buf...)
|
||||
|
||||
req.ReqReader = bytes.NewReader(buf)
|
||||
req.Headers.Set("Content-Type", "application/xml")
|
||||
req.Headers.Set("Content-Length", fmt.Sprintf("%d", len(buf)))
|
||||
|
||||
req.ReqContentLength = int64(len(buf))
|
||||
|
||||
return nil
|
||||
|
||||
case EncodingForm:
|
||||
if data, ok := req.ReqValue.(url.Values); ok {
|
||||
formStr := data.Encode()
|
||||
req.ReqReader = strings.NewReader(formStr)
|
||||
req.Headers.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
|
||||
req.Headers.Set("Content-Length", fmt.Sprintf("%d", len(formStr)))
|
||||
|
||||
req.ReqContentLength = int64(len(formStr))
|
||||
|
||||
return nil
|
||||
} else {
|
||||
return fmt.Errorf("HTTPClient: invalid ReqValue type %T", req.ReqValue)
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("HTTPClient: invalid ReqEncoding: %s", req.ReqEncoding)
|
||||
}
|
||||
|
||||
func (c *HTTPClient) runPostHook(req *http.Request, response *http.Response) (err error) {
|
||||
hook, ok := c.PostHooks[response.StatusCode]
|
||||
|
||||
if ok {
|
||||
err = hook(req, response)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *HTTPClient) Request(req *RequestData) (response *http.Response, err error) {
|
||||
err = c.marshalRequest(req)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r, err := http.NewRequest(req.Method, req.FullURL, req.ReqReader)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r.ContentLength = req.ReqContentLength
|
||||
|
||||
if req.FullURL == "" {
|
||||
r.URL = c.buildURL(req)
|
||||
r.Host = r.URL.Host
|
||||
}
|
||||
|
||||
c.setHeaders(req, r)
|
||||
|
||||
if c.rateLimited {
|
||||
if c.rateLimitTimeout > 0 {
|
||||
select {
|
||||
case t := <-c.rateLimitChan:
|
||||
defer func() {
|
||||
c.rateLimitChan <- t
|
||||
}()
|
||||
case <-time.After(c.rateLimitTimeout):
|
||||
return nil, RateLimitTimeoutError
|
||||
}
|
||||
} else {
|
||||
t := <-c.rateLimitChan
|
||||
defer func() {
|
||||
c.rateLimitChan <- t
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
isTraceEnabled := os.Getenv("HTTPCLIENT_TRACE") != ""
|
||||
|
||||
if isTraceEnabled {
|
||||
requestBytes, _ := httputil.DumpRequestOut(r, true)
|
||||
fmt.Println(string(requestBytes))
|
||||
}
|
||||
|
||||
if req.IgnoreRedirects {
|
||||
transport := c.Client.Transport
|
||||
|
||||
if transport == nil {
|
||||
transport = http.DefaultTransport
|
||||
}
|
||||
|
||||
response, err = transport.RoundTrip(r)
|
||||
} else {
|
||||
response, err = c.Client.Do(r)
|
||||
}
|
||||
|
||||
if isTraceEnabled {
|
||||
responseBytes, _ := httputil.DumpResponse(response, true)
|
||||
fmt.Println(string(responseBytes))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return response, err
|
||||
}
|
||||
|
||||
if err = c.runPostHook(r, response); err != nil {
|
||||
return response, err
|
||||
}
|
||||
|
||||
if err = c.checkStatus(req, response); err != nil {
|
||||
defer response.Body.Close()
|
||||
return response, err
|
||||
}
|
||||
|
||||
if err = c.unmarshalResponse(req, response); err != nil {
|
||||
return response, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
96
vendor/github.com/koofr/go-httpclient/requestdata.go
generated
vendored
96
vendor/github.com/koofr/go-httpclient/requestdata.go
generated
vendored
@@ -1,96 +0,0 @@
|
||||
package httpclient
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
type Encoding string
|
||||
|
||||
const (
|
||||
EncodingJSON = "JSON"
|
||||
EncodingXML = "XML"
|
||||
EncodingForm = "Form"
|
||||
)
|
||||
|
||||
type RequestData struct {
|
||||
Method string
|
||||
Path string
|
||||
Params url.Values
|
||||
FullURL string // client.BaseURL + Path or FullURL
|
||||
Headers http.Header
|
||||
ReqReader io.Reader
|
||||
ReqEncoding Encoding
|
||||
ReqValue interface{}
|
||||
ReqContentLength int64
|
||||
ExpectedStatus []int
|
||||
IgnoreRedirects bool
|
||||
RespEncoding Encoding
|
||||
RespValue interface{}
|
||||
RespConsume bool
|
||||
}
|
||||
|
||||
func (r *RequestData) CanCopy() bool {
|
||||
if r.ReqReader != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (r *RequestData) Copy() (ok bool, nr *RequestData) {
|
||||
if !r.CanCopy() {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
nr = &RequestData{
|
||||
Method: r.Method,
|
||||
Path: r.Path,
|
||||
FullURL: r.FullURL,
|
||||
ReqEncoding: r.ReqEncoding,
|
||||
ReqValue: r.ReqValue,
|
||||
IgnoreRedirects: r.IgnoreRedirects,
|
||||
RespEncoding: r.RespEncoding,
|
||||
RespValue: r.RespValue,
|
||||
RespConsume: r.RespConsume,
|
||||
}
|
||||
|
||||
if r.Params != nil {
|
||||
nr.Params = make(url.Values)
|
||||
|
||||
for k, vs := range r.Params {
|
||||
nvs := make([]string, len(vs))
|
||||
|
||||
for i, v := range vs {
|
||||
nvs[i] = v
|
||||
}
|
||||
|
||||
nr.Params[k] = nvs
|
||||
}
|
||||
}
|
||||
|
||||
if r.Headers != nil {
|
||||
nr.Headers = make(http.Header)
|
||||
|
||||
for k, vs := range r.Headers {
|
||||
nvs := make([]string, len(vs))
|
||||
|
||||
for i, v := range vs {
|
||||
nvs[i] = v
|
||||
}
|
||||
|
||||
nr.Headers[k] = nvs
|
||||
}
|
||||
}
|
||||
|
||||
if r.ExpectedStatus != nil {
|
||||
nr.ExpectedStatus = make([]int, len(r.ExpectedStatus))
|
||||
|
||||
for i, v := range r.ExpectedStatus {
|
||||
nr.ExpectedStatus[i] = v
|
||||
}
|
||||
}
|
||||
|
||||
return true, nr
|
||||
}
|
||||
62
vendor/github.com/koofr/go-httpclient/requestdata_upload.go
generated
vendored
62
vendor/github.com/koofr/go-httpclient/requestdata_upload.go
generated
vendored
@@ -1,62 +0,0 @@
|
||||
package httpclient
|
||||
|
||||
import (
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func (req *RequestData) UploadFile(fieldName string, fileName string, reader io.Reader) (err error) {
|
||||
return req.UploadFileExtra(fieldName, fileName, reader, nil)
|
||||
}
|
||||
|
||||
func (req *RequestData) UploadFileExtra(fieldName string, fileName string, reader io.Reader, extra map[string]string) (err error) {
|
||||
r, w := io.Pipe()
|
||||
|
||||
writer := multipart.NewWriter(w)
|
||||
|
||||
go func() {
|
||||
var err error
|
||||
|
||||
defer func() {
|
||||
if err == nil {
|
||||
w.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
for k, v := range extra {
|
||||
err = writer.WriteField(k, v)
|
||||
|
||||
if err != nil {
|
||||
w.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
part, err := writer.CreateFormFile(fieldName, fileName)
|
||||
|
||||
if err != nil {
|
||||
w.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
|
||||
defer writer.Close()
|
||||
|
||||
_, err = io.Copy(part, reader)
|
||||
|
||||
if err != nil {
|
||||
w.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
req.ReqReader = r
|
||||
|
||||
if req.Headers == nil {
|
||||
req.Headers = make(http.Header)
|
||||
}
|
||||
|
||||
req.Headers.Set("Content-Type", writer.FormDataContentType())
|
||||
|
||||
return
|
||||
}
|
||||
29
vendor/github.com/koofr/go-httpclient/transport.go
generated
vendored
29
vendor/github.com/koofr/go-httpclient/transport.go
generated
vendored
@@ -1,29 +0,0 @@
|
||||
package httpclient
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
var HttpTransport = &http.Transport{
|
||||
DisableCompression: true,
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
}
|
||||
|
||||
var HttpClient = &http.Client{
|
||||
Transport: HttpTransport,
|
||||
}
|
||||
|
||||
var InsecureTlsConfig = &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
|
||||
var InsecureHttpTransport = &http.Transport{
|
||||
TLSClientConfig: InsecureTlsConfig,
|
||||
DisableCompression: true,
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
}
|
||||
|
||||
var InsecureHttpClient = &http.Client{
|
||||
Transport: InsecureHttpTransport,
|
||||
}
|
||||
14
vendor/github.com/koofr/go-httpclient/utils.go
generated
vendored
14
vendor/github.com/koofr/go-httpclient/utils.go
generated
vendored
@@ -1,14 +0,0 @@
|
||||
package httpclient
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func EscapePath(path string) string {
|
||||
u := url.URL{
|
||||
Path: path,
|
||||
}
|
||||
|
||||
return strings.Replace(u.String(), "+", "%2b", -1)
|
||||
}
|
||||
11
vendor/github.com/koofr/go-koofrclient/.gitignore
generated
vendored
11
vendor/github.com/koofr/go-koofrclient/.gitignore
generated
vendored
@@ -1,11 +0,0 @@
|
||||
/bin
|
||||
/pkg
|
||||
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# developer specific
|
||||
*.sublime-workspace
|
||||
*.sublime-project
|
||||
21
vendor/github.com/koofr/go-koofrclient/LICENSE
generated
vendored
21
vendor/github.com/koofr/go-koofrclient/LICENSE
generated
vendored
@@ -1,21 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Koofr
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
15
vendor/github.com/koofr/go-koofrclient/README.md
generated
vendored
15
vendor/github.com/koofr/go-koofrclient/README.md
generated
vendored
@@ -1,15 +0,0 @@
|
||||
go-koofrclient
|
||||
===========
|
||||
|
||||
Go Koofr client.
|
||||
|
||||
[](https://godoc.org/github.com/koofr/go-koofrclient)
|
||||
|
||||
## Install
|
||||
|
||||
go get github.com/koofr/go-koofrclient
|
||||
|
||||
## Testing
|
||||
|
||||
go get -t
|
||||
KOOFR_APIBASE="https://app.koofr.net" KOOFR_EMAIL="email@example.com" KOOFR_PASSWORD="yourpassword" go test
|
||||
217
vendor/github.com/koofr/go-koofrclient/api_scheme.go
generated
vendored
217
vendor/github.com/koofr/go-koofrclient/api_scheme.go
generated
vendored
@@ -1,217 +0,0 @@
|
||||
package koofrclient
|
||||
|
||||
import (
|
||||
"path"
|
||||
)
|
||||
|
||||
type TokenRequest struct {
|
||||
Email string `json:"email"`
|
||||
Password string `json:"password"`
|
||||
}
|
||||
|
||||
type Token struct {
|
||||
Token string
|
||||
}
|
||||
|
||||
type MountType string
|
||||
|
||||
const (
|
||||
MountDeviceType = "device"
|
||||
MountExportType = "export"
|
||||
MountImportType = "import"
|
||||
)
|
||||
|
||||
type Mount struct {
|
||||
Id string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Type MountType `json:"type"`
|
||||
Origin string `json:"origin"`
|
||||
SpaceTotal int64 `json:"spaceTotal"`
|
||||
SpaceUsed int64 `json:"spaceUsed"`
|
||||
Online bool `json:"online"`
|
||||
Owner MountUser `json:"owner"`
|
||||
Users []MountUser `json:"users"`
|
||||
Groups []MountGroup `json:"groups"`
|
||||
Version int `json:"version"`
|
||||
Permissions MountPermissions `json:"permissions"`
|
||||
IsPrimary bool `json:"isPrimary"`
|
||||
IsShared bool `json:"isShared"`
|
||||
}
|
||||
|
||||
type MountUser struct {
|
||||
Id string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Email string `json:"email"`
|
||||
Permissions MountPermissions `json:"permissions"`
|
||||
}
|
||||
|
||||
type MountGroup struct {
|
||||
Id string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Permissions MountPermissions `json:"permissions"`
|
||||
}
|
||||
|
||||
type MountPermissions struct {
|
||||
Read bool `json:"READ"`
|
||||
Write bool `json:"write"`
|
||||
Owner bool `json:"OWNER"`
|
||||
Mount bool `json:"MOUNT"`
|
||||
CreateReceiver bool `json:"CREATE_RECEIVER"`
|
||||
CreateLink bool `json:"CREATE_LINK"`
|
||||
CreateAction bool `json:"CREATE_ACTION"`
|
||||
Comment bool `json:"COMMENT"`
|
||||
}
|
||||
|
||||
type DeviceProvider string
|
||||
|
||||
const (
|
||||
StorageHubProvider = "storagehub"
|
||||
StorageBlobProvider = "storageblob"
|
||||
)
|
||||
|
||||
type Device struct {
|
||||
Id string `json:"id"`
|
||||
ApiKey string `json:"apiKey"`
|
||||
Name string `json:"name"`
|
||||
Status string `json:"status"`
|
||||
SpaceTotal int64 `json:"spaceTotal"`
|
||||
SpaceUsed int64 `json:"spaceUsed"`
|
||||
SpaceFree int64 `json:"spaceFree"`
|
||||
Version int `json:"version"`
|
||||
Provider struct {
|
||||
Name string `json:"name"`
|
||||
Data interface{} `json:"data"`
|
||||
} `json:"provider"`
|
||||
ReadOnly bool `json:"readonly"`
|
||||
RootMountId string `json:"rootMountId"`
|
||||
}
|
||||
|
||||
type DeviceCreate struct {
|
||||
Name string `json:"name"`
|
||||
ProviderName DeviceProvider `json:"providerName"`
|
||||
}
|
||||
|
||||
type DeviceUpdate struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type FolderCreate struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type FileCopy struct {
|
||||
ToMountId string `json:"toMountId"`
|
||||
TPath string `json:"toPath"`
|
||||
}
|
||||
|
||||
type FileMove struct {
|
||||
ToMountId string `json:"toMountId"`
|
||||
TPath string `json:"toPath"`
|
||||
}
|
||||
|
||||
type FileSpan struct {
|
||||
Start int64
|
||||
End int64
|
||||
}
|
||||
|
||||
type FileUpload struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type PutFilter struct {
|
||||
Modified *int64
|
||||
Size *int64
|
||||
Hash *string
|
||||
IgnoreNonExisting bool
|
||||
NoRename bool
|
||||
ForceOverwrite bool
|
||||
}
|
||||
|
||||
type DeleteFilter struct {
|
||||
Modified *int64
|
||||
Size *int64
|
||||
Hash *string
|
||||
IfEmpty bool
|
||||
}
|
||||
|
||||
type FileInfo struct {
|
||||
Name string `json:"name"`
|
||||
Type string `json:"type"`
|
||||
Modified int64 `json:"modified"`
|
||||
Size int64 `json:"size"`
|
||||
ContentType string `json:"contentType"`
|
||||
Path string `json:"path"`
|
||||
Hash string `json:"hash"`
|
||||
}
|
||||
|
||||
type FileTree struct {
|
||||
FileInfo
|
||||
Children []*FileTree `json:"children"`
|
||||
}
|
||||
|
||||
func (tree *FileTree) Flatten() []FileInfo {
|
||||
trees := []*FileTree{tree}
|
||||
for i := 0; i < len(trees); i++ {
|
||||
tree := trees[i]
|
||||
for _, child := range tree.Children {
|
||||
child.Name = path.Join(tree.Name, child.Name)
|
||||
trees = append(trees, child)
|
||||
}
|
||||
}
|
||||
infos := make([]FileInfo, len(trees))
|
||||
for i, tree := range trees {
|
||||
infos[i] = tree.FileInfo
|
||||
}
|
||||
return infos
|
||||
}
|
||||
|
||||
type User struct {
|
||||
Id string `json:"id"`
|
||||
FirstName string `json:"firstName"`
|
||||
LastName string `json:"lastName"`
|
||||
Email string `json:"email"`
|
||||
}
|
||||
|
||||
type Shared struct {
|
||||
Name string `json:name`
|
||||
Type MountType `json:type`
|
||||
Modified int64 `json:modified`
|
||||
Size int64 `json:size`
|
||||
ContentType string `json:contentType`
|
||||
Hash string `json:hash`
|
||||
Mount Mount `json:mount`
|
||||
Link Link `json:link`
|
||||
Receiver Receiver `json:receiver`
|
||||
}
|
||||
|
||||
type Link struct {
|
||||
Id string `json:id`
|
||||
Name string `json:name`
|
||||
Path string `json:path`
|
||||
Counter int64 `json:counter`
|
||||
Url string `json:url`
|
||||
ShortUrl string `json:shortUrl`
|
||||
Hash string `json:hash`
|
||||
Host string `json:host`
|
||||
HasPassword bool `json:hasPassword`
|
||||
Password string `json:password`
|
||||
ValidFrom int64 `json:validFrom`
|
||||
ValidTo int64 `json:validTo`
|
||||
PasswordRequired bool `json:passwordRequired`
|
||||
}
|
||||
|
||||
type Receiver struct {
|
||||
Id string `json:id`
|
||||
Name string `json:name`
|
||||
Path string `json:path`
|
||||
Counter int64 `json:counter`
|
||||
Url string `json:url`
|
||||
ShortUrl string `json:shortUrl`
|
||||
Hash string `json:hash`
|
||||
Host string `json:host`
|
||||
HasPassword bool `json:hasPassword`
|
||||
Password string `json:password`
|
||||
ValidFrom int64 `json:validFrom`
|
||||
ValidTo int64 `json:validTo`
|
||||
Alert bool `json:alert`
|
||||
}
|
||||
89
vendor/github.com/koofr/go-koofrclient/client.go
generated
vendored
89
vendor/github.com/koofr/go-koofrclient/client.go
generated
vendored
@@ -1,89 +0,0 @@
|
||||
package koofrclient
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/koofr/go-httpclient"
|
||||
)
|
||||
|
||||
type KoofrClient struct {
|
||||
*httpclient.HTTPClient
|
||||
token string
|
||||
userID string
|
||||
}
|
||||
|
||||
func NewKoofrClient(baseUrl string, disableSecurity bool) *KoofrClient {
|
||||
var httpClient *httpclient.HTTPClient
|
||||
|
||||
if disableSecurity {
|
||||
httpClient = httpclient.Insecure()
|
||||
} else {
|
||||
httpClient = httpclient.New()
|
||||
}
|
||||
|
||||
apiBaseUrl, _ := url.Parse(baseUrl)
|
||||
|
||||
httpClient.BaseURL = apiBaseUrl
|
||||
|
||||
httpClient.Headers.Set("User-Agent", "go koofrclient")
|
||||
|
||||
return &KoofrClient{
|
||||
HTTPClient: httpClient,
|
||||
token: "",
|
||||
userID: "",
|
||||
}
|
||||
}
|
||||
|
||||
func (c *KoofrClient) SetUserAgent(ua string) {
|
||||
c.Headers.Set("User-Agent", ua)
|
||||
}
|
||||
|
||||
func (c *KoofrClient) SetToken(token string) {
|
||||
c.token = token
|
||||
c.HTTPClient.Headers.Set("Authorization", fmt.Sprintf("Token token=%s", token))
|
||||
}
|
||||
|
||||
func (c *KoofrClient) GetToken() string {
|
||||
return c.token
|
||||
}
|
||||
|
||||
func (c *KoofrClient) SetUserID(userID string) {
|
||||
c.userID = userID
|
||||
}
|
||||
|
||||
func (c *KoofrClient) GetUserID() string {
|
||||
return c.userID
|
||||
}
|
||||
|
||||
func (c *KoofrClient) Authenticate(email string, password string) (err error) {
|
||||
var tokenResponse Token
|
||||
|
||||
tokenRequest := TokenRequest{
|
||||
Email: email,
|
||||
Password: password,
|
||||
}
|
||||
|
||||
request := httpclient.RequestData{
|
||||
Method: "POST",
|
||||
Path: "/token",
|
||||
Headers: make(http.Header),
|
||||
ExpectedStatus: []int{http.StatusOK},
|
||||
ReqEncoding: httpclient.EncodingJSON,
|
||||
ReqValue: tokenRequest,
|
||||
RespEncoding: httpclient.EncodingJSON,
|
||||
RespValue: &tokenResponse,
|
||||
}
|
||||
|
||||
res, err := c.Request(&request)
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
c.SetToken(tokenResponse.Token)
|
||||
c.SetUserID(res.Header.Get("X-User-ID"))
|
||||
|
||||
return
|
||||
}
|
||||
84
vendor/github.com/koofr/go-koofrclient/client_device.go
generated
vendored
84
vendor/github.com/koofr/go-koofrclient/client_device.go
generated
vendored
@@ -1,84 +0,0 @@
|
||||
package koofrclient
|
||||
|
||||
import (
|
||||
"github.com/koofr/go-httpclient"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func (c *KoofrClient) Devices() (devices []Device, err error) {
|
||||
d := &struct {
|
||||
Devices *[]Device
|
||||
}{&devices}
|
||||
|
||||
request := httpclient.RequestData{
|
||||
Method: "GET",
|
||||
Path: "/api/v2/devices",
|
||||
ExpectedStatus: []int{http.StatusOK},
|
||||
RespEncoding: httpclient.EncodingJSON,
|
||||
RespValue: &d,
|
||||
}
|
||||
|
||||
_, err = c.Request(&request)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c *KoofrClient) DevicesCreate(name string, provider DeviceProvider) (device Device, err error) {
|
||||
deviceCreate := DeviceCreate{name, provider}
|
||||
|
||||
request := httpclient.RequestData{
|
||||
Method: "POST",
|
||||
Path: "/api/v2/devices",
|
||||
ExpectedStatus: []int{http.StatusCreated},
|
||||
ReqEncoding: httpclient.EncodingJSON,
|
||||
ReqValue: deviceCreate,
|
||||
RespEncoding: httpclient.EncodingJSON,
|
||||
RespValue: &device,
|
||||
}
|
||||
|
||||
_, err = c.Request(&request)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c *KoofrClient) DevicesDetails(deviceId string) (device Device, err error) {
|
||||
request := httpclient.RequestData{
|
||||
Method: "GET",
|
||||
Path: "/api/v2/devices/" + deviceId,
|
||||
ExpectedStatus: []int{http.StatusOK},
|
||||
RespEncoding: httpclient.EncodingJSON,
|
||||
RespValue: &device,
|
||||
}
|
||||
|
||||
_, err = c.Request(&request)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c *KoofrClient) DevicesUpdate(deviceId string, deviceUpdate DeviceUpdate) (err error) {
|
||||
request := httpclient.RequestData{
|
||||
Method: "PUT",
|
||||
Path: "/api/v2/devices/" + deviceId,
|
||||
ExpectedStatus: []int{http.StatusNoContent},
|
||||
ReqEncoding: httpclient.EncodingJSON,
|
||||
ReqValue: deviceUpdate,
|
||||
RespConsume: true,
|
||||
}
|
||||
|
||||
_, err = c.Request(&request)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c *KoofrClient) DevicesDelete(deviceId string) (err error) {
|
||||
request := httpclient.RequestData{
|
||||
Method: "DELETE",
|
||||
Path: "/api/v2/devices/" + deviceId,
|
||||
ExpectedStatus: []int{http.StatusNoContent},
|
||||
RespConsume: true,
|
||||
}
|
||||
|
||||
_, err = c.Request(&request)
|
||||
|
||||
return
|
||||
}
|
||||
294
vendor/github.com/koofr/go-koofrclient/client_files.go
generated
vendored
294
vendor/github.com/koofr/go-koofrclient/client_files.go
generated
vendored
@@ -1,294 +0,0 @@
|
||||
package koofrclient
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
|
||||
"github.com/koofr/go-httpclient"
|
||||
)
|
||||
|
||||
var ErrCannotOverwrite = fmt.Errorf("Can not overwrite (filter constraint fails)")
|
||||
var ErrCannotRemove = fmt.Errorf("Can not remove (filter constraint fails)")
|
||||
|
||||
func (c *KoofrClient) FilesInfo(mountId string, path string) (info FileInfo, err error) {
|
||||
params := url.Values{}
|
||||
params.Set("path", path)
|
||||
|
||||
request := httpclient.RequestData{
|
||||
Method: "GET",
|
||||
Path: "/api/v2/mounts/" + mountId + "/files/info",
|
||||
Params: params,
|
||||
ExpectedStatus: []int{http.StatusOK},
|
||||
RespEncoding: httpclient.EncodingJSON,
|
||||
RespValue: &info,
|
||||
}
|
||||
|
||||
_, err = c.Request(&request)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c *KoofrClient) FilesList(mountId string, basePath string) (files []FileInfo, err error) {
|
||||
f := &struct {
|
||||
Files *[]FileInfo
|
||||
}{&files}
|
||||
|
||||
params := url.Values{}
|
||||
params.Set("path", basePath)
|
||||
|
||||
request := httpclient.RequestData{
|
||||
Method: "GET",
|
||||
Path: "/api/v2/mounts/" + mountId + "/files/list",
|
||||
Params: params,
|
||||
ExpectedStatus: []int{http.StatusOK},
|
||||
RespEncoding: httpclient.EncodingJSON,
|
||||
RespValue: &f,
|
||||
}
|
||||
|
||||
_, err = c.Request(&request)
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for i := range files {
|
||||
files[i].Path = path.Join(basePath, files[i].Name)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c *KoofrClient) FilesTree(mountId string, path string) (tree FileTree, err error) {
|
||||
params := url.Values{}
|
||||
params.Set("path", path)
|
||||
|
||||
request := httpclient.RequestData{
|
||||
Method: "GET",
|
||||
Path: "/api/v2/mounts/" + mountId + "/files/tree",
|
||||
Params: params,
|
||||
ExpectedStatus: []int{http.StatusOK},
|
||||
RespEncoding: httpclient.EncodingJSON,
|
||||
RespValue: &tree,
|
||||
}
|
||||
|
||||
_, err = c.Request(&request)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c *KoofrClient) FilesDelete(mountId string, path string) (err error) {
|
||||
return c.filesDelete(mountId, path, nil)
|
||||
}
|
||||
|
||||
func (c *KoofrClient) FilesDeleteIf(mountId string, path string, deleteFilter *DeleteFilter) (err error) {
|
||||
return c.filesDelete(mountId, path, deleteFilter)
|
||||
}
|
||||
|
||||
func (c *KoofrClient) filesDelete(mountId string, path string, deleteFilter *DeleteFilter) (err error) {
|
||||
params := url.Values{}
|
||||
params.Set("path", path)
|
||||
|
||||
if deleteFilter != nil {
|
||||
if deleteFilter.Size != nil {
|
||||
params.Set("removeIfSize", fmt.Sprintf("%d", *deleteFilter.Size))
|
||||
}
|
||||
if deleteFilter.Modified != nil {
|
||||
params.Set("removeIfModified", fmt.Sprintf("%d", *deleteFilter.Modified))
|
||||
}
|
||||
if deleteFilter.Hash != nil {
|
||||
params.Set("removeIfHash", fmt.Sprintf("%s", *deleteFilter.Hash))
|
||||
}
|
||||
if deleteFilter.IfEmpty {
|
||||
params.Set("removeIfEmpty", "")
|
||||
}
|
||||
}
|
||||
|
||||
request := httpclient.RequestData{
|
||||
Method: "DELETE",
|
||||
Path: "/api/v2/mounts/" + mountId + "/files/remove",
|
||||
Params: params,
|
||||
ExpectedStatus: []int{http.StatusOK},
|
||||
RespConsume: true,
|
||||
}
|
||||
|
||||
_, err = c.Request(&request)
|
||||
|
||||
if err != nil {
|
||||
switch err := err.(type) {
|
||||
case httpclient.InvalidStatusError:
|
||||
if err.Got == http.StatusConflict {
|
||||
return ErrCannotRemove
|
||||
}
|
||||
default:
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c *KoofrClient) FilesNewFolder(mountId string, path string, name string) (err error) {
|
||||
reqData := FolderCreate{name}
|
||||
|
||||
params := url.Values{}
|
||||
params.Set("path", path)
|
||||
|
||||
request := httpclient.RequestData{
|
||||
Method: "POST",
|
||||
Path: "/api/v2/mounts/" + mountId + "/files/folder",
|
||||
Params: params,
|
||||
ExpectedStatus: []int{http.StatusOK, http.StatusCreated},
|
||||
ReqEncoding: httpclient.EncodingJSON,
|
||||
ReqValue: reqData,
|
||||
RespConsume: true,
|
||||
}
|
||||
|
||||
_, err = c.Request(&request)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c *KoofrClient) FilesCopy(mountId string, path string, toMountId string, toPath string) (err error) {
|
||||
reqData := FileCopy{toMountId, toPath}
|
||||
|
||||
params := url.Values{}
|
||||
params.Set("path", path)
|
||||
|
||||
request := httpclient.RequestData{
|
||||
Method: "PUT",
|
||||
Path: "/api/v2/mounts/" + mountId + "/files/copy",
|
||||
Params: params,
|
||||
ExpectedStatus: []int{http.StatusOK},
|
||||
ReqEncoding: httpclient.EncodingJSON,
|
||||
ReqValue: reqData,
|
||||
RespConsume: true,
|
||||
}
|
||||
|
||||
_, err = c.Request(&request)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c *KoofrClient) FilesMove(mountId string, path string, toMountId string, toPath string) (err error) {
|
||||
reqData := FileMove{toMountId, toPath}
|
||||
|
||||
params := url.Values{}
|
||||
params.Set("path", path)
|
||||
|
||||
request := httpclient.RequestData{
|
||||
Method: "PUT",
|
||||
Path: "/api/v2/mounts/" + mountId + "/files/move",
|
||||
Params: params,
|
||||
ExpectedStatus: []int{http.StatusOK},
|
||||
ReqEncoding: httpclient.EncodingJSON,
|
||||
ReqValue: reqData,
|
||||
RespConsume: true,
|
||||
}
|
||||
|
||||
_, err = c.Request(&request)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c *KoofrClient) FilesGetRange(mountId string, path string, span *FileSpan) (reader io.ReadCloser, err error) {
|
||||
params := url.Values{}
|
||||
params.Set("path", path)
|
||||
|
||||
request := httpclient.RequestData{
|
||||
Method: "GET",
|
||||
Path: "/content/api/v2/mounts/" + mountId + "/files/get",
|
||||
Params: params,
|
||||
Headers: make(http.Header),
|
||||
ExpectedStatus: []int{http.StatusOK, http.StatusPartialContent},
|
||||
}
|
||||
|
||||
if span != nil {
|
||||
if span.End == -1 {
|
||||
request.Headers.Set("Range", fmt.Sprintf("bytes=%d-", span.Start))
|
||||
} else {
|
||||
request.Headers.Set("Range", fmt.Sprintf("bytes=%d-%d", span.Start, span.End))
|
||||
}
|
||||
}
|
||||
|
||||
res, err := c.Request(&request)
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
reader = res.Body
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c *KoofrClient) FilesGet(mountId string, path string) (reader io.ReadCloser, err error) {
|
||||
return c.FilesGetRange(mountId, path, nil)
|
||||
}
|
||||
|
||||
func (c *KoofrClient) FilesPut(mountId string, path string, name string, reader io.Reader) (newName string, err error) {
|
||||
info, err := c.FilesPutOptions(mountId, path, name, reader, nil)
|
||||
return info.Name, err
|
||||
}
|
||||
|
||||
func (c *KoofrClient) FilesPutOptions(mountId string, path string, name string, reader io.Reader, putFilter *PutFilter) (fileInfo *FileInfo, err error) {
|
||||
params := url.Values{}
|
||||
params.Set("path", path)
|
||||
params.Set("filename", name)
|
||||
params.Set("info", "true")
|
||||
|
||||
if putFilter != nil {
|
||||
if putFilter.Size != nil {
|
||||
params.Set("overwriteIfSize", fmt.Sprintf("%d", *putFilter.Size))
|
||||
}
|
||||
if putFilter.Modified != nil {
|
||||
params.Set("overwriteIfModified", fmt.Sprintf("%d", *putFilter.Modified))
|
||||
}
|
||||
if putFilter.Hash != nil {
|
||||
params.Set("overwriteIfHash", fmt.Sprintf("%s", *putFilter.Hash))
|
||||
}
|
||||
if putFilter.IgnoreNonExisting {
|
||||
params.Set("overwriteIgnoreNonexisting", "")
|
||||
}
|
||||
if putFilter.NoRename {
|
||||
params.Set("autorename", "false")
|
||||
}
|
||||
if putFilter.ForceOverwrite {
|
||||
params.Set("overwrite", "true")
|
||||
}
|
||||
}
|
||||
|
||||
request := httpclient.RequestData{
|
||||
Method: "POST",
|
||||
Path: "/content/api/v2/mounts/" + mountId + "/files/put",
|
||||
Params: params,
|
||||
ExpectedStatus: []int{http.StatusOK},
|
||||
RespEncoding: httpclient.EncodingJSON,
|
||||
RespValue: &fileInfo,
|
||||
}
|
||||
|
||||
err = request.UploadFile("file", "dummy", reader)
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_, err = c.Request(&request)
|
||||
|
||||
if err != nil {
|
||||
|
||||
switch err := err.(type) {
|
||||
case httpclient.InvalidStatusError:
|
||||
if err.Got == http.StatusConflict {
|
||||
return nil, ErrCannotOverwrite
|
||||
}
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
38
vendor/github.com/koofr/go-koofrclient/client_mount.go
generated
vendored
38
vendor/github.com/koofr/go-koofrclient/client_mount.go
generated
vendored
@@ -1,38 +0,0 @@
|
||||
package koofrclient
|
||||
|
||||
import (
|
||||
"github.com/koofr/go-httpclient"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func (c *KoofrClient) Mounts() (mounts []Mount, err error) {
|
||||
d := &struct {
|
||||
Mounts *[]Mount
|
||||
}{&mounts}
|
||||
|
||||
request := httpclient.RequestData{
|
||||
Method: "GET",
|
||||
Path: "/api/v2/mounts",
|
||||
ExpectedStatus: []int{http.StatusOK},
|
||||
RespEncoding: httpclient.EncodingJSON,
|
||||
RespValue: &d,
|
||||
}
|
||||
|
||||
_, err = c.Request(&request)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c *KoofrClient) MountsDetails(mountId string) (mount Mount, err error) {
|
||||
request := httpclient.RequestData{
|
||||
Method: "GET",
|
||||
Path: "/api/v2/mounts/" + mountId,
|
||||
ExpectedStatus: []int{http.StatusOK},
|
||||
RespEncoding: httpclient.EncodingJSON,
|
||||
RespValue: &mount,
|
||||
}
|
||||
|
||||
_, err = c.Request(&request)
|
||||
|
||||
return
|
||||
}
|
||||
25
vendor/github.com/koofr/go-koofrclient/client_shared.go
generated
vendored
25
vendor/github.com/koofr/go-koofrclient/client_shared.go
generated
vendored
@@ -1,25 +0,0 @@
|
||||
package koofrclient
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/koofr/go-httpclient"
|
||||
)
|
||||
|
||||
func (c *KoofrClient) Shared() (shared []Shared, err error) {
|
||||
d := &struct {
|
||||
Files *[]Shared
|
||||
}{&shared}
|
||||
|
||||
request := httpclient.RequestData{
|
||||
Method: "GET",
|
||||
Path: "/api/v2/shared",
|
||||
ExpectedStatus: []int{http.StatusOK},
|
||||
RespEncoding: httpclient.EncodingJSON,
|
||||
RespValue: &d,
|
||||
}
|
||||
|
||||
_, err = c.Request(&request)
|
||||
|
||||
return
|
||||
}
|
||||
20
vendor/github.com/koofr/go-koofrclient/client_user.go
generated
vendored
20
vendor/github.com/koofr/go-koofrclient/client_user.go
generated
vendored
@@ -1,20 +0,0 @@
|
||||
package koofrclient
|
||||
|
||||
import (
|
||||
"github.com/koofr/go-httpclient"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func (c *KoofrClient) UserInfo() (user User, err error) {
|
||||
request := httpclient.RequestData{
|
||||
Method: "GET",
|
||||
Path: "/api/v2/user",
|
||||
ExpectedStatus: []int{http.StatusOK},
|
||||
RespEncoding: httpclient.EncodingJSON,
|
||||
RespValue: &user,
|
||||
}
|
||||
|
||||
_, err = c.Request(&request)
|
||||
|
||||
return
|
||||
}
|
||||
5
vendor/github.com/t3rm1n4l/go-mega/mega.go
generated
vendored
5
vendor/github.com/t3rm1n4l/go-mega/mega.go
generated
vendored
@@ -894,11 +894,6 @@ func (m *Mega) NewDownload(src *Node) (*Download, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// DownloadResp has an embedded error in it for some reason
|
||||
if res[0].Err != 0 {
|
||||
return nil, parseError(res[0].Err)
|
||||
}
|
||||
|
||||
_, err = decryptAttr(key, []byte(res[0].Attr))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
8
vendor/github.com/t3rm1n4l/go-mega/messages.go
generated
vendored
8
vendor/github.com/t3rm1n4l/go-mega/messages.go
generated
vendored
@@ -120,10 +120,10 @@ type DownloadMsg struct {
|
||||
}
|
||||
|
||||
type DownloadResp struct {
|
||||
G string `json:"g"`
|
||||
Size uint64 `json:"s"`
|
||||
Attr string `json:"at"`
|
||||
Err ErrorMsg `json:"e"`
|
||||
G string `json:"g"`
|
||||
Size uint64 `json:"s"`
|
||||
Attr string `json:"at"`
|
||||
Err uint32 `json:"e"`
|
||||
}
|
||||
|
||||
type UploadMsg struct {
|
||||
|
||||
4
vendor/modules.txt
vendored
4
vendor/modules.txt
vendored
@@ -98,10 +98,6 @@ github.com/jlaffaye/ftp
|
||||
github.com/jmespath/go-jmespath
|
||||
# github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1
|
||||
github.com/kardianos/osext
|
||||
# github.com/koofr/go-httpclient v0.0.0-20180104120329-03786175608a
|
||||
github.com/koofr/go-httpclient
|
||||
# github.com/koofr/go-koofrclient v0.0.0-20190131164641-7f327592caff
|
||||
github.com/koofr/go-koofrclient
|
||||
# github.com/kr/fs v0.1.0
|
||||
github.com/kr/fs
|
||||
# github.com/mattn/go-runewidth v0.0.4
|
||||
|
||||
30
vfs/dir.go
30
vfs/dir.go
@@ -290,17 +290,14 @@ func (d *Dir) _readDirFromEntries(entries fs.DirEntries, dirTree walk.DirTree, w
|
||||
// readDirTree forces a refresh of the complete directory tree
|
||||
func (d *Dir) readDirTree() error {
|
||||
d.mu.Lock()
|
||||
f, path := d.f, d.path
|
||||
d.mu.Unlock()
|
||||
defer d.mu.Unlock()
|
||||
when := time.Now()
|
||||
fs.Debugf(path, "Reading directory tree")
|
||||
dt, err := walk.NewDirTree(f, path, false, -1)
|
||||
d.read = time.Time{}
|
||||
fs.Debugf(d.path, "Reading directory tree")
|
||||
dt, err := walk.NewDirTree(d.f, d.path, false, -1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
d.read = time.Time{}
|
||||
err = d._readDirFromDirTree(dt, when)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -461,23 +458,8 @@ func (d *Dir) Mkdir(name string) (*Dir, error) {
|
||||
return nil, EROFS
|
||||
}
|
||||
path := path.Join(d.path, name)
|
||||
node, err := d.stat(name)
|
||||
switch err {
|
||||
case ENOENT:
|
||||
// not found, carry on
|
||||
case nil:
|
||||
// found so check what it is
|
||||
if node.IsDir() {
|
||||
return node.(*Dir), err
|
||||
}
|
||||
return nil, EEXIST
|
||||
default:
|
||||
// a different error - report
|
||||
fs.Errorf(d, "Dir.Mkdir failed to read directory: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
// fs.Debugf(path, "Dir.Mkdir")
|
||||
err = d.f.Mkdir(path)
|
||||
err := d.f.Mkdir(path)
|
||||
if err != nil {
|
||||
fs.Errorf(d, "Dir.Mkdir failed to create directory: %v", err)
|
||||
return nil, err
|
||||
@@ -618,7 +600,7 @@ func (d *Dir) Rename(oldName, newName string, destDir *Dir) error {
|
||||
}
|
||||
default:
|
||||
err = errors.Errorf("unknown type %T", oldNode)
|
||||
fs.Errorf(d.path, "Dir.Rename error: %v", err)
|
||||
fs.Errorf(d.path, "Dir.ReadDirAll error: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -353,33 +353,6 @@ func TestDirMkdir(t *testing.T) {
|
||||
assert.Equal(t, EROFS, err)
|
||||
}
|
||||
|
||||
func TestDirMkdirSub(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
vfs, dir, file1 := dirCreate(t, r)
|
||||
|
||||
_, err := dir.Mkdir("file1")
|
||||
assert.Error(t, err)
|
||||
|
||||
sub, err := dir.Mkdir("sub")
|
||||
assert.NoError(t, err)
|
||||
|
||||
subsub, err := sub.Mkdir("subsub")
|
||||
assert.NoError(t, err)
|
||||
|
||||
// check the vfs
|
||||
checkListing(t, dir, []string{"file1,14,false", "sub,0,true"})
|
||||
checkListing(t, sub, []string{"subsub,0,true"})
|
||||
checkListing(t, subsub, []string(nil))
|
||||
|
||||
// check the underlying r.Fremote
|
||||
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, []string{"dir", "dir/sub", "dir/sub/subsub"}, r.Fremote.Precision())
|
||||
|
||||
vfs.Opt.ReadOnly = true
|
||||
_, err = dir.Mkdir("sausage")
|
||||
assert.Equal(t, EROFS, err)
|
||||
}
|
||||
|
||||
func TestDirRemove(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
|
||||
@@ -263,32 +263,24 @@ func TestVFSStatfs(t *testing.T) {
|
||||
assert.Nil(t, vfs.usage)
|
||||
assert.True(t, vfs.usageTime.IsZero())
|
||||
|
||||
aboutSupported := r.Fremote.Features().About != nil
|
||||
|
||||
// read
|
||||
total, used, free := vfs.Statfs()
|
||||
if !aboutSupported {
|
||||
assert.Equal(t, int64(-1), total)
|
||||
assert.Equal(t, int64(-1), free)
|
||||
assert.Equal(t, int64(-1), used)
|
||||
return // can't test anything else if About not supported
|
||||
}
|
||||
require.NotNil(t, vfs.usage)
|
||||
assert.False(t, vfs.usageTime.IsZero())
|
||||
if vfs.usage.Total != nil {
|
||||
assert.Equal(t, *vfs.usage.Total, total)
|
||||
} else {
|
||||
assert.Equal(t, int64(-1), total)
|
||||
assert.Equal(t, -1, total)
|
||||
}
|
||||
if vfs.usage.Free != nil {
|
||||
assert.Equal(t, *vfs.usage.Free, free)
|
||||
} else {
|
||||
assert.Equal(t, int64(-1), free)
|
||||
assert.Equal(t, -1, free)
|
||||
}
|
||||
if vfs.usage.Used != nil {
|
||||
assert.Equal(t, *vfs.usage.Used, used)
|
||||
} else {
|
||||
assert.Equal(t, int64(-1), used)
|
||||
assert.Equal(t, -1, used)
|
||||
}
|
||||
|
||||
// read cached
|
||||
|
||||
Reference in New Issue
Block a user