Mirror of https://github.com/rclone/rclone.git (synced 2026-02-02 09:43:36 +00:00)

Compare commits: pr-3782-un ... vfs-refact (1 commit, SHA1 40b9e312c6)

backend/cache/storage_persistent.go (vendored, 2 changes)
```diff
@@ -16,10 +16,10 @@ import (
 	"sync"
 	"time"
 
-	bolt "github.com/etcd-io/bbolt"
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/walk"
+	bolt "go.etcd.io/bbolt"
 )
 
 // Constants
```
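The only change in this hunk is the bbolt import path: the same library is pulled through its canonical module path go.etcd.io/bbolt instead of the old github.com/etcd-io/bbolt mirror. Below is a minimal sketch of the bbolt API this backend relies on, under the new path; the file name and bucket name are illustrative, not rclone's.

```go
package main

import (
	"log"

	bolt "go.etcd.io/bbolt" // canonical import path for bbolt
)

func main() {
	// Open (or create) a database file, as the cache backend does for
	// its persistent metadata store.
	db, err := bolt.Open("cache.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// One read-write transaction: create a bucket and store a key.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("objects"))
		if err != nil {
			return err
		}
		return b.Put([]byte("key"), []byte("value"))
	})
	if err != nil {
		log.Fatal(err)
	}
}
```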
```diff
@@ -1591,13 +1591,8 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan list
 	listRSlices{dirs, paths}.Sort()
 	var iErr error
 	_, err := f.list(ctx, dirs, "", false, false, false, func(item *drive.File) bool {
-		// shared with me items have no parents when at the root
-		if f.opt.SharedWithMe && len(item.Parents) == 0 && len(paths) == 1 && paths[0] == "" {
-			item.Parents = dirs
-		}
 		for _, parent := range item.Parents {
 			var i int
-			earlyExit := false
 			// If only one item in paths then no need to search for the ID
 			// assuming google drive is doing its job properly.
 			//
@@ -1607,9 +1602,6 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan list
 				// - shared with me items have no parents at the root
 				// - if using a root alias, eg "root" or "appDataFolder" the ID won't match
 				i = 0
-				// items at root can have more than one parent so we need to put
-				// the item in just once.
-				earlyExit = true
 			} else {
 				// only handle parents that are in the requested dirs list if not at root
 				i = sort.SearchStrings(dirs, parent)
@@ -1629,11 +1621,6 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan list
 				iErr = err
 				return true
 			}
-
-			// If didn't check parents then insert only once
-			if earlyExit {
-				break
-			}
 		}
 		return false
 	})
```
```diff
@@ -799,12 +799,10 @@ func (f *ftpReadCloser) Close() error {
 		f.f.putFtpConnection(&f.c, nil)
 	}
 	// mask the error if it was caused by a premature close
-	// NB StatusAboutToSend is to work around a bug in pureftpd
-	// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
 	switch errX := err.(type) {
 	case *textproto.Error:
 		switch errX.Code {
-		case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
+		case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable:
 			err = nil
 		}
 	}
```
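This hunk drops the pureftpd workaround, leaving only the transfer-aborted and file-unavailable statuses masked on close. The masking pattern itself is a type switch on *textproto.Error; here is a self-contained sketch of it. The numeric codes 426 and 550 are the usual values behind ftp.StatusTransfertAborted and ftp.StatusFileUnavailable, hard-coded so the example runs without the ftp package.

```go
package main

import (
	"fmt"
	"net/textproto"
)

// maskPrematureClose treats the FTP statuses caused by closing a
// download early as success, mirroring ftpReadCloser.Close above.
func maskPrematureClose(err error) error {
	switch errX := err.(type) {
	case *textproto.Error:
		switch errX.Code {
		case 426, 550: // StatusTransfertAborted, StatusFileUnavailable
			return nil
		}
	}
	return err
}

func main() {
	err := &textproto.Error{Code: 426, Msg: "transfer aborted"}
	fmt.Println(maskPrematureClose(err)) // <nil>
}
```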
```diff
@@ -1003,9 +1003,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 	// Add upload to internal storage
 	if pattern.isUpload {
-		o.fs.uploadedMu.Lock()
 		o.fs.uploaded.AddEntry(o)
-		o.fs.uploadedMu.Unlock()
 	}
 	return nil
 }
```
```diff
@@ -1068,12 +1068,6 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
 	if err != nil {
 		fs.Debugf(o, "Failed to pre-allocate: %v", err)
 	}
-	// Set the file to be a sparse file (important on Windows)
-	err = setSparse(out)
-	if err != nil {
-		fs.Debugf(o, "Failed to set sparse: %v", err)
-	}
 
 	return out, nil
 }
```
```diff
@@ -8,8 +8,3 @@ import "os"
 func preAllocate(size int64, out *os.File) error {
 	return nil
 }
-
-// setSparse makes the file be a sparse file
-func setSparse(out *os.File) error {
-	return nil
-}
```
```diff
@@ -44,8 +44,3 @@ again:
 	// }
 	return err
 }
-
-// setSparse makes the file be a sparse file
-func setSparse(out *os.File) error {
-	return nil
-}
```
```diff
@@ -77,16 +77,3 @@ func preAllocate(size int64, out *os.File) error {
 
 	return nil
 }
-
-const (
-	FSCTL_SET_SPARSE = 0x000900c4
-)
-
-// setSparse makes the file be a sparse file
-func setSparse(out *os.File) error {
-	err := syscall.DeviceIoControl(syscall.Handle(out.Fd()), FSCTL_SET_SPARSE, nil, 0, nil, 0, nil, nil)
-	if err != nil {
-		return errors.Wrap(err, "DeviceIoControl FSCTL_SET_SPARSE")
-	}
-	return nil
-}
```
```diff
@@ -1039,12 +1039,6 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
 	def := defaults.Get()
 	def.Config.HTTPClient = lowTimeoutClient
 
-	// start a new AWS session
-	awsSession, err := session.NewSession()
-	if err != nil {
-		return nil, nil, errors.Wrap(err, "NewSession")
-	}
-
 	// first provider to supply a credential set "wins"
 	providers := []credentials.Provider{
 		// use static credentials if they're present (checked by provider)
@@ -1064,7 +1058,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
 
 		// Pick up IAM role in case we're on EC2
 		&ec2rolecreds.EC2RoleProvider{
-			Client: ec2metadata.New(awsSession, &aws.Config{
+			Client: ec2metadata.New(session.New(), &aws.Config{
 				HTTPClient: lowTimeoutClient,
 			}),
 			ExpiryWindow: 3 * time.Minute,
```
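Both s3 hunks revolve around the credentials chain: one side creates a single session.Session up front (surfacing any error) and reuses it for the EC2 metadata client, while the other calls session.New() inline. The sketch below shows the same first-provider-wins chain using public aws-sdk-go APIs; the provider list is abbreviated, and rclone's real list also includes static and shared credentials.

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Create the session once and check the error, rather than the
	// deprecated session.New() which cannot return one.
	sess, err := session.NewSession()
	if err != nil {
		panic(err)
	}
	// The first provider able to supply credentials wins.
	creds := credentials.NewChainCredentials([]credentials.Provider{
		&credentials.EnvProvider{},
		&ec2rolecreds.EC2RoleProvider{
			Client:       ec2metadata.New(sess),
			ExpiryWindow: 3 * time.Minute,
		},
	})
	v, err := creds.Get()
	fmt.Printf("provider=%s err=%v\n", v.ProviderName, err)
}
```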
```diff
@@ -1,167 +0,0 @@
-package union
-
-import (
-	"bufio"
-	"context"
-	"io"
-	"sync"
-	"time"
-
-	"github.com/pkg/errors"
-	"github.com/rclone/rclone/backend/union/upstream"
-	"github.com/rclone/rclone/fs"
-)
-
-// Object describes a union Object
-//
-// This is a wrapped object which returns the Union Fs as its parent
-type Object struct {
-	*upstream.Object
-	fs *Fs // what this object is part of
-	co []upstream.Entry
-}
-
-// Directory describes a union Directory
-//
-// This is a wrapped object contains all candidates
-type Directory struct {
-	*upstream.Directory
-	cd []upstream.Entry
-}
-
-type entry interface {
-	upstream.Entry
-	candidates() []upstream.Entry
-}
-
-// UnWrap returns the Object that this Object is wrapping or
-// nil if it isn't wrapping anything
-func (o *Object) UnWrap() *upstream.Object {
-	return o.Object
-}
-
-// Fs returns the union Fs as the parent
-func (o *Object) Fs() fs.Info {
-	return o.fs
-}
-
-func (o *Object) candidates() []upstream.Entry {
-	return o.co
-}
-
-func (d *Directory) candidates() []upstream.Entry {
-	return d.cd
-}
-
-// Update in to the object with the modTime given of the given size
-//
-// When called from outside a Fs by rclone, src.Size() will always be >= 0.
-// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
-// return an error or update the object properly (rather than e.g. calling panic).
-func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	entries, err := o.fs.actionEntries(o.candidates()...)
-	if err != nil {
-		return err
-	}
-	if len(entries) == 1 {
-		obj := entries[0].(*upstream.Object)
-		return obj.Update(ctx, in, src, options...)
-	}
-	// Get multiple reader
-	readers := make([]io.Reader, len(entries))
-	writers := make([]io.Writer, len(entries))
-	errs := Errors(make([]error, len(entries)+1))
-	for i := range entries {
-		r, w := io.Pipe()
-		bw := bufio.NewWriter(w)
-		readers[i], writers[i] = r, bw
-		defer func() {
-			err := w.Close()
-			if err != nil {
-				panic(err)
-			}
-		}()
-	}
-	go func() {
-		mw := io.MultiWriter(writers...)
-		es := make([]error, len(writers)+1)
-		_, es[len(es)-1] = io.Copy(mw, in)
-		for i, bw := range writers {
-			es[i] = bw.(*bufio.Writer).Flush()
-		}
-		errs[len(entries)] = Errors(es).Err()
-	}()
-	// Multi-threading
-	multithread(len(entries), func(i int) {
-		if o, ok := entries[i].(*upstream.Object); ok {
-			err := o.Update(ctx, readers[i], src, options...)
-			errs[i] = errors.Wrap(err, o.UpstreamFs().Name())
-		} else {
-			errs[i] = fs.ErrorNotAFile
-		}
-	})
-	return errs.Err()
-}
-
-// Remove candidate objects selected by ACTION policy
-func (o *Object) Remove(ctx context.Context) error {
-	entries, err := o.fs.actionEntries(o.candidates()...)
-	if err != nil {
-		return err
-	}
-	errs := Errors(make([]error, len(entries)))
-	multithread(len(entries), func(i int) {
-		if o, ok := entries[i].(*upstream.Object); ok {
-			err := o.Remove(ctx)
-			errs[i] = errors.Wrap(err, o.UpstreamFs().Name())
-		} else {
-			errs[i] = fs.ErrorNotAFile
-		}
-	})
-	return errs.Err()
-}
-
-// SetModTime sets the metadata on the object to set the modification date
-func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
-	entries, err := o.fs.actionEntries(o.candidates()...)
-	if err != nil {
-		return err
-	}
-	var wg sync.WaitGroup
-	errs := Errors(make([]error, len(entries)))
-	multithread(len(entries), func(i int) {
-		if o, ok := entries[i].(*upstream.Object); ok {
-			err := o.SetModTime(ctx, t)
-			errs[i] = errors.Wrap(err, o.UpstreamFs().Name())
-		} else {
-			errs[i] = fs.ErrorNotAFile
-		}
-	})
-	wg.Wait()
-	return errs.Err()
-}
-
-// ModTime returns the modification date of the directory
-// It returns the latest ModTime of all candidates
-func (d *Directory) ModTime(ctx context.Context) (t time.Time) {
-	entries := d.candidates()
-	times := make([]time.Time, len(entries))
-	multithread(len(entries), func(i int) {
-		times[i] = entries[i].ModTime(ctx)
-	})
-	for _, ti := range times {
-		if t.Before(ti) {
-			t = ti
-		}
-	}
-	return t
-}
-
-// Size returns the size of the directory
-// It returns the sum of all candidates
-func (d *Directory) Size() (s int64) {
-	for _, e := range d.candidates() {
-		s += e.Size()
-	}
-	return s
-}
```
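The deleted Object.Update above fans one source reader out to every selected upstream: one io.Pipe per upstream, an io.MultiWriter feeding all the pipe writers, and one consumer per pipe. A condensed, runnable sketch of that shape follows; the bufio buffering and the Errors type are omitted for brevity, and fanOut is a name invented for this example.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// fanOut copies src to n destinations concurrently, the same shape as
// Object.Update above: one pipe per destination and an io.MultiWriter
// feeding all pipe writers.
func fanOut(src io.Reader, dsts []io.Writer) error {
	writers := make([]io.Writer, len(dsts))
	readers := make([]io.Reader, len(dsts))
	for i := range dsts {
		r, w := io.Pipe()
		readers[i], writers[i] = r, w
	}
	errc := make(chan error, len(dsts))
	for i, dst := range dsts {
		go func(i int, dst io.Writer) {
			_, err := io.Copy(dst, readers[i])
			errc <- err
		}(i, dst)
	}
	// Feed every pipe; closing the writers delivers EOF to the readers.
	mw := io.MultiWriter(writers...)
	_, err := io.Copy(mw, src)
	for _, w := range writers {
		w.(*io.PipeWriter).Close()
	}
	for range dsts {
		if e := <-errc; err == nil && e != nil {
			err = e
		}
	}
	return err
}

func main() {
	var a, b bytes.Buffer
	err := fanOut(strings.NewReader("hello union"), []io.Writer{&a, &b})
	fmt.Println(a.String(), b.String(), err)
}
```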
```diff
@@ -1,68 +0,0 @@
-package union
-
-import (
-	"bytes"
-	"fmt"
-)
-
-// The Errors type wraps a slice of errors
-type Errors []error
-
-// Map returns a copy of the error slice with all its errors modified
-// according to the mapping function. If mapping returns nil,
-// the error is dropped from the error slice with no replacement.
-func (e Errors) Map(mapping func(error) error) Errors {
-	s := make([]error, len(e))
-	i := 0
-	for _, err := range e {
-		nerr := mapping(err)
-		if nerr == nil {
-			continue
-		}
-		s[i] = nerr
-		i++
-	}
-	return Errors(s[:i])
-}
-
-// FilterNil returns the Errors without nil
-func (e Errors) FilterNil() Errors {
-	ne := e.Map(func(err error) error {
-		return err
-	})
-	return ne
-}
-
-// Err returns a error interface that filtered nil,
-// or nil if no non-nil Error is presented.
-func (e Errors) Err() error {
-	ne := e.FilterNil()
-	if len(ne) == 0 {
-		return nil
-	}
-	return ne
-}
-
-// Error returns a concatenated string of the contained errors
-func (e Errors) Error() string {
-	var buf bytes.Buffer
-
-	if len(e) == 0 {
-		buf.WriteString("no error")
-	}
-	if len(e) == 1 {
-		buf.WriteString("1 error: ")
-	} else {
-		fmt.Fprintf(&buf, "%d errors: ", len(e))
-	}
-
-	for i, err := range e {
-		if i != 0 {
-			buf.WriteString("; ")
-		}
-		buf.WriteString(err.Error())
-	}
-
-	return buf.String()
-}
```
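The Errors slice gives the union backend one error slot per upstream: concurrent writes fill their slot, and Err() collapses the slice to nil when every upstream succeeded. A condensed restatement, just enough to run; Map and FilterNil from the original are folded into Err, and the message format is simplified.

```go
package main

import (
	"errors"
	"fmt"
)

// Errors, condensed from the deleted file: a slice with one slot per
// upstream, where a nil entry means that upstream succeeded.
type Errors []error

func (e Errors) Error() string {
	s := fmt.Sprintf("%d error(s): ", len(e))
	for i, err := range e {
		if i != 0 {
			s += "; "
		}
		s += err.Error()
	}
	return s
}

// Err drops the nil entries and returns nil if nothing is left.
func (e Errors) Err() error {
	var ne Errors
	for _, err := range e {
		if err != nil {
			ne = append(ne, err)
		}
	}
	if len(ne) == 0 {
		return nil
	}
	return ne
}

func main() {
	errs := Errors(make([]error, 3)) // one slot per upstream
	errs[1] = errors.New("remote-b: permission denied")
	fmt.Println(errs.Err()) // 1 error(s): remote-b: permission denied

	fmt.Println(Errors(make([]error, 3)).Err()) // <nil>
}
```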
```diff
@@ -1,44 +0,0 @@
-package policy
-
-import (
-	"context"
-
-	"github.com/rclone/rclone/backend/union/upstream"
-	"github.com/rclone/rclone/fs"
-)
-
-func init() {
-	registerPolicy("all", &All{})
-}
-
-// All policy behaves the same as EpAll except for the CREATE category
-// Action category: same as epall.
-// Create category: apply to all branches.
-// Search category: same as epall.
-type All struct {
-	EpAll
-}
-
-// Create category policy, governing the creation of files and directories
-func (p *All) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	upstreams = filterNC(upstreams)
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorPermissionDenied
-	}
-	return upstreams, nil
-}
-
-// CreateEntries is CREATE category policy but receving a set of candidate entries
-func (p *All) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
-	if len(entries) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	entries = filterNCEntries(entries)
-	if len(entries) == 0 {
-		return nil, fs.ErrorPermissionDenied
-	}
-	return entries, nil
-}
```
```diff
@@ -1,99 +0,0 @@
-package policy
-
-import (
-	"context"
-	"path"
-	"sync"
-
-	"github.com/rclone/rclone/backend/union/upstream"
-	"github.com/rclone/rclone/fs"
-)
-
-func init() {
-	registerPolicy("epall", &EpAll{})
-}
-
-// EpAll stands for existing path, All
-// Action category: apply to all found.
-// Create category: apply to all found.
-// Search category: same as epff.
-type EpAll struct {
-	EpFF
-}
-
-func (p *EpAll) epall(ctx context.Context, upstreams []*upstream.Fs, filePath string) ([]*upstream.Fs, error) {
-	var wg sync.WaitGroup
-	ufs := make([]*upstream.Fs, len(upstreams))
-	for i, u := range upstreams {
-		wg.Add(1)
-		i, u := i, u // Closure
-		go func() {
-			rfs := u.RootFs
-			remote := path.Join(u.RootPath, filePath)
-			if findEntry(ctx, rfs, remote) != nil {
-				ufs[i] = u
-			}
-			wg.Done()
-		}()
-	}
-	wg.Wait()
-	var results []*upstream.Fs
-	for _, f := range ufs {
-		if f != nil {
-			results = append(results, f)
-		}
-	}
-	if len(results) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	return results, nil
-}
-
-// Action category policy, governing the modification of files and directories
-func (p *EpAll) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	upstreams = filterRO(upstreams)
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorPermissionDenied
-	}
-	return p.epall(ctx, upstreams, path)
-}
-
-// ActionEntries is ACTION category policy but receving a set of candidate entries
-func (p *EpAll) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
-	if len(entries) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	entries = filterROEntries(entries)
-	if len(entries) == 0 {
-		return nil, fs.ErrorPermissionDenied
-	}
-	return entries, nil
-}
-
-// Create category policy, governing the creation of files and directories
-func (p *EpAll) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	upstreams = filterNC(upstreams)
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorPermissionDenied
-	}
-	upstreams, err := p.epall(ctx, upstreams, path+"/..")
-	return upstreams, err
-}
-
-// CreateEntries is CREATE category policy but receving a set of candidate entries
-func (p *EpAll) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
-	if len(entries) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	entries = filterNCEntries(entries)
-	if len(entries) == 0 {
-		return nil, fs.ErrorPermissionDenied
-	}
-	return entries, nil
-}
```
```diff
@@ -1,115 +0,0 @@
-package policy
-
-import (
-	"context"
-	"path"
-
-	"github.com/rclone/rclone/backend/union/upstream"
-	"github.com/rclone/rclone/fs"
-)
-
-func init() {
-	registerPolicy("epff", &EpFF{})
-}
-
-// EpFF stands for existing path, first found
-// Given the order of the candidates, act on the first one found where the relative path exists.
-type EpFF struct{}
-
-func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath string) (*upstream.Fs, error) {
-	ch := make(chan *upstream.Fs)
-	for _, u := range upstreams {
-		u := u // Closure
-		go func() {
-			rfs := u.RootFs
-			remote := path.Join(u.RootPath, filePath)
-			if findEntry(ctx, rfs, remote) == nil {
-				u = nil
-			}
-			ch <- u
-		}()
-	}
-	var u *upstream.Fs
-	for i := 0; i < len(upstreams); i++ {
-		u = <-ch
-		if u != nil {
-			// close remaining goroutines
-			go func(num int) {
-				defer close(ch)
-				for i := 0; i < num; i++ {
-					<-ch
-				}
-			}(len(upstreams) - 1 - i)
-		}
-	}
-	if u == nil {
-		return nil, fs.ErrorObjectNotFound
-	}
-	return u, nil
-}
-
-// Action category policy, governing the modification of files and directories
-func (p *EpFF) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	upstreams = filterRO(upstreams)
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorPermissionDenied
-	}
-	u, err := p.epff(ctx, upstreams, path)
-	return []*upstream.Fs{u}, err
-}
-
-// ActionEntries is ACTION category policy but receving a set of candidate entries
-func (p *EpFF) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
-	if len(entries) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	entries = filterROEntries(entries)
-	if len(entries) == 0 {
-		return nil, fs.ErrorPermissionDenied
-	}
-	return entries[:1], nil
-}
-
-// Create category policy, governing the creation of files and directories
-func (p *EpFF) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	upstreams = filterNC(upstreams)
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorPermissionDenied
-	}
-	u, err := p.epff(ctx, upstreams, path+"/..")
-	return []*upstream.Fs{u}, err
-}
-
-// CreateEntries is CREATE category policy but receving a set of candidate entries
-func (p *EpFF) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
-	if len(entries) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	entries = filterNCEntries(entries)
-	if len(entries) == 0 {
-		return nil, fs.ErrorPermissionDenied
-	}
-	return entries[:1], nil
-}
-
-// Search category policy, governing the access to files and directories
-func (p *EpFF) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	return p.epff(ctx, upstreams, path)
-}
-
-// SearchEntries is SEARCH category policy but receving a set of candidate entries
-func (p *EpFF) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
-	if len(entries) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	return entries[0], nil
-}
```
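epff's lookup is a first-answer-wins race: every upstream reports into one unbuffered channel, and once a hit arrives a helper goroutine drains the remaining sends so no sender blocks forever. A self-contained sketch of that pattern follows; unlike the deleted code above, the sketch breaks out of the receive loop as soon as a winner appears, which avoids racing the main loop against the drainer.

```go
package main

import (
	"fmt"
	"time"
)

// firstFound mirrors the epff pattern above: every candidate reports on
// one channel, the first non-empty answer wins, and a helper goroutine
// drains the rest so no sender leaks.
func firstFound(candidates []string, ok func(string) bool) string {
	ch := make(chan string)
	for _, c := range candidates {
		c := c // closure
		go func() {
			if !ok(c) {
				c = ""
			}
			ch <- c
		}()
	}
	var winner string
	for i := 0; i < len(candidates); i++ {
		winner = <-ch
		if winner != "" {
			// drain the remaining senders in the background
			go func(n int) {
				for j := 0; j < n; j++ {
					<-ch
				}
			}(len(candidates) - 1 - i)
			break
		}
	}
	return winner
}

func main() {
	slowMatch := func(s string) bool { time.Sleep(10 * time.Millisecond); return s != "a" }
	fmt.Println(firstFound([]string{"a", "b", "c"}, slowMatch)) // b or c
}
```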
```diff
@@ -1,116 +0,0 @@
-package policy
-
-import (
-	"context"
-	"math"
-
-	"github.com/rclone/rclone/backend/union/upstream"
-	"github.com/rclone/rclone/fs"
-)
-
-func init() {
-	registerPolicy("eplfs", &EpLfs{})
-}
-
-// EpLfs stands for existing path, least free space
-// Of all the candidates on which the path exists choose the one with the least free space.
-type EpLfs struct {
-	EpAll
-}
-
-func (p *EpLfs) lfs(upstreams []*upstream.Fs) (*upstream.Fs, error) {
-	var minFreeSpace int64 = math.MaxInt64
-	var lfsupstream *upstream.Fs
-	for _, u := range upstreams {
-		space, err := u.GetFreeSpace()
-		if err != nil {
-			fs.LogPrintf(fs.LogLevelNotice, nil,
-				"Free Space is not supported for upstream %s, treating as infinite", u.Name())
-		}
-		if space < minFreeSpace {
-			minFreeSpace = space
-			lfsupstream = u
-		}
-	}
-	if lfsupstream == nil {
-		return nil, fs.ErrorObjectNotFound
-	}
-	return lfsupstream, nil
-}
-
-func (p *EpLfs) lfsEntries(entries []upstream.Entry) (upstream.Entry, error) {
-	var minFreeSpace int64
-	var lfsEntry upstream.Entry
-	for _, e := range entries {
-		space, err := e.UpstreamFs().GetFreeSpace()
-		if err != nil {
-			fs.LogPrintf(fs.LogLevelNotice, nil,
-				"Free Space is not supported for upstream %s, treating as infinite", e.UpstreamFs().Name())
-		}
-		if space < minFreeSpace {
-			minFreeSpace = space
-			lfsEntry = e
-		}
-	}
-	return lfsEntry, nil
-}
-
-// Action category policy, governing the modification of files and directories
-func (p *EpLfs) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	upstreams, err := p.EpAll.Action(ctx, upstreams, path)
-	if err != nil {
-		return nil, err
-	}
-	u, err := p.lfs(upstreams)
-	return []*upstream.Fs{u}, err
-}
-
-// ActionEntries is ACTION category policy but receving a set of candidate entries
-func (p *EpLfs) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
-	entries, err := p.EpAll.ActionEntries(entries...)
-	if err != nil {
-		return nil, err
-	}
-	e, err := p.lfsEntries(entries)
-	return []upstream.Entry{e}, err
-}
-
-// Create category policy, governing the creation of files and directories
-func (p *EpLfs) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	upstreams, err := p.EpAll.Create(ctx, upstreams, path)
-	if err != nil {
-		return nil, err
-	}
-	u, err := p.lfs(upstreams)
-	return []*upstream.Fs{u}, err
-}
-
-// CreateEntries is CREATE category policy but receving a set of candidate entries
-func (p *EpLfs) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
-	entries, err := p.EpAll.CreateEntries(entries...)
-	if err != nil {
-		return nil, err
-	}
-	e, err := p.lfsEntries(entries)
-	return []upstream.Entry{e}, err
-}
-
-// Search category policy, governing the access to files and directories
-func (p *EpLfs) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	upstreams, err := p.epall(ctx, upstreams, path)
-	if err != nil {
-		return nil, err
-	}
-	return p.lfs(upstreams)
-}
-
-// SearchEntries is SEARCH category policy but receving a set of candidate entries
-func (p *EpLfs) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
-	if len(entries) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	return p.lfsEntries(entries)
-}
```
```diff
@@ -1,116 +0,0 @@
-package policy
-
-import (
-	"context"
-	"math"
-
-	"github.com/rclone/rclone/backend/union/upstream"
-	"github.com/rclone/rclone/fs"
-)
-
-func init() {
-	registerPolicy("eplno", &EpLno{})
-}
-
-// EpLno stands for existing path, least number of objects
-// Of all the candidates on which the path exists choose the one with the least number of objects
-type EpLno struct {
-	EpAll
-}
-
-func (p *EpLno) lno(upstreams []*upstream.Fs) (*upstream.Fs, error) {
-	var minNumObj int64 = math.MaxInt64
-	var lnoUpstream *upstream.Fs
-	for _, u := range upstreams {
-		numObj, err := u.GetNumObjects()
-		if err != nil {
-			fs.LogPrintf(fs.LogLevelNotice, nil,
-				"Number of Objects is not supported for upstream %s, treating as 0", u.Name())
-		}
-		if minNumObj > numObj {
-			minNumObj = numObj
-			lnoUpstream = u
-		}
-	}
-	if lnoUpstream == nil {
-		return nil, fs.ErrorObjectNotFound
-	}
-	return lnoUpstream, nil
-}
-
-func (p *EpLno) lnoEntries(entries []upstream.Entry) (upstream.Entry, error) {
-	var minNumObj int64 = math.MaxInt64
-	var lnoEntry upstream.Entry
-	for _, e := range entries {
-		numObj, err := e.UpstreamFs().GetNumObjects()
-		if err != nil {
-			fs.LogPrintf(fs.LogLevelNotice, nil,
-				"Number of Objects is not supported for upstream %s, treating as 0", e.UpstreamFs().Name())
-		}
-		if minNumObj > numObj {
-			minNumObj = numObj
-			lnoEntry = e
-		}
-	}
-	return lnoEntry, nil
-}
-
-// Action category policy, governing the modification of files and directories
-func (p *EpLno) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	upstreams, err := p.EpAll.Action(ctx, upstreams, path)
-	if err != nil {
-		return nil, err
-	}
-	u, err := p.lno(upstreams)
-	return []*upstream.Fs{u}, err
-}
-
-// ActionEntries is ACTION category policy but receving a set of candidate entries
-func (p *EpLno) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
-	entries, err := p.EpAll.ActionEntries(entries...)
-	if err != nil {
-		return nil, err
-	}
-	e, err := p.lnoEntries(entries)
-	return []upstream.Entry{e}, err
-}
-
-// Create category policy, governing the creation of files and directories
-func (p *EpLno) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	upstreams, err := p.EpAll.Create(ctx, upstreams, path)
-	if err != nil {
-		return nil, err
-	}
-	u, err := p.lno(upstreams)
-	return []*upstream.Fs{u}, err
-}
-
-// CreateEntries is CREATE category policy but receving a set of candidate entries
-func (p *EpLno) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
-	entries, err := p.EpAll.CreateEntries(entries...)
-	if err != nil {
-		return nil, err
-	}
-	e, err := p.lnoEntries(entries)
-	return []upstream.Entry{e}, err
-}
-
-// Search category policy, governing the access to files and directories
-func (p *EpLno) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	upstreams, err := p.epall(ctx, upstreams, path)
-	if err != nil {
-		return nil, err
-	}
-	return p.lno(upstreams)
-}
-
-// SearchEntries is SEARCH category policy but receving a set of candidate entries
-func (p *EpLno) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
-	if len(entries) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	return p.lnoEntries(entries)
-}
```
```diff
@@ -1,116 +0,0 @@
-package policy
-
-import (
-	"context"
-	"math"
-
-	"github.com/rclone/rclone/backend/union/upstream"
-	"github.com/rclone/rclone/fs"
-)
-
-func init() {
-	registerPolicy("eplus", &EpLus{})
-}
-
-// EpLus stands for existing path, least used space
-// Of all the candidates on which the path exists choose the one with the least used space.
-type EpLus struct {
-	EpAll
-}
-
-func (p *EpLus) lus(upstreams []*upstream.Fs) (*upstream.Fs, error) {
-	var minUsedSpace int64 = math.MaxInt64
-	var lusupstream *upstream.Fs
-	for _, u := range upstreams {
-		space, err := u.GetUsedSpace()
-		if err != nil {
-			fs.LogPrintf(fs.LogLevelNotice, nil,
-				"Used Space is not supported for upstream %s, treating as 0", u.Name())
-		}
-		if space < minUsedSpace {
-			minUsedSpace = space
-			lusupstream = u
-		}
-	}
-	if lusupstream == nil {
-		return nil, fs.ErrorObjectNotFound
-	}
-	return lusupstream, nil
-}
-
-func (p *EpLus) lusEntries(entries []upstream.Entry) (upstream.Entry, error) {
-	var minUsedSpace int64
-	var lusEntry upstream.Entry
-	for _, e := range entries {
-		space, err := e.UpstreamFs().GetFreeSpace()
-		if err != nil {
-			fs.LogPrintf(fs.LogLevelNotice, nil,
-				"Used Space is not supported for upstream %s, treating as 0", e.UpstreamFs().Name())
-		}
-		if space < minUsedSpace {
-			minUsedSpace = space
-			lusEntry = e
-		}
-	}
-	return lusEntry, nil
-}
-
-// Action category policy, governing the modification of files and directories
-func (p *EpLus) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	upstreams, err := p.EpAll.Action(ctx, upstreams, path)
-	if err != nil {
-		return nil, err
-	}
-	u, err := p.lus(upstreams)
-	return []*upstream.Fs{u}, err
-}
-
-// ActionEntries is ACTION category policy but receving a set of candidate entries
-func (p *EpLus) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
-	entries, err := p.EpAll.ActionEntries(entries...)
-	if err != nil {
-		return nil, err
-	}
-	e, err := p.lusEntries(entries)
-	return []upstream.Entry{e}, err
-}
-
-// Create category policy, governing the creation of files and directories
-func (p *EpLus) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	upstreams, err := p.EpAll.Create(ctx, upstreams, path)
-	if err != nil {
-		return nil, err
-	}
-	u, err := p.lus(upstreams)
-	return []*upstream.Fs{u}, err
-}
-
-// CreateEntries is CREATE category policy but receving a set of candidate entries
-func (p *EpLus) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
-	entries, err := p.EpAll.CreateEntries(entries...)
-	if err != nil {
-		return nil, err
-	}
-	e, err := p.lusEntries(entries)
-	return []upstream.Entry{e}, err
-}
-
-// Search category policy, governing the access to files and directories
-func (p *EpLus) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	upstreams, err := p.epall(ctx, upstreams, path)
-	if err != nil {
-		return nil, err
-	}
-	return p.lus(upstreams)
-}
-
-// SearchEntries is SEARCH category policy but receving a set of candidate entries
-func (p *EpLus) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
-	if len(entries) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	return p.lusEntries(entries)
-}
```
```diff
@@ -1,115 +0,0 @@
-package policy
-
-import (
-	"context"
-
-	"github.com/rclone/rclone/backend/union/upstream"
-	"github.com/rclone/rclone/fs"
-)
-
-func init() {
-	registerPolicy("epmfs", &EpMfs{})
-}
-
-// EpMfs stands for existing path, most free space
-// Of all the candidates on which the path exists choose the one with the most free space.
-type EpMfs struct {
-	EpAll
-}
-
-func (p *EpMfs) mfs(upstreams []*upstream.Fs) (*upstream.Fs, error) {
-	var maxFreeSpace int64
-	var mfsupstream *upstream.Fs
-	for _, u := range upstreams {
-		space, err := u.GetFreeSpace()
-		if err != nil {
-			fs.LogPrintf(fs.LogLevelNotice, nil,
-				"Free Space is not supported for upstream %s, treating as infinite", u.Name())
-		}
-		if maxFreeSpace < space {
-			maxFreeSpace = space
-			mfsupstream = u
-		}
-	}
-	if mfsupstream == nil {
-		return nil, fs.ErrorObjectNotFound
-	}
-	return mfsupstream, nil
-}
-
-func (p *EpMfs) mfsEntries(entries []upstream.Entry) (upstream.Entry, error) {
-	var maxFreeSpace int64
-	var mfsEntry upstream.Entry
-	for _, e := range entries {
-		space, err := e.UpstreamFs().GetFreeSpace()
-		if err != nil {
-			fs.LogPrintf(fs.LogLevelNotice, nil,
-				"Free Space is not supported for upstream %s, treating as infinite", e.UpstreamFs().Name())
-		}
-		if maxFreeSpace < space {
-			maxFreeSpace = space
-			mfsEntry = e
-		}
-	}
-	return mfsEntry, nil
-}
-
-// Action category policy, governing the modification of files and directories
-func (p *EpMfs) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	upstreams, err := p.EpAll.Action(ctx, upstreams, path)
-	if err != nil {
-		return nil, err
-	}
-	u, err := p.mfs(upstreams)
-	return []*upstream.Fs{u}, err
-}
-
-// ActionEntries is ACTION category policy but receving a set of candidate entries
-func (p *EpMfs) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
-	entries, err := p.EpAll.ActionEntries(entries...)
-	if err != nil {
-		return nil, err
-	}
-	e, err := p.mfsEntries(entries)
-	return []upstream.Entry{e}, err
-}
-
-// Create category policy, governing the creation of files and directories
-func (p *EpMfs) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	upstreams, err := p.EpAll.Create(ctx, upstreams, path)
-	if err != nil {
-		return nil, err
-	}
-	u, err := p.mfs(upstreams)
-	return []*upstream.Fs{u}, err
-}
-
-// CreateEntries is CREATE category policy but receving a set of candidate entries
-func (p *EpMfs) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
-	entries, err := p.EpAll.CreateEntries(entries...)
-	if err != nil {
-		return nil, err
-	}
-	e, err := p.mfsEntries(entries)
-	return []upstream.Entry{e}, err
-}
-
-// Search category policy, governing the access to files and directories
-func (p *EpMfs) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	upstreams, err := p.epall(ctx, upstreams, path)
-	if err != nil {
-		return nil, err
-	}
-	return p.mfs(upstreams)
-}
-
-// SearchEntries is SEARCH category policy but receving a set of candidate entries
-func (p *EpMfs) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
-	if len(entries) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	return p.mfsEntries(entries)
-}
```
```diff
@@ -1,86 +0,0 @@
-package policy
-
-import (
-	"context"
-	"math/rand"
-	"time"
-
-	"github.com/rclone/rclone/backend/union/upstream"
-	"github.com/rclone/rclone/fs"
-)
-
-func init() {
-	registerPolicy("eprand", &EpRand{})
-}
-
-// EpRand stands for existing path, random
-// Calls epall and then randomizes. Returns one candidate.
-type EpRand struct {
-	EpAll
-}
-
-func (p *EpRand) rand(upstreams []*upstream.Fs) *upstream.Fs {
-	rand.Seed(time.Now().Unix())
-	return upstreams[rand.Intn(len(upstreams))]
-}
-
-func (p *EpRand) randEntries(entries []upstream.Entry) upstream.Entry {
-	rand.Seed(time.Now().Unix())
-	return entries[rand.Intn(len(entries))]
-}
-
-// Action category policy, governing the modification of files and directories
-func (p *EpRand) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	upstreams, err := p.EpAll.Action(ctx, upstreams, path)
-	if err != nil {
-		return nil, err
-	}
-	return []*upstream.Fs{p.rand(upstreams)}, nil
-}
-
-// ActionEntries is ACTION category policy but receving a set of candidate entries
-func (p *EpRand) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
-	entries, err := p.EpAll.ActionEntries(entries...)
-	if err != nil {
-		return nil, err
-	}
-	return []upstream.Entry{p.randEntries(entries)}, nil
-}
-
-// Create category policy, governing the creation of files and directories
-func (p *EpRand) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	upstreams, err := p.EpAll.Create(ctx, upstreams, path)
-	if err != nil {
-		return nil, err
-	}
-	return []*upstream.Fs{p.rand(upstreams)}, nil
-}
-
-// CreateEntries is CREATE category policy but receving a set of candidate entries
-func (p *EpRand) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
-	entries, err := p.EpAll.CreateEntries(entries...)
-	if err != nil {
-		return nil, err
-	}
-	return []upstream.Entry{p.randEntries(entries)}, nil
-}
-
-// Search category policy, governing the access to files and directories
-func (p *EpRand) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	upstreams, err := p.epall(ctx, upstreams, path)
-	if err != nil {
-		return nil, err
-	}
-	return p.rand(upstreams), nil
-}
-
-// SearchEntries is SEARCH category policy but receving a set of candidate entries
-func (p *EpRand) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
-	if len(entries) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	return p.randEntries(entries), nil
-}
```
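EpRand above reseeds the shared math/rand source with the current time on every call, which is redundant and racy if two goroutines pick at once. A common alternative, sketched here under that assumption, is to seed one source at startup and reuse it.

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// One seeded source reused for every pick. Note that a *rand.Rand is
// not safe for concurrent use without a lock.
var rng = rand.New(rand.NewSource(time.Now().UnixNano()))

func pick(candidates []string) string {
	return candidates[rng.Intn(len(candidates))]
}

func main() {
	fmt.Println(pick([]string{"remote-a:", "remote-b:", "remote-c:"}))
}
```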
```diff
@@ -1,32 +0,0 @@
-package policy
-
-import (
-	"context"
-
-	"github.com/rclone/rclone/backend/union/upstream"
-	"github.com/rclone/rclone/fs"
-)
-
-func init() {
-	registerPolicy("ff", &FF{})
-}
-
-// FF stands for first found
-// Search category: same as epff.
-// Action category: same as epff.
-// Create category: Given the order of the candiates, act on the first one found.
-type FF struct {
-	EpFF
-}
-
-// Create category policy, governing the creation of files and directories
-func (p *FF) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	upstreams = filterNC(upstreams)
-	if len(upstreams) == 0 {
-		return upstreams, fs.ErrorPermissionDenied
-	}
-	return upstreams[:1], nil
-}
```
```diff
@@ -1,33 +0,0 @@
-package policy
-
-import (
-	"context"
-
-	"github.com/rclone/rclone/backend/union/upstream"
-	"github.com/rclone/rclone/fs"
-)
-
-func init() {
-	registerPolicy("lfs", &Lfs{})
-}
-
-// Lfs stands for least free space
-// Search category: same as eplfs.
-// Action category: same as eplfs.
-// Create category: Pick the drive with the least free space.
-type Lfs struct {
-	EpLfs
-}
-
-// Create category policy, governing the creation of files and directories
-func (p *Lfs) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	upstreams = filterNC(upstreams)
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorPermissionDenied
-	}
-	u, err := p.lfs(upstreams)
-	return []*upstream.Fs{u}, err
-}
```
```diff
@@ -1,33 +0,0 @@
-package policy
-
-import (
-	"context"
-
-	"github.com/rclone/rclone/backend/union/upstream"
-	"github.com/rclone/rclone/fs"
-)
-
-func init() {
-	registerPolicy("lno", &Lno{})
-}
-
-// Lno stands for least number of objects
-// Search category: same as eplno.
-// Action category: same as eplno.
-// Create category: Pick the drive with the least number of objects.
-type Lno struct {
-	EpLno
-}
-
-// Create category policy, governing the creation of files and directories
-func (p *Lno) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	upstreams = filterNC(upstreams)
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorPermissionDenied
-	}
-	u, err := p.lno(upstreams)
-	return []*upstream.Fs{u}, err
-}
```
```diff
@@ -1,33 +0,0 @@
-package policy
-
-import (
-	"context"
-
-	"github.com/rclone/rclone/backend/union/upstream"
-	"github.com/rclone/rclone/fs"
-)
-
-func init() {
-	registerPolicy("lus", &Lus{})
-}
-
-// Lus stands for least free space
-// Search category: same as eplus.
-// Action category: same as eplus.
-// Create category: Pick the drive with the least used space.
-type Lus struct {
-	EpLus
-}
-
-// Create category policy, governing the creation of files and directories
-func (p *Lus) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	upstreams = filterNC(upstreams)
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorPermissionDenied
-	}
-	u, err := p.lus(upstreams)
-	return []*upstream.Fs{u}, err
-}
```
```diff
@@ -1,33 +0,0 @@
-package policy
-
-import (
-	"context"
-
-	"github.com/rclone/rclone/backend/union/upstream"
-	"github.com/rclone/rclone/fs"
-)
-
-func init() {
-	registerPolicy("mfs", &Mfs{})
-}
-
-// Mfs stands for most free space
-// Search category: same as epmfs.
-// Action category: same as epmfs.
-// Create category: Pick the drive with the most free space.
-type Mfs struct {
-	EpMfs
-}
-
-// Create category policy, governing the creation of files and directories
-func (p *Mfs) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	upstreams = filterNC(upstreams)
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorPermissionDenied
-	}
-	u, err := p.mfs(upstreams)
-	return []*upstream.Fs{u}, err
-}
```
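Each of ff, lfs, lno, lus and mfs deleted above is the same small move: embed the corresponding existing-path policy, inherit its Action and Search categories, and override only Create so new files may land on branches where the path does not yet exist. A toy illustration of that embedding pattern; the names and methods here are stand-ins, not the real interfaces.

```go
package main

import "fmt"

// Mfs embeds EpMfs, inherits Search, and overrides only Create,
// mirroring how the deleted policy files reuse each other.
type EpMfs struct{}

func (EpMfs) Search() string { return "epmfs search" }
func (EpMfs) Create() string { return "epmfs create (existing paths only)" }

type Mfs struct{ EpMfs }

func (Mfs) Create() string { return "mfs create (all branches considered)" }

func main() {
	var p Mfs
	fmt.Println(p.Search()) // inherited from EpMfs
	fmt.Println(p.Create()) // overridden by Mfs
}
```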
```diff
@@ -1,149 +0,0 @@
-package policy
-
-import (
-	"context"
-	"path"
-	"sync"
-	"time"
-
-	"github.com/rclone/rclone/backend/union/upstream"
-	"github.com/rclone/rclone/fs"
-)
-
-func init() {
-	registerPolicy("newest", &Newest{})
-}
-
-// Newest policy picks the file / directory with the largest mtime
-// It implies the existance of a path
-type Newest struct {
-	EpAll
-}
-
-func (p *Newest) newest(ctx context.Context, upstreams []*upstream.Fs, filePath string) (*upstream.Fs, error) {
-	var wg sync.WaitGroup
-	ufs := make([]*upstream.Fs, len(upstreams))
-	mtimes := make([]time.Time, len(upstreams))
-	for i, u := range upstreams {
-		wg.Add(1)
-		i, u := i, u // Closure
-		go func() {
-			defer wg.Done()
-			rfs := u.RootFs
-			remote := path.Join(u.RootPath, filePath)
-			if e := findEntry(ctx, rfs, remote); e != nil {
-				ufs[i] = u
-				mtimes[i] = e.ModTime(ctx)
-			}
-		}()
-	}
-	wg.Wait()
-	maxMtime := time.Time{}
-	var newestFs *upstream.Fs
-	for i, u := range ufs {
-		if u != nil && mtimes[i].After(maxMtime) {
-			maxMtime = mtimes[i]
-			newestFs = u
-		}
-	}
-	if newestFs == nil {
-		return nil, fs.ErrorObjectNotFound
-	}
-	return newestFs, nil
-}
-
-func (p *Newest) newestEntries(entries []upstream.Entry) (upstream.Entry, error) {
-	var wg sync.WaitGroup
-	mtimes := make([]time.Time, len(entries))
-	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-	defer cancel()
-	for i, e := range entries {
-		wg.Add(1)
-		i, e := i, e // Closure
-		go func() {
-			defer wg.Done()
-			mtimes[i] = e.ModTime(ctx)
-		}()
-	}
-	wg.Wait()
-	maxMtime := time.Time{}
-	var newestEntry upstream.Entry
-	for i, t := range mtimes {
-		if t.After(maxMtime) {
-			maxMtime = t
-			newestEntry = entries[i]
-		}
-	}
-	if newestEntry == nil {
-		return nil, fs.ErrorObjectNotFound
-	}
-	return newestEntry, nil
-}
-
-// Action category policy, governing the modification of files and directories
-func (p *Newest) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	upstreams = filterRO(upstreams)
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorPermissionDenied
-	}
-	u, err := p.newest(ctx, upstreams, path)
-	return []*upstream.Fs{u}, err
-}
-
-// ActionEntries is ACTION category policy but receving a set of candidate entries
-func (p *Newest) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
-	if len(entries) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	entries = filterROEntries(entries)
-	if len(entries) == 0 {
-		return nil, fs.ErrorPermissionDenied
-	}
-	e, err := p.newestEntries(entries)
-	return []upstream.Entry{e}, err
-}
-
-// Create category policy, governing the creation of files and directories
-func (p *Newest) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	upstreams = filterNC(upstreams)
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorPermissionDenied
-	}
-	u, err := p.newest(ctx, upstreams, path+"/..")
-	return []*upstream.Fs{u}, err
-}
-
-// CreateEntries is CREATE category policy but receving a set of candidate entries
-func (p *Newest) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
-	if len(entries) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	entries = filterNCEntries(entries)
-	if len(entries) == 0 {
-		return nil, fs.ErrorPermissionDenied
-	}
-	e, err := p.newestEntries(entries)
-	return []upstream.Entry{e}, err
-}
-
-// Search category policy, governing the access to files and directories
-func (p *Newest) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	return p.newest(ctx, upstreams, path)
-}
-
-// SearchEntries is SEARCH category policy but receving a set of candidate entries
-func (p *Newest) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
-	if len(entries) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	return p.newestEntries(entries)
-}
```
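Newest gathers every candidate's mtime concurrently behind a WaitGroup and then keeps the largest one. The same shape in miniature, with plain functions standing in for upstream filesystems:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// pickNewest mirrors the Newest policy above: query every candidate's
// mtime concurrently, then keep the index with the largest value.
func pickNewest(sources []func() time.Time) int {
	var wg sync.WaitGroup
	got := make([]time.Time, len(sources))
	for i, f := range sources {
		wg.Add(1)
		i, f := i, f // closure
		go func() {
			defer wg.Done()
			got[i] = f()
		}()
	}
	wg.Wait()
	best, max := -1, time.Time{}
	for i, t := range got {
		if t.After(max) {
			max, best = t, i
		}
	}
	return best
}

func main() {
	now := time.Now()
	sources := []func() time.Time{
		func() time.Time { return now.Add(-time.Hour) },
		func() time.Time { return now },
	}
	fmt.Println(pickNewest(sources)) // 1
}
```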
```diff
@@ -1,129 +0,0 @@
-package policy
-
-import (
-	"context"
-	"math/rand"
-	"path"
-	"strings"
-	"time"
-
-	"github.com/pkg/errors"
-	"github.com/rclone/rclone/backend/union/upstream"
-	"github.com/rclone/rclone/fs"
-)
-
-var policies = make(map[string]Policy)
-
-// Policy is the interface of a set of defined behavior choosing
-// the upstream Fs to operate on
-type Policy interface {
-	// Action category policy, governing the modification of files and directories
-	Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error)
-
-	// Create category policy, governing the creation of files and directories
-	Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error)
-
-	// Search category policy, governing the access to files and directories
-	Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error)
-
-	// ActionEntries is ACTION category policy but receving a set of candidate entries
-	ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error)
-
-	// CreateEntries is CREATE category policy but receving a set of candidate entries
-	CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error)
-
-	// SearchEntries is SEARCH category policy but receving a set of candidate entries
-	SearchEntries(entries ...upstream.Entry) (upstream.Entry, error)
-}
-
-func registerPolicy(name string, p Policy) {
-	policies[strings.ToLower(name)] = p
-}
-
-// Get a Policy from the list
-func Get(name string) (Policy, error) {
-	p, ok := policies[strings.ToLower(name)]
-	if !ok {
-		return nil, errors.Errorf("didn't find policy called %q", name)
-	}
-	return p, nil
-}
-
-func filterRO(ufs []*upstream.Fs) (wufs []*upstream.Fs) {
-	for _, u := range ufs {
-		if u.IsWritable() {
-			wufs = append(wufs, u)
-		}
-	}
-	return wufs
-}
-
-func filterROEntries(ue []upstream.Entry) (wue []upstream.Entry) {
-	for _, e := range ue {
-		if e.UpstreamFs().IsWritable() {
-			wue = append(wue, e)
-		}
-	}
-	return wue
-}
-
-func filterNC(ufs []*upstream.Fs) (wufs []*upstream.Fs) {
-	for _, u := range ufs {
-		if u.IsCreatable() {
-			wufs = append(wufs, u)
-		}
-	}
-	return wufs
-}
-
-func filterNCEntries(ue []upstream.Entry) (wue []upstream.Entry) {
-	for _, e := range ue {
-		if e.UpstreamFs().IsCreatable() {
-			wue = append(wue, e)
-		}
-	}
-	return wue
-}
-
-func parentDir(absPath string) string {
-	parent := path.Dir(strings.TrimRight(absPath, "/"))
-	if parent == "." {
-		parent = ""
-	}
-	return parent
-}
-
-func clean(absPath string) string {
-	cleanPath := path.Clean(absPath)
-	if cleanPath == "." {
-		cleanPath = ""
-	}
-	return cleanPath
-}
-
-func findEntry(ctx context.Context, f fs.Fs, remote string) fs.DirEntry {
-	remote = clean(remote)
-	dir := parentDir(remote)
-	entries, err := f.List(ctx, dir)
-	if remote == dir {
-		if err != nil {
-			return nil
-		}
-		// random modtime for root
-		randomNow := time.Unix(time.Now().Unix()-rand.Int63n(10000), 0)
-		return fs.NewDir("", randomNow)
-	}
-	found := false
-	for _, e := range entries {
-		eRemote := e.Remote()
-		if f.Features().CaseInsensitive {
-			found = strings.EqualFold(remote, eRemote)
-		} else {
-			found = (remote == eRemote)
-		}
-		if found {
-			return e
-		}
-	}
-	return nil
-}
```
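This file is a classic name-to-implementation registry: each policy file registers itself from init(), and Get does a case-insensitive lookup. The same mechanism in miniature, with a trivial stand-in for the Policy interface and fmt.Errorf in place of pkg/errors:

```go
package main

import (
	"fmt"
	"strings"
)

// A minimal registry in the same shape as registerPolicy/Get above.
type Policy interface{ Name() string }

type epall struct{}

func (epall) Name() string { return "epall" }

var policies = make(map[string]Policy)

func registerPolicy(name string, p Policy) {
	policies[strings.ToLower(name)] = p
}

// Get looks a policy up by name, case-insensitively.
func Get(name string) (Policy, error) {
	p, ok := policies[strings.ToLower(name)]
	if !ok {
		return nil, fmt.Errorf("didn't find policy called %q", name)
	}
	return p, nil
}

func main() {
	registerPolicy("epall", epall{})
	p, err := Get("EPALL") // lookup is case-insensitive
	fmt.Println(p.Name(), err)
}
```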
```diff
@@ -1,83 +0,0 @@
-package policy
-
-import (
-	"context"
-	"math/rand"
-
-	"github.com/rclone/rclone/backend/union/upstream"
-	"github.com/rclone/rclone/fs"
-)
-
-func init() {
-	registerPolicy("rand", &Rand{})
-}
-
-// Rand stands for random
-// Calls all and then randomizes. Returns one candidate.
-type Rand struct {
-	All
-}
-
-func (p *Rand) rand(upstreams []*upstream.Fs) *upstream.Fs {
-	return upstreams[rand.Intn(len(upstreams))]
-}
-
-func (p *Rand) randEntries(entries []upstream.Entry) upstream.Entry {
-	return entries[rand.Intn(len(entries))]
-}
-
-// Action category policy, governing the modification of files and directories
-func (p *Rand) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	upstreams, err := p.All.Action(ctx, upstreams, path)
-	if err != nil {
-		return nil, err
-	}
-	return []*upstream.Fs{p.rand(upstreams)}, nil
-}
-
-// ActionEntries is ACTION category policy but receving a set of candidate entries
-func (p *Rand) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
-	entries, err := p.All.ActionEntries(entries...)
-	if err != nil {
-		return nil, err
-	}
-	return []upstream.Entry{p.randEntries(entries)}, nil
-}
-
-// Create category policy, governing the creation of files and directories
-func (p *Rand) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
-	upstreams, err := p.All.Create(ctx, upstreams, path)
-	if err != nil {
-		return nil, err
-	}
-	return []*upstream.Fs{p.rand(upstreams)}, nil
-}
-
-// CreateEntries is CREATE category policy but receving a set of candidate entries
-func (p *Rand) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
-	entries, err := p.All.CreateEntries(entries...)
-	if err != nil {
-		return nil, err
-	}
-	return []upstream.Entry{p.randEntries(entries)}, nil
-}
-
-// Search category policy, governing the access to files and directories
-func (p *Rand) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
-	if len(upstreams) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	upstreams, err := p.epall(ctx, upstreams, path)
-	if err != nil {
-		return nil, err
-	}
-	return p.rand(upstreams), nil
-}
-
-// SearchEntries is SEARCH category policy but receving a set of candidate entries
-func (p *Rand) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
-	if len(entries) == 0 {
-		return nil, fs.ErrorObjectNotFound
-	}
-	return p.randEntries(entries), nil
-}
```
@@ -1,20 +1,17 @@
package union

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"path"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/union/policy"
	"github.com/rclone/rclone/backend/union/upstream"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/hash"
@@ -24,32 +21,12 @@ import (
func init() {
	fsi := &fs.RegInfo{
		Name:        "union",
		Description: "Union merges the contents of several upstream fs",
		Description: "Union merges the contents of several remotes",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:     "upstreams",
			Help:     "List of space separated upstreams.\nCan be 'upstreama:test/dir upstreamb:', '\"upstreama:test/space:ro dir\" upstreamb:', etc.\n",
			Name:     "remotes",
			Help:     "List of space separated remotes.\nCan be 'remotea:test/dir remoteb:', '\"remotea:test/space dir\" remoteb:', etc.\nThe last remote is used to write to.",
			Required: true,
		}, {
			Name:     "action_policy",
			Help:     "Policy to choose upstream on ACTION category.",
			Required: true,
			Default:  "epall",
		}, {
			Name:     "create_policy",
			Help:     "Policy to choose upstream on CREATE category.",
			Required: true,
			Default:  "epmfs",
		}, {
			Name:     "search_policy",
			Help:     "Policy to choose upstream on SEARCH category.",
			Required: true,
			Default:  "ff",
		}, {
			Name:     "cache_time",
			Help:     "Cache time of usage and free space (in seconds)",
			Required: true,
			Default:  120,
		}},
	}
	fs.Register(fsi)
@@ -57,50 +34,41 @@ func init() {

// Options defines the configuration for this backend
type Options struct {
	Upstreams    fs.SpaceSepList `config:"upstreams"`
	Remotes      fs.SpaceSepList `config:"remotes"` // Deprecated
	ActionPolicy string          `config:"action_policy"`
	CreatePolicy string          `config:"create_policy"`
	SearchPolicy string          `config:"search_policy"`
	CacheTime    int             `config:"cache_time"`
	Remotes fs.SpaceSepList `config:"remotes"`
}

// Fs represents a union of upstreams
// Fs represents a union of remotes
type Fs struct {
	name         string         // name of this remote
	features     *fs.Features   // optional features
	opt          Options        // options for this Fs
	root         string         // the path we are working on
	upstreams    []*upstream.Fs // slice of upstreams
	hashSet      hash.Set       // intersection of hash types
	actionPolicy policy.Policy  // policy for ACTION
	createPolicy policy.Policy  // policy for CREATE
	searchPolicy policy.Policy  // policy for SEARCH
	name     string       // name of this remote
	features *fs.Features // optional features
	opt      Options      // options for this Fs
	root     string       // the path we are working on
	remotes  []fs.Fs      // slice of remotes
	wr       fs.Fs        // writable remote
	hashSet  hash.Set     // intersection of hash types
}

// Wrap candidate objects into a union Object
func (f *Fs) wrapEntries(entries ...upstream.Entry) (entry, error) {
	e, err := f.searchEntries(entries...)
	if err != nil {
		return nil, err
	}
	switch e.(type) {
	case *upstream.Object:
		return &Object{
			Object: e.(*upstream.Object),
			fs:     f,
			co:     entries,
		}, nil
	case *upstream.Directory:
		return &Directory{
			Directory: e.(*upstream.Directory),
			cd:        entries,
		}, nil
	default:
		return nil, errors.Errorf("unknown object type %T", e)
// Object describes a union Object
//
// This is a wrapped object which returns the Union Fs as its parent
type Object struct {
	fs.Object
	fs *Fs // what this object is part of
}

// Wrap an existing object in the union Object
func (f *Fs) wrapObject(o fs.Object) *Object {
	return &Object{
		Object: o,
		fs:     f,
	}
}

// Fs returns the union Fs as the parent
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
@@ -123,16 +91,7 @@ func (f *Fs) Features() *fs.Features {

// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	upstreams, err := f.action(ctx, dir)
	if err != nil {
		return err
	}
	errs := Errors(make([]error, len(upstreams)))
	multithread(len(upstreams), func(i int) {
		err := upstreams[i].Rmdir(ctx, dir)
		errs[i] = errors.Wrap(err, upstreams[i].Name())
	})
	return errs.Err()
	return f.wr.Rmdir(ctx, dir)
}

// Hashes returns hash.HashNone to indicate remote hashing is unavailable
@@ -142,22 +101,7 @@ func (f *Fs) Hashes() hash.Set {

// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	upstreams, err := f.create(ctx, dir)
	if err == fs.ErrorObjectNotFound && dir != parentDir(dir) {
		if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
			return err
		}
		upstreams, err = f.create(ctx, dir)
	}
	if err != nil {
		return err
	}
	errs := Errors(make([]error, len(upstreams)))
	multithread(len(upstreams), func(i int) {
		err := upstreams[i].Mkdir(ctx, dir)
		errs[i] = errors.Wrap(err, upstreams[i].Name())
	})
	return errs.Err()
	return f.wr.Mkdir(ctx, dir)
}

// Purge all files in the root and the root directory
@@ -167,21 +111,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context) error {
	for _, r := range f.upstreams {
		if r.Features().Purge == nil {
			return fs.ErrorCantPurge
		}
	}
	upstreams, err := f.action(ctx, "")
	if err != nil {
		return err
	}
	errs := Errors(make([]error, len(upstreams)))
	multithread(len(upstreams), func(i int) {
		err := upstreams[i].Features().Purge(ctx)
		errs[i] = errors.Wrap(err, upstreams[i].Name())
	})
	return errs.Err()
	return f.wr.Features().Purge(ctx)
}

// Copy src to this remote using server side copy operations.
@@ -194,26 +124,15 @@ func (f *Fs) Purge(ctx context.Context) error {
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
	if src.Fs() != f.wr {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	o := srcObj.UnWrap()
	u := o.UpstreamFs()
	do := u.Features().Copy
	if do == nil {
		return nil, fs.ErrorCantCopy
	}
	if !u.IsCreatable() {
		return nil, fs.ErrorPermissionDenied
	}
	co, err := do(ctx, o, remote)
	if err != nil || co == nil {
	o, err := f.wr.Features().Copy(ctx, src, remote)
	if err != nil {
		return nil, err
	}
	wo, err := f.wrapEntries(u.WrapObject(co))
	return wo.(*Object), err
	return f.wrapObject(o), nil
}

// Move src to this remote using server side move operations.
@@ -226,47 +145,15 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	o, ok := src.(*Object)
	if !ok {
	if src.Fs() != f.wr {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}
	entries, err := f.actionEntries(o.candidates()...)
	o, err := f.wr.Features().Move(ctx, src, remote)
	if err != nil {
		return nil, err
	}
	for _, e := range entries {
		if e.UpstreamFs().Features().Move == nil {
			return nil, fs.ErrorCantMove
		}
	}
	objs := make([]*upstream.Object, len(entries))
	errs := Errors(make([]error, len(entries)))
	multithread(len(entries), func(i int) {
		u := entries[i].UpstreamFs()
		o, ok := entries[i].(*upstream.Object)
		if !ok {
			errs[i] = errors.Wrap(fs.ErrorNotAFile, u.Name())
			return
		}
		mo, err := u.Features().Move(ctx, o.UnWrap(), remote)
		if err != nil || mo == nil {
			errs[i] = errors.Wrap(err, u.Name())
			return
		}
		objs[i] = u.WrapObject(mo)
	})
	var en []upstream.Entry
	for _, o := range objs {
		if o != nil {
			en = append(en, o)
		}
	}
	e, err := f.wrapEntries(en...)
	if err != nil {
		return nil, err
	}
	return e.(*Object), errs.Err()
	return f.wrapObject(o), err
}

// DirMove moves src, srcRemote to this remote at dstRemote
@@ -278,46 +165,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	sfs, ok := src.(*Fs)
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(src, "Can't move directory - not same remote type")
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	upstreams, err := sfs.action(ctx, srcRemote)
	if err != nil {
		return err
	}
	for _, u := range upstreams {
		if u.Features().DirMove == nil {
			return fs.ErrorCantDirMove
		}
	}
	errs := Errors(make([]error, len(upstreams)))
	multithread(len(upstreams), func(i int) {
		su := upstreams[i]
		var du *upstream.Fs
		for _, u := range f.upstreams {
			if u.RootFs.Root() == su.RootFs.Root() {
				du = u
			}
		}
		if du == nil {
			errs[i] = errors.Wrap(fs.ErrorCantDirMove, su.Name()+":"+su.Root())
			return
		}
		err := du.Features().DirMove(ctx, su.Fs, srcRemote, dstRemote)
		errs[i] = errors.Wrap(err, du.Name()+":"+du.Root())
	})
	errs = errs.FilterNil()
	if len(errs) == 0 {
		return nil
	}
	for _, e := range errs {
		if errors.Cause(e) != fs.ErrorDirExists {
			return errs
		}
	}
	return fs.ErrorDirExists
	return f.wr.Features().DirMove(ctx, srcFs.wr, srcRemote, dstRemote)
}

// ChangeNotify calls the passed function with a path
@@ -330,23 +183,23 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, fn func(string, fs.EntryType), ch <-chan time.Duration) {
	var uChans []chan time.Duration
	var remoteChans []chan time.Duration

	for _, u := range f.upstreams {
		if ChangeNotify := u.Features().ChangeNotify; ChangeNotify != nil {
	for _, remote := range f.remotes {
		if ChangeNotify := remote.Features().ChangeNotify; ChangeNotify != nil {
			ch := make(chan time.Duration)
			uChans = append(uChans, ch)
			remoteChans = append(remoteChans, ch)
			ChangeNotify(ctx, fn, ch)
		}
	}

	go func() {
		for i := range ch {
			for _, c := range uChans {
			for _, c := range remoteChans {
				c <- i
			}
		}
		for _, c := range uChans {
		for _, c := range remoteChans {
			close(c)
		}
	}()
@@ -355,103 +208,10 @@ func (f *Fs) ChangeNotify(ctx context.Context, fn func(string, fs.EntryType), ch
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
	multithread(len(f.upstreams), func(i int) {
		if do := f.upstreams[i].Features().DirCacheFlush; do != nil {
			do()
	for _, remote := range f.remotes {
		if DirCacheFlush := remote.Features().DirCacheFlush; DirCacheFlush != nil {
			DirCacheFlush()
		}
	})
}

func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
	srcPath := src.Remote()
	upstreams, err := f.create(ctx, srcPath)
	if err == fs.ErrorObjectNotFound {
		if err := f.Mkdir(ctx, parentDir(srcPath)); err != nil {
			return nil, err
		}
		upstreams, err = f.create(ctx, srcPath)
	}
	if err != nil {
		return nil, err
	}
	if len(upstreams) == 1 {
		u := upstreams[0]
		var o fs.Object
		var err error
		if stream {
			o, err = u.Features().PutStream(ctx, in, src, options...)
		} else {
			o, err = u.Put(ctx, in, src, options...)
		}
		if err != nil {
			return nil, err
		}
		e, err := f.wrapEntries(u.WrapObject(o))
		return e.(*Object), err
	}
	errs := Errors(make([]error, len(upstreams)+1))
	// Get multiple readers
	readers := make([]io.Reader, len(upstreams))
	writers := make([]io.Writer, len(upstreams))
	for i := range writers {
		r, w := io.Pipe()
		bw := bufio.NewWriter(w)
		readers[i], writers[i] = r, bw
		defer func() {
			err := w.Close()
			if err != nil {
				panic(err)
			}
		}()
	}
	go func() {
		mw := io.MultiWriter(writers...)
		es := make([]error, len(writers)+1)
		_, es[len(es)-1] = io.Copy(mw, in)
		for i, bw := range writers {
			es[i] = bw.(*bufio.Writer).Flush()
		}
		errs[len(upstreams)] = Errors(es).Err()
	}()
	// Multi-threading
	objs := make([]upstream.Entry, len(upstreams))
	multithread(len(upstreams), func(i int) {
		u := upstreams[i]
		var o fs.Object
		var err error
		if stream {
			o, err = u.Features().PutStream(ctx, readers[i], src, options...)
		} else {
			o, err = u.Put(ctx, readers[i], src, options...)
		}
		if err != nil {
			errs[i] = errors.Wrap(err, u.Name())
			return
		}
		objs[i] = u.WrapObject(o)
	})
	err = errs.Err()
	if err != nil {
		return nil, err
	}
	e, err := f.wrapEntries(objs...)
	return e.(*Object), err
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	o, err := f.NewObject(ctx, src.Remote())
	switch err {
	case nil:
		return o, o.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		return f.put(ctx, in, src, false, options...)
	default:
		return nil, err
	}
}

@@ -461,64 +221,29 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	o, err := f.NewObject(ctx, src.Remote())
	switch err {
	case nil:
		return o, o.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		return f.put(ctx, in, src, true, options...)
	default:
	o, err := f.wr.Features().PutStream(ctx, in, src, options...)
	if err != nil {
		return nil, err
	}
	return f.wrapObject(o), err
}

// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	usage := &fs.Usage{
		Total:   new(int64),
		Used:    new(int64),
		Trashed: new(int64),
		Other:   new(int64),
		Free:    new(int64),
		Objects: new(int64),
	return f.wr.Features().About(ctx)
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	o, err := f.wr.Put(ctx, in, src, options...)
	if err != nil {
		return nil, err
	}
	for _, u := range f.upstreams {
		usg, err := u.About(ctx)
		if err != nil {
			return nil, err
		}
		if usg.Total != nil && usage.Total != nil {
			*usage.Total += *usg.Total
		} else {
			usage.Total = nil
		}
		if usg.Used != nil && usage.Used != nil {
			*usage.Used += *usg.Used
		} else {
			usage.Used = nil
		}
		if usg.Trashed != nil && usage.Trashed != nil {
			*usage.Trashed += *usg.Trashed
		} else {
			usage.Trashed = nil
		}
		if usg.Other != nil && usage.Other != nil {
			*usage.Other += *usg.Other
		} else {
			usage.Other = nil
		}
		if usg.Free != nil && usage.Free != nil {
			*usage.Free += *usg.Free
		} else {
			usage.Free = nil
		}
		if usg.Objects != nil && usage.Objects != nil {
			*usage.Objects += *usg.Objects
		} else {
			usage.Objects = nil
		}
	}
	return usage, nil
	return f.wrapObject(o), err
}

// List the objects and directories in dir into entries. The
@@ -531,125 +256,60 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	entriess := make([][]upstream.Entry, len(f.upstreams))
	errs := Errors(make([]error, len(f.upstreams)))
	multithread(len(f.upstreams), func(i int) {
		u := f.upstreams[i]
		entries, err := u.List(ctx, dir)
	set := make(map[string]fs.DirEntry)
	found := false
	for _, remote := range f.remotes {
		var remoteEntries, err = remote.List(ctx, dir)
		if err == fs.ErrorDirNotFound {
			continue
		}
		if err != nil {
			errs[i] = errors.Wrap(err, u.Name())
			return
			return nil, errors.Wrapf(err, "List failed on %v", remote)
		}
		uEntries := make([]upstream.Entry, len(entries))
		for j, e := range entries {
			uEntries[j], _ = u.WrapEntry(e)
		found = true
		for _, remoteEntry := range remoteEntries {
			set[remoteEntry.Remote()] = remoteEntry
		}
		entriess[i] = uEntries
	})
	if len(errs) == len(errs.FilterNil()) {
		errs = errs.Map(func(e error) error {
			if errors.Cause(e) == fs.ErrorDirNotFound {
				return nil
			}
			return e
		})
		if len(errs) == 0 {
			return nil, fs.ErrorDirNotFound
		}
		return nil, errs.Err()
	}
	return f.mergeDirEntries(entriess)
	if !found {
		return nil, fs.ErrorDirNotFound
	}
	for _, entry := range set {
		if o, ok := entry.(fs.Object); ok {
			entry = f.wrapObject(o)
		}
		entries = append(entries, entry)
	}
	return entries, nil
}

// NewObject creates a new remote union file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	objs := make([]*upstream.Object, len(f.upstreams))
	errs := Errors(make([]error, len(f.upstreams)))
	multithread(len(f.upstreams), func(i int) {
		u := f.upstreams[i]
		o, err := u.NewObject(ctx, remote)
		if err != nil && err != fs.ErrorObjectNotFound {
			errs[i] = errors.Wrap(err, u.Name())
			return
// NewObject creates a new remote union file object based on the first Object it finds (reverse remote order)
func (f *Fs) NewObject(ctx context.Context, path string) (fs.Object, error) {
	for i := range f.remotes {
		var remote = f.remotes[len(f.remotes)-i-1]
		var obj, err = remote.NewObject(ctx, path)
		if err == fs.ErrorObjectNotFound {
			continue
		}
		objs[i] = u.WrapObject(o)
	})
	var entries []upstream.Entry
	for _, o := range objs {
		if o != nil {
			entries = append(entries, o)
		if err != nil {
			return nil, errors.Wrapf(err, "NewObject failed on %v", remote)
		}
		return f.wrapObject(obj), nil
	}
	if len(entries) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	e, err := f.wrapEntries(entries...)
	if err != nil {
		return nil, err
	}
	return e.(*Object), errs.Err()
	return nil, fs.ErrorObjectNotFound
}

// Precision is the greatest Precision of all upstreams
// Precision is the greatest Precision of all remotes
func (f *Fs) Precision() time.Duration {
	var greatestPrecision time.Duration
	for _, u := range f.upstreams {
		if u.Precision() > greatestPrecision {
			greatestPrecision = u.Precision()
	for _, remote := range f.remotes {
		if remote.Precision() > greatestPrecision {
			greatestPrecision = remote.Precision()
		}
	}
	return greatestPrecision
}

func (f *Fs) action(ctx context.Context, path string) ([]*upstream.Fs, error) {
	return f.actionPolicy.Action(ctx, f.upstreams, path)
}

func (f *Fs) actionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
	return f.actionPolicy.ActionEntries(entries...)
}

func (f *Fs) create(ctx context.Context, path string) ([]*upstream.Fs, error) {
	return f.createPolicy.Create(ctx, f.upstreams, path)
}

func (f *Fs) createEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
	return f.createPolicy.CreateEntries(entries...)
}

func (f *Fs) search(ctx context.Context, path string) (*upstream.Fs, error) {
	return f.searchPolicy.Search(ctx, f.upstreams, path)
}

func (f *Fs) searchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
	return f.searchPolicy.SearchEntries(entries...)
}

func (f *Fs) mergeDirEntries(entriess [][]upstream.Entry) (fs.DirEntries, error) {
	entryMap := make(map[string]([]upstream.Entry))
	for _, en := range entriess {
		if en == nil {
			continue
		}
		for _, entry := range en {
			remote := entry.Remote()
			if f.Features().CaseInsensitive {
				remote = strings.ToLower(remote)
			}
			entryMap[remote] = append(entryMap[remote], entry)
		}
	}
	var entries fs.DirEntries
	for path := range entryMap {
		e, err := f.wrapEntries(entryMap[path]...)
		if err != nil {
			return nil, err
		}
		entries = append(entries, e)
	}
	return entries, nil
}

// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
@@ -660,64 +320,51 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	if err != nil {
		return nil, err
	}
	// Backward compatible with the old config
	if len(opt.Upstreams) == 0 && len(opt.Remotes) > 0 {
		for i := 0; i < len(opt.Remotes)-1; i++ {
			opt.Remotes[i] = opt.Remotes[i] + ":ro"
		}
		opt.Upstreams = opt.Remotes
	if len(opt.Remotes) == 0 {
		return nil, errors.New("union can't point to an empty remote - check the value of the remotes setting")
	}
	if len(opt.Upstreams) == 0 {
		return nil, errors.New("union can't point to an empty upstream - check the value of the upstreams setting")
	if len(opt.Remotes) == 1 {
		return nil, errors.New("union can't point to a single remote - check the value of the remotes setting")
	}
	if len(opt.Upstreams) == 1 {
		return nil, errors.New("union can't point to a single upstream - check the value of the upstreams setting")
	}
	for _, u := range opt.Upstreams {
		if strings.HasPrefix(u, name+":") {
			return nil, errors.New("can't point union remote at itself - check the value of the upstreams setting")
	for _, remote := range opt.Remotes {
		if strings.HasPrefix(remote, name+":") {
			return nil, errors.New("can't point union remote at itself - check the value of the remote setting")
		}
	}

	upstreams := make([]*upstream.Fs, len(opt.Upstreams))
	errs := Errors(make([]error, len(opt.Upstreams)))
	multithread(len(opt.Upstreams), func(i int) {
		u := opt.Upstreams[i]
		upstreams[i], errs[i] = upstream.New(u, root, time.Duration(opt.CacheTime)*time.Second)
	})
	var usedUpstreams []*upstream.Fs
	var fserr error
	for i, err := range errs {
		if err != nil && err != fs.ErrorIsFile {
	var remotes []fs.Fs
	for i := range opt.Remotes {
		// Last remote first so we return the correct (last) matching fs in case of fs.ErrorIsFile
		var remote = opt.Remotes[len(opt.Remotes)-i-1]
		_, configName, fsPath, err := fs.ParseRemote(remote)
		if err != nil {
			return nil, err
		}
		// Only the upstreams that return ErrorIsFile would be used, if any
		if err == fs.ErrorIsFile {
			usedUpstreams = append(usedUpstreams, upstreams[i])
			fserr = fs.ErrorIsFile
		var rootString = path.Join(fsPath, filepath.ToSlash(root))
		if configName != "local" {
			rootString = configName + ":" + rootString
		}
		myFs, err := cache.Get(rootString)
		if err != nil {
			if err == fs.ErrorIsFile {
				return myFs, err
			}
			return nil, err
		}
		remotes = append(remotes, myFs)
	}
	if fserr == nil {
		usedUpstreams = upstreams

	// Reverse the remotes again so they are in the order as before
	for i, j := 0, len(remotes)-1; i < j; i, j = i+1, j-1 {
		remotes[i], remotes[j] = remotes[j], remotes[i]
	}

	f := &Fs{
		name:      name,
		root:      root,
		opt:       *opt,
		upstreams: usedUpstreams,
	}
	f.actionPolicy, err = policy.Get(opt.ActionPolicy)
	if err != nil {
		return nil, err
	}
	f.createPolicy, err = policy.Get(opt.CreatePolicy)
	if err != nil {
		return nil, err
	}
	f.searchPolicy, err = policy.Get(opt.SearchPolicy)
	if err != nil {
		return nil, err
		name:    name,
		root:    root,
		opt:     *opt,
		remotes: remotes,
		wr:      remotes[len(remotes)-1],
	}
	var features = (&fs.Features{
		CaseInsensitive: true,
@@ -729,14 +376,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		SetTier: true,
		GetTier: true,
	}).Fill(f)
	for _, f := range upstreams {
		if !f.IsWritable() {
			continue
		}
		features = features.Mask(f) // Mask all writable upstream fs
	}
	features = features.Mask(f.wr) // mask the features just on the writable fs

	// Really need the union of all upstreams for these, so
	// Really need the union of all remotes for these, so
	// re-instate and calculate separately.
	features.ChangeNotify = f.ChangeNotify
	features.DirCacheFlush = f.DirCacheFlush
@@ -746,12 +388,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Clear ChangeNotify and DirCacheFlush if all are nil
	clearChangeNotify := true
	clearDirCacheFlush := true
	for _, u := range f.upstreams {
		uFeatures := u.Features()
		if uFeatures.ChangeNotify != nil {
	for _, remote := range f.remotes {
		remoteFeatures := remote.Features()
		if remoteFeatures.ChangeNotify != nil {
			clearChangeNotify = false
		}
		if uFeatures.DirCacheFlush != nil {
		if remoteFeatures.DirCacheFlush != nil {
			clearDirCacheFlush = false
		}
	}
@@ -765,34 +407,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	f.features = features

	// Get common intersection of hashes
	hashSet := f.upstreams[0].Hashes()
	for _, u := range f.upstreams[1:] {
		hashSet = hashSet.Overlap(u.Hashes())
	hashSet := f.remotes[0].Hashes()
	for _, remote := range f.remotes[1:] {
		hashSet = hashSet.Overlap(remote.Hashes())
	}
	f.hashSet = hashSet

	return f, fserr
}

func parentDir(absPath string) string {
	parent := path.Dir(strings.TrimRight(filepath.ToSlash(absPath), "/"))
	if parent == "." {
		parent = ""
	}
	return parent
}

func multithread(num int, fn func(int)) {
	var wg sync.WaitGroup
	for i := 0; i < num; i++ {
		wg.Add(1)
		i := i
		go func() {
			defer wg.Done()
			fn(i)
		}()
	}
	wg.Wait()
	return f, nil
}

// Check the interfaces are satisfied

@@ -2,154 +2,17 @@
package union_test

import (
	"os"
	"path/filepath"
	"testing"

	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
	"github.com/stretchr/testify/require"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	if *fstest.RemoteName == "" {
		t.Skip("Skipping as -remote not set")
	}
	fstests.Run(t, &fstests.Opt{
		RemoteName:                   *fstest.RemoteName,
		UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
		UnimplementableObjectMethods: []string{"MimeType"},
	})
}

func TestStandard(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-standard1")
	tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-standard2")
	tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-standard3")
	require.NoError(t, os.MkdirAll(tempdir1, 0744))
	require.NoError(t, os.MkdirAll(tempdir2, 0744))
	require.NoError(t, os.MkdirAll(tempdir3, 0744))
	upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
	name := "TestUnion"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "union"},
			{Name: name, Key: "upstreams", Value: upstreams},
			{Name: name, Key: "action_policy", Value: "epall"},
			{Name: name, Key: "create_policy", Value: "epmfs"},
			{Name: name, Key: "search_policy", Value: "ff"},
		},
		UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
		UnimplementableObjectMethods: []string{"MimeType"},
	})
}

func TestRO(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-ro1")
	tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-ro2")
	tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-ro3")
	require.NoError(t, os.MkdirAll(tempdir1, 0744))
	require.NoError(t, os.MkdirAll(tempdir2, 0744))
	require.NoError(t, os.MkdirAll(tempdir3, 0744))
	upstreams := tempdir1 + " " + tempdir2 + ":ro " + tempdir3 + ":ro"
	name := "TestUnionRO"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "union"},
			{Name: name, Key: "upstreams", Value: upstreams},
			{Name: name, Key: "action_policy", Value: "epall"},
			{Name: name, Key: "create_policy", Value: "epmfs"},
			{Name: name, Key: "search_policy", Value: "ff"},
		},
		UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
		UnimplementableObjectMethods: []string{"MimeType"},
	})
}

func TestNC(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-nc1")
	tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-nc2")
	tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-nc3")
	require.NoError(t, os.MkdirAll(tempdir1, 0744))
	require.NoError(t, os.MkdirAll(tempdir2, 0744))
	require.NoError(t, os.MkdirAll(tempdir3, 0744))
	upstreams := tempdir1 + " " + tempdir2 + ":nc " + tempdir3 + ":nc"
	name := "TestUnionNC"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "union"},
			{Name: name, Key: "upstreams", Value: upstreams},
			{Name: name, Key: "action_policy", Value: "epall"},
			{Name: name, Key: "create_policy", Value: "epmfs"},
			{Name: name, Key: "search_policy", Value: "ff"},
		},
		UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
		UnimplementableObjectMethods: []string{"MimeType"},
	})
}

func TestPolicy1(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-policy11")
	tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-policy12")
	tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-policy13")
	require.NoError(t, os.MkdirAll(tempdir1, 0744))
	require.NoError(t, os.MkdirAll(tempdir2, 0744))
	require.NoError(t, os.MkdirAll(tempdir3, 0744))
	upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
	name := "TestUnionPolicy1"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "union"},
			{Name: name, Key: "upstreams", Value: upstreams},
			{Name: name, Key: "action_policy", Value: "all"},
			{Name: name, Key: "create_policy", Value: "lus"},
			{Name: name, Key: "search_policy", Value: "all"},
		},
		UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
		UnimplementableObjectMethods: []string{"MimeType"},
	})
}

func TestPolicy2(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-policy21")
	tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-policy22")
	tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-policy23")
	require.NoError(t, os.MkdirAll(tempdir1, 0744))
	require.NoError(t, os.MkdirAll(tempdir2, 0744))
	require.NoError(t, os.MkdirAll(tempdir3, 0744))
	upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
	name := "TestUnionPolicy2"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "union"},
			{Name: name, Key: "upstreams", Value: upstreams},
			{Name: name, Key: "action_policy", Value: "all"},
			{Name: name, Key: "create_policy", Value: "rand"},
			{Name: name, Key: "search_policy", Value: "ff"},
		},
		UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
		UnimplementableObjectMethods: []string{"MimeType"},
		RemoteName:  "TestUnion:",
		NilObject:   nil,
		SkipFsMatch: true,
	})
}

@@ -1,348 +0,0 @@
package upstream

import (
	"context"
	"io"
	"math"
	"path"
	"path/filepath"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
)

var (
	// ErrUsageFieldNotSupported states that the usage field is not supported by the backend
	ErrUsageFieldNotSupported = errors.New("this usage field is not supported")
)

// Fs is a wrapper around any fs and its configs
type Fs struct {
	fs.Fs
	RootFs      fs.Fs
	RootPath    string
	writable    bool
	creatable   bool
	usage       *fs.Usage     // Cache the usage
	cacheTime   time.Duration // cache duration
	cacheExpiry int64         // usage cache expiry time
	cacheMutex  sync.RWMutex
	cacheOnce   sync.Once
	cacheUpdate bool // if the cache is updating
}

// Directory describes a wrapped Directory
//
// This is a wrapped Directory which contains the upstream Fs
type Directory struct {
	fs.Directory
	f *Fs
}

// Object describes a wrapped Object
//
// This is a wrapped Object which contains the upstream Fs
type Object struct {
	fs.Object
	f *Fs
}

// Entry describes a wrapped fs.DirEntry interface with the
// information of upstream Fs
type Entry interface {
	fs.DirEntry
	UpstreamFs() *Fs
}

// New creates a new Fs based on the
// string formatted `type:root_path(:ro/:nc)`
func New(remote, root string, cacheTime time.Duration) (*Fs, error) {
	_, configName, fsPath, err := fs.ParseRemote(remote)
	if err != nil {
		return nil, err
	}
	f := &Fs{
		RootPath:    root,
		writable:    true,
		creatable:   true,
		cacheExpiry: time.Now().Unix(),
		cacheTime:   cacheTime,
		usage:       &fs.Usage{},
	}
	if strings.HasSuffix(fsPath, ":ro") {
		f.writable = false
		f.creatable = false
		fsPath = fsPath[0 : len(fsPath)-3]
	} else if strings.HasSuffix(fsPath, ":nc") {
		f.writable = true
		f.creatable = false
		fsPath = fsPath[0 : len(fsPath)-3]
	}
	if configName != "local" {
		fsPath = configName + ":" + fsPath
	}
	rFs, err := cache.Get(fsPath)
	if err != nil && err != fs.ErrorIsFile {
		return nil, err
	}
	f.RootFs = rFs
	rootString := path.Join(fsPath, filepath.ToSlash(root))
	myFs, err := cache.Get(rootString)
	if err != nil && err != fs.ErrorIsFile {
		return nil, err
	}
	f.Fs = myFs
	return f, err
}

// WrapDirectory wraps a fs.Directory to include the info
// of the upstream Fs
func (f *Fs) WrapDirectory(e fs.Directory) *Directory {
	if e == nil {
		return nil
	}
	return &Directory{
		Directory: e,
		f:         f,
	}
}

// WrapObject wraps a fs.Object to include the info
// of the upstream Fs
func (f *Fs) WrapObject(o fs.Object) *Object {
	if o == nil {
		return nil
	}
	return &Object{
		Object: o,
		f:      f,
	}
}

// WrapEntry wraps a fs.DirEntry to include the info
// of the upstream Fs
func (f *Fs) WrapEntry(e fs.DirEntry) (Entry, error) {
	switch e.(type) {
	case fs.Object:
		return f.WrapObject(e.(fs.Object)), nil
	case fs.Directory:
		return f.WrapDirectory(e.(fs.Directory)), nil
	default:
		return nil, errors.Errorf("unknown object type %T", e)
	}
}

// UpstreamFs gets the upstream Fs the entry is stored in
func (e *Directory) UpstreamFs() *Fs {
	return e.f
}

// UpstreamFs gets the upstream Fs the entry is stored in
func (o *Object) UpstreamFs() *Fs {
	return o.f
}

// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() fs.Object {
	return o.Object
}

// IsCreatable returns whether the fs is allowed to create new objects
func (f *Fs) IsCreatable() bool {
	return f.creatable
}

// IsWritable returns whether the fs is allowed to write
func (f *Fs) IsWritable() bool {
	return f.writable
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	o, err := f.Fs.Put(ctx, in, src, options...)
	if err != nil {
		return o, err
	}
	f.cacheMutex.Lock()
	defer f.cacheMutex.Unlock()
	size := src.Size()
	if f.usage.Used != nil {
		*f.usage.Used += size
	}
	if f.usage.Free != nil {
		*f.usage.Free -= size
	}
	if f.usage.Objects != nil {
		*f.usage.Objects++
	}
	return o, nil
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	do := f.Features().PutStream
	if do == nil {
		return nil, fs.ErrorNotImplemented
	}
	o, err := do(ctx, in, src, options...)
	if err != nil {
		return o, err
	}
	f.cacheMutex.Lock()
	defer f.cacheMutex.Unlock()
	size := o.Size()
	if f.usage.Used != nil {
		*f.usage.Used += size
	}
	if f.usage.Free != nil {
		*f.usage.Free -= size
	}
	if f.usage.Objects != nil {
		*f.usage.Objects++
	}
	return o, nil
}

// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	size := o.Size()
	err := o.Object.Update(ctx, in, src, options...)
	if err != nil {
		return err
	}
	o.f.cacheMutex.Lock()
	defer o.f.cacheMutex.Unlock()
	delta := o.Size() - size
	if delta <= 0 {
		return nil
	}
	// Adjust the cached usage by the growth in size
	if o.f.usage.Used != nil {
		*o.f.usage.Used += delta
	}
	if o.f.usage.Free != nil {
		*o.f.usage.Free -= delta
	}
	return nil
}

// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
		err := f.updateUsage()
		if err != nil {
			return nil, ErrUsageFieldNotSupported
		}
	}
	f.cacheMutex.RLock()
	defer f.cacheMutex.RUnlock()
	return f.usage, nil
}

// GetFreeSpace gets the free space of the fs
func (f *Fs) GetFreeSpace() (int64, error) {
	if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
		err := f.updateUsage()
		if err != nil {
			return math.MaxInt64, ErrUsageFieldNotSupported
		}
	}
	f.cacheMutex.RLock()
	defer f.cacheMutex.RUnlock()
	if f.usage.Free == nil {
		return math.MaxInt64, ErrUsageFieldNotSupported
	}
	return *f.usage.Free, nil
}

// GetUsedSpace gets the used space of the fs
func (f *Fs) GetUsedSpace() (int64, error) {
	if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
		err := f.updateUsage()
		if err != nil {
			return 0, ErrUsageFieldNotSupported
		}
	}
	f.cacheMutex.RLock()
	defer f.cacheMutex.RUnlock()
	if f.usage.Used == nil {
		return 0, ErrUsageFieldNotSupported
	}
	return *f.usage.Used, nil
}

// GetNumObjects gets the number of objects of the fs
func (f *Fs) GetNumObjects() (int64, error) {
	if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
		err := f.updateUsage()
		if err != nil {
			return 0, ErrUsageFieldNotSupported
		}
	}
	f.cacheMutex.RLock()
	defer f.cacheMutex.RUnlock()
	if f.usage.Objects == nil {
		return 0, ErrUsageFieldNotSupported
	}
	return *f.usage.Objects, nil
}

func (f *Fs) updateUsage() (err error) {
	if do := f.RootFs.Features().About; do == nil {
		return ErrUsageFieldNotSupported
	}
	done := false
	f.cacheOnce.Do(func() {
		f.cacheMutex.Lock()
		err = f.updateUsageCore(false)
		f.cacheMutex.Unlock()
		done = true
	})
	if done {
		return err
	}
	if !f.cacheUpdate {
		f.cacheUpdate = true
		go func() {
			_ = f.updateUsageCore(true)
			f.cacheUpdate = false
		}()
	}
	return nil
}

func (f *Fs) updateUsageCore(lock bool) error {
	// Run in background, should not be cancelled by user
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	usage, err := f.RootFs.Features().About(ctx)
	if err != nil {
		f.cacheUpdate = false
		return err
	}
	if lock {
		f.cacheMutex.Lock()
		defer f.cacheMutex.Unlock()
	}
	// Store usage
	atomic.StoreInt64(&f.cacheExpiry, time.Now().Add(f.cacheTime).Unix())
	f.usage = usage
	return nil
}
@@ -989,14 +989,13 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
		return nil, errors.Wrap(err, "about call failed")
	}
	usage := &fs.Usage{}
	if q.Used >= 0 {
		usage.Used = fs.NewUsageValue(q.Used)
	}
	if q.Available >= 0 {
		usage.Free = fs.NewUsageValue(q.Available)
	}
	if q.Available >= 0 && q.Used >= 0 {
		usage.Total = fs.NewUsageValue(q.Available + q.Used)
	if q.Available != 0 || q.Used != 0 {
		if q.Available >= 0 && q.Used >= 0 {
			usage.Total = fs.NewUsageValue(q.Available + q.Used)
		}
		if q.Used >= 0 {
			usage.Used = fs.NewUsageValue(q.Used)
		}
	}
	return usage, nil
}

@@ -110,7 +110,7 @@ def read_doc(doc):
    # Remove icons
    contents = re.sub(r'<i class="fa.*?</i>\s*', "", contents)
    # Make [...](/links/) absolute
    contents = re.sub(r'\]\((\/.*?\/(#.*)?)\)', r"](https://rclone.org\1)", contents)
    contents = re.sub(r'\((\/.*?\/)\)', r"(https://rclone.org\1)", contents)
    # Interpret provider shortcode
    # {{< provider name="Amazon S3" home="https://aws.amazon.com/s3/" config="/s3/" >}}
    contents = re.sub(r'\{\{<\s+provider.*?name="(.*?)".*?>\}\}', r"\1", contents)

@@ -371,12 +371,7 @@ func (fsys *FS) Write(path string, buff []byte, ofst int64, fh uint64) (n int) {
	if errc != 0 {
		return errc
	}
	var err error
	if fsys.VFS.Opt.CacheMode < vfs.CacheModeWrites || handle.Node().Mode()&os.ModeAppend == 0 {
		n, err = handle.WriteAt(buff, ofst)
	} else {
		n, err = handle.Write(buff)
	}
	n, err := handle.WriteAt(buff, ofst)
	if err != nil {
		return translateError(err)
	}

@@ -5,7 +5,6 @@ package mount
import (
	"context"
	"io"
	"os"

	"bazil.org/fuse"
	fusefs "bazil.org/fuse/fs"
@@ -42,12 +41,7 @@ var _ fusefs.HandleWriter = (*FileHandle)(nil)
// Write data to the file handle
func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
	defer log.Trace(fh, "len=%d, offset=%d", len(req.Data), req.Offset)("written=%d, err=%v", &resp.Size, &err)
	var n int
	if fh.Handle.Node().VFS().Opt.CacheMode < vfs.CacheModeWrites || fh.Handle.Node().Mode()&os.ModeAppend == 0 {
		n, err = fh.Handle.WriteAt(req.Data, req.Offset)
	} else {
		n, err = fh.Handle.Write(req.Data)
	}
	n, err := fh.Handle.WriteAt(req.Data, req.Offset)
	if err != nil {
		return translateError(err)
	}

@@ -6,7 +6,6 @@ import (
	"context"
	"fmt"
	"io"
	"os"
	"syscall"

	fusefs "github.com/hanwen/go-fuse/v2/fs"
@@ -74,11 +73,7 @@ func (f *FileHandle) Write(ctx context.Context, data []byte, off int64) (written
	var n int
	var err error
	defer log.Trace(f, "off=%d", off)("n=%d, off=%d, errno=%v", &n, &off, &errno)
	if f.h.Node().VFS().Opt.CacheMode < vfs.CacheModeWrites || f.h.Node().Mode()&os.ModeAppend == 0 {
		n, err = f.h.WriteAt(data, off)
	} else {
		n, err = f.h.Write(data)
	}
	n, err = f.h.WriteAt(data, off)
	return uint32(n), translateError(err)
}

@@ -322,9 +322,6 @@ be copied to the vfs cache before opening with --vfs-cache-mode full.
	VolumeName = strings.Replace(VolumeName, ":", " ", -1)
	VolumeName = strings.Replace(VolumeName, "/", " ", -1)
	VolumeName = strings.TrimSpace(VolumeName)
	if runtime.GOOS == "windows" && len(VolumeName) > 32 {
		VolumeName = VolumeName[:32]
	}

	// Start background task if --background is specified
	if Daemon {

@@ -25,6 +25,7 @@ import (
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/lib/file"
	"github.com/rclone/rclone/vfs"
	"github.com/rclone/rclone/vfs/vfscommon"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
@@ -44,11 +45,11 @@ var (
func RunTests(t *testing.T, fn MountFn) {
	mountFn = fn
	flag.Parse()
	cacheModes := []vfs.CacheMode{
		vfs.CacheModeOff,
		vfs.CacheModeMinimal,
		vfs.CacheModeWrites,
		vfs.CacheModeFull,
	cacheModes := []vfscommon.CacheMode{
		vfscommon.CacheModeOff,
		vfscommon.CacheModeMinimal,
		vfscommon.CacheModeWrites,
		vfscommon.CacheModeFull,
	}
	run = newRun()
	for _, cacheMode := range cacheModes {
@@ -207,7 +208,7 @@ func (r *Run) umount() {
}

// cacheMode flushes the VFS and changes the CacheMode
func (r *Run) cacheMode(cacheMode vfs.CacheMode) {
func (r *Run) cacheMode(cacheMode vfscommon.CacheMode) {
	if r.skip {
		log.Printf("FUSE not found so skipping cacheMode")
		return

@@ -5,10 +5,9 @@ import (
	"runtime"
	"testing"

	"github.com/rclone/rclone/vfs/vfscommon"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/rclone/rclone/vfs"
)

// TestWriteFileNoWrite tests writing a file with no write()'s to it
@@ -91,7 +90,7 @@ func TestWriteFileFsync(t *testing.T) {
func TestWriteFileDup(t *testing.T) {
	run.skipIfNoFUSE(t)

	if run.vfs.Opt.CacheMode < vfs.CacheModeWrites {
	if run.vfs.Opt.CacheMode < vfscommon.CacheModeWrites {
		t.Skip("not supported on vfs-cache-mode < writes")
		return
	}
@@ -136,7 +135,7 @@ func TestWriteFileDup(t *testing.T) {
func TestWriteFileAppend(t *testing.T) {
	run.skipIfNoFUSE(t)

	if run.vfs.Opt.CacheMode < vfs.CacheModeWrites {
	if run.vfs.Opt.CacheMode < vfscommon.CacheModeWrites {
		t.Skip("not supported on vfs-cache-mode < writes")
		return
	}

@@ -6,10 +6,9 @@ import (
	"runtime"
	"testing"

	"github.com/rclone/rclone/vfs/vfscommon"
	"github.com/stretchr/testify/assert"
	"golang.org/x/sys/unix"

	"github.com/rclone/rclone/vfs"
)

// TestWriteFileDoubleClose tests double close on write
@@ -45,7 +44,7 @@ func TestWriteFileDoubleClose(t *testing.T) {

	// write to the other dup
	_, err = unix.Write(fd2, buf)
	if run.vfs.Opt.CacheMode < vfs.CacheModeWrites {
	if run.vfs.Opt.CacheMode < vfscommon.CacheModeWrites {
		// produces an error if cache mode < writes
		assert.Error(t, err, "input/output error")
	} else {

@@ -339,8 +339,3 @@ Contributors
* Tim Gallant <me@timgallant.us>
* Frederick Zhang <frederick888@tsundere.moe>
* valery1707 <valery1707@gmail.com>
* Yves G <theYinYeti@yalis.fr>
* Shing Kit Chan <chanshingkit@gmail.com>
* Franklyn Tackitt <franklyn@tackitt.net>
* Robert-André Mauchin <zebob.m@gmail.com>
* evileye <48332831+ibiruai@users.noreply.github.com>

@@ -113,39 +113,29 @@ Rclone has 3 ways of authenticating with Azure Blob Storage:

#### Account and Key

This is the most straight forward and least flexible way. Just fill
in the `account` and `key` lines and leave the rest blank.
This is the most straight forward and least flexible way. Just fill in the `account` and `key` lines and leave the rest blank.

#### SAS URL

This can be an account level SAS URL or container level SAS URL.

To use it leave `account`, `key` blank and fill in `sas_url`.

An account level SAS URL or container level SAS URL can be obtained
from the Azure portal or the Azure Storage Explorer. To get a
container level SAS URL right click on a container in the Azure Blob
explorer in the Azure portal.
An account level SAS URL or container level SAS URL can be obtained from the Azure portal or Azure Storage Explorer.
To get a container level SAS URL right click on a container in the Azure Blob explorer in the Azure portal.

If you use a container level SAS URL, rclone operations are permitted
only on a particular container, eg
If you use a container level SAS URL, rclone operations are permitted only on a particular container, eg

    rclone ls azureblob:container
    rclone ls azureblob:container or rclone ls azureblob:

You can also list the single container from the root. This will only
show the container specified by the SAS URL.
Since the container name already exists in the SAS URL, you can leave it empty as well.

    $ rclone lsd azureblob:
    container/

Note that you can't see or access any other containers - this will
fail
However, these will not work

    rclone lsd azureblob:
    rclone ls azureblob:othercontainer

Container level SAS URLs are useful for temporarily allowing third
parties access to a single container or putting credentials into an
untrusted environment such as a CI build server.
This would be useful for temporarily allowing third parties access to a single container or putting credentials into an untrusted environment.

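For reference, a minimal SAS-based configuration might look like the following sketch; the remote name `azsas` and the token are placeholders, not values taken from this change:

    [azsas]
    type = azureblob
    sas_url = https://myaccount.blob.core.windows.net/container?sv=...

    rclone ls azsas:container
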
### Multipart uploads ###

@@ -9,8 +9,8 @@ date: "2020-02-01"
## v1.51.0 - 2020-02-01

* New backends
    * [Memory](/memory/) (Nick Craig-Wood)
    * [Sugarsync](/sugarsync/) (Nick Craig-Wood)
    * [Memory](/memory) (Nick Craig-Wood)
    * [Sugarsync](/sugarsync) (Nick Craig-Wood)
* New Features
    * Adjust all backends to have `--backend-encoding` parameter (Nick Craig-Wood)
        * this enables the encoding for special characters to be adjusted or disabled
@@ -165,9 +165,9 @@ date: "2020-02-01"
## v1.50.0 - 2019-10-26

* New backends
    * [Citrix Sharefile](/sharefile/) (Nick Craig-Wood)
    * [Chunker](/chunker/) - an overlay backend to split files into smaller parts (Ivan Andreev)
    * [Mail.ru Cloud](/mailru/) (Ivan Andreev)
    * [Citrix Sharefile](/sharefile) (Nick Craig-Wood)
    * [Chunker](/chunker) - an overlay backend to split files into smaller parts (Ivan Andreev)
    * [Mail.ru Cloud](/mailru) (Ivan Andreev)
* New Features
    * encodings (Fabian Möller & Nick Craig-Wood)
        * All backends now use file name encoding to ensure any file name can be written to any backend.
@@ -320,7 +320,7 @@ date: "2020-02-01"

* New backends
    * [1fichier](/fichier/) (Laura Hausmann)
    * [Google Photos](/googlephotos/) (Nick Craig-Wood)
    * [Google Photos](/googlephotos) (Nick Craig-Wood)
    * [Putio](/putio/) (Cenk Alti)
    * [premiumize.me](/premiumizeme/) (Nick Craig-Wood)
* New Features

@@ -736,20 +736,6 @@ When the limit is reached all transfers will stop immediately.

Rclone will exit with exit code 8 if the transfer limit is reached.

### --cutoff-mode=hard|soft|cautious ###

This modifies the behavior of `--max-transfer`.
Defaults to `--cutoff-mode=hard`.

Specifying `--cutoff-mode=hard` will stop transferring immediately
when Rclone reaches the limit.

Specifying `--cutoff-mode=soft` will stop starting new transfers
when Rclone reaches the limit.

Specifying `--cutoff-mode=cautious` will try to prevent Rclone
from reaching the limit.
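
A minimal sketch of how these three modes differ, loosely based on the accounting changes later in this diff (the package, helper and parameter names here are invented for illustration; this is not rclone's actual API):

    package cutoff

    // CutoffMode mirrors the three --cutoff-mode values.
    type CutoffMode int

    const (
        CutoffModeHard     CutoffMode = iota // abort everything at the limit
        CutoffModeSoft                       // finish in-flight transfers, start no new ones
        CutoffModeCautious                   // try not to reach the limit at all
    )

    // allowNewTransfer reports whether a new transfer of size next may start.
    // transferred is bytes moved so far, pending is bytes still expected from
    // in-flight transfers, and max is the --max-transfer limit (<0 = no limit).
    func allowNewTransfer(mode CutoffMode, transferred, pending, next, max int64) bool {
        if max < 0 {
            return true
        }
        switch mode {
        case CutoffModeHard, CutoffModeSoft:
            // hard additionally aborts transfers that are already running
            return transferred < max
        case CutoffModeCautious:
            // refuse anything that would push the total past the limit
            return transferred+pending+next < max
        }
        return true
    }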

### --modify-window=TIME ###

When checking whether a file has been modified, this is the maximum
@@ -9,7 +9,6 @@ date: "2018-03-05"
If rclone is run with the `--rc` flag then it starts an http server
which can be used to remote control rclone.

If you just want to run a remote control then see the [rcd command](/commands/rclone_rcd/).

**NB** this is experimental and everything here is subject to change!
@@ -86,12 +85,6 @@ style.

Default Off.

### --rc-enable-metrics

Enable OpenMetrics/Prometheus compatible endpoint at `/metrics`.

Default Off.
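
With the flag enabled, the endpoint can be checked with any HTTP client or scraped by Prometheus. An illustrative session (assuming the rc server's default address of `localhost:5572`):

    rclone rcd --rc-enable-metrics
    curl http://localhost:5572/metrics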

### --rc-web-gui

Set this flag to serve the default web gui on the same port as rclone.
@@ -1,7 +1,7 @@
---
title: "Union"
description: "Remote Unification"
date: "2020-01-25"
date: "2018-08-29"
---

<i class="fa fa-link"></i> Union
@@ -12,90 +12,22 @@ The `union` remote provides a unification similar to UnionFS using other remotes
Paths may be as deep as required or a local path,
eg `remote:directory/subdirectory` or `/directory/subdirectory`.

During the initial setup with `rclone config` you will specify the upstream
remotes as a space separated list. The upstream remotes can either be local paths or other remotes.
During the initial setup with `rclone config` you will specify the target
remotes as a space separated list. The target remotes can either be local paths or other remotes.

Attributes `:ro` and `:nc` can be attached to the end of a path to tag the remote as **read only** or **no create**,
eg `remote:directory/subdirectory:ro` or `remote:directory/subdirectory:nc`.
The order of the remotes is important as it defines which remotes take precedence over others if there are files with the same name in the same logical path.
The last remote is the topmost remote and replaces files with the same name from previous remotes.

Subfolders can be used in upstream remotes. Assume a union remote named `backup`
with the remotes `mydrive:private/backup`. Invoking `rclone mkdir backup:desktop`
Only the last remote is used to write to and delete from, all other remotes are read-only.

Subfolders can be used in the target remote. Assume a union remote named `backup`
with the remotes `mydrive:private/backup mydrive2:/backup`. Invoking `rclone mkdir backup:desktop`
is exactly the same as invoking `rclone mkdir mydrive2:/backup/desktop`.

There will be no special handling of paths containing `..` segments.
Invoking `rclone mkdir backup:../desktop` is exactly the same as invoking
`rclone mkdir mydrive2:/backup/../desktop`.

### Behavior / Policies

The behavior of the union backend is inspired by [trapexit/mergerfs](https://github.com/trapexit/mergerfs). All functions are grouped into 3 categories: **action**, **create** and **search**. These functions and categories can be assigned a policy which dictates what file or directory is chosen when performing that behavior. Any policy can be assigned to a function or category though some may not be very useful in practice. For instance: **rand** (random) may be useful for file creation (create) but could lead to very odd behavior if used for `delete` if there were more than one copy of the file.

#### Function / Category classifications

| Category | Description              | Functions                                                                          |
|----------|--------------------------|------------------------------------------------------------------------------------|
| action   | Writing Existing file    | move, rmdir, rmdirs, delete, purge and copy, sync (as destination when file exists) |
| create   | Create non-existing file | copy, sync (as destination when file does not exist)                                |
| search   | Reading and listing file | ls, lsd, lsl, cat, md5sum, sha1sum and copy, sync (as source)                       |
| N/A      |                          | size, about                                                                         |

#### Path Preservation

Policies, as described below, are of two basic types: `path preserving` and `non-path preserving`.

All policies which start with `ep` (**epff**, **eplfs**, **eplus**, **epmfs**, **eprand**) are `path preserving`. `ep` stands for `existing path`.

A path preserving policy will only consider upstreams where the relative path being accessed already exists.

When using non-path preserving policies paths will be created in target upstreams as necessary.

#### Quota Relevant Policies

Some policies rely on quota information. These policies should be used only if your upstreams support the respective quota fields.

| Policy     | Required Field |
|------------|----------------|
| lfs, eplfs | Free           |
| mfs, epmfs | Free           |
| lus, eplus | Used           |
| lno, eplno | Objects        |

To check if your upstreams support the field, run `rclone about remote: [flags]` and see if the required field exists.
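
For instance, `rclone about` output along these lines (the values here are illustrative only) shows that the `Free` and `Objects` fields are available:

    $ rclone about remote:
    Total:   500G
    Used:    219G
    Free:    281G
    Objects: 1.2M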

#### Filters

Policies basically search upstream remotes and create a list of files / paths for functions to work on. The policy is responsible for filtering and sorting. The policy type defines the sorting but filtering is mostly uniform as described below.

* No **search** policies filter.
* All **action** policies will filter out remotes which are tagged as **read-only**.
* All **create** policies will filter out remotes which are tagged **read-only** or **no-create**.

If all remotes are filtered an error will be returned.

#### Policy descriptions

The policy definitions are inspired by [trapexit/mergerfs](https://github.com/trapexit/mergerfs) but not exactly the same. Some policy definitions could differ due to the much larger latency of remote file systems. A sketch of how a policy picks an upstream follows the table below.

| Policy           | Description                                                |
|------------------|------------------------------------------------------------|
| all | Search category: same as **epall**. Action category: same as **epall**. Create category: act on all remotes. |
| epall (existing path, all) | Search category: Given this order configured, act on the first one found where the relative path exists. Action category: apply to all found. Create category: act on all remotes where the relative path exists. |
| epff (existing path, first found) | Act on the first one found, by the time upstreams reply, where the relative path exists. |
| eplfs (existing path, least free space) | Of all the remotes on which the relative path exists choose the one with the least free space. |
| eplus (existing path, least used space) | Of all the remotes on which the relative path exists choose the one with the least used space. |
| eplno (existing path, least number of objects) | Of all the remotes on which the relative path exists choose the one with the least number of objects. |
| epmfs (existing path, most free space) | Of all the remotes on which the relative path exists choose the one with the most free space. |
| eprand (existing path, random) | Calls **epall** and then randomizes. Returns only one remote. |
| ff (first found) | Search category: same as **epff**. Action category: same as **epff**. Create category: Act on the first one found by the time upstreams reply. |
| lfs (least free space) | Search category: same as **eplfs**. Action category: same as **eplfs**. Create category: Pick the remote with the least available free space. |
| lus (least used space) | Search category: same as **eplus**. Action category: same as **eplus**. Create category: Pick the remote with the least used space. |
| lno (least number of objects) | Search category: same as **eplno**. Action category: same as **eplno**. Create category: Pick the remote with the least number of objects. |
| mfs (most free space) | Search category: same as **epmfs**. Action category: same as **epmfs**. Create category: Pick the remote with the most available free space. |
| newest | Pick the file / directory with the largest mtime. |
| rand (random) | Calls **all** and then randomizes. Returns only one remote. |
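
Here is the sketch referred to above: a rough Go illustration of a create-category policy such as **epmfs** (existing path, most free space). The types and helper are invented for the example and do not correspond to the union backend's real internals:

    package unionsketch

    import "errors"

    // Upstream is a hypothetical stand-in for a union upstream remote.
    type Upstream struct {
        Name      string
        ReadOnly  bool // tagged :ro
        NoCreate  bool // tagged :nc
        FreeSpace int64
        HasPath   func(path string) bool
    }

    // epmfs sketches "existing path, most free space" for the create category:
    // filter out read-only / no-create upstreams, keep only those where the
    // relative path already exists (path preserving), then pick the one with
    // the most free space.
    func epmfs(upstreams []Upstream, path string) (*Upstream, error) {
        var best *Upstream
        for i := range upstreams {
            u := &upstreams[i]
            if u.ReadOnly || u.NoCreate || !u.HasPath(path) {
                continue
            }
            if best == nil || u.FreeSpace > best.FreeSpace {
                best = u
            }
        }
        if best == nil {
            return nil, errors.New("no upstreams eligible")
        }
        return best, nil
    }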

### Setup

Here is an example of how to make a union called `remote` for local folders.
First run:

@@ -117,27 +49,16 @@ XX / Union merges the contents of several remotes
   \ "union"
[snip]
Storage> union
List of space separated upstreams.
Can be 'upstreama:test/dir upstreamb:', '\"upstreama:test/space:ro dir\" upstreamb:', etc.
List of space separated remotes.
Can be 'remotea:test/dir remoteb:', '"remotea:test/space dir" remoteb:', etc.
The last remote is used to write to.
Enter a string value. Press Enter for the default ("").
upstreams>
Policy to choose upstream on ACTION class.
Enter a string value. Press Enter for the default ("epall").
action_policy>
Policy to choose upstream on CREATE class.
Enter a string value. Press Enter for the default ("epmfs").
create_policy>
Policy to choose upstream on SEARCH class.
Enter a string value. Press Enter for the default ("ff").
search_policy>
Cache time of usage and free space (in seconds)
Enter a signed integer. Press Enter for the default ("120").
cache_time>
remotes>
Remote config
--------------------
[remote]
type = union
upstreams = C:\dir1 C:\dir2 C:\dir3
remotes = C:\dir1 C:\dir2 C:\dir3
--------------------
y) Yes this is OK
e) Edit this remote
@@ -176,53 +97,17 @@ Copy another local directory to the union directory called source, which will be
<!--- autogenerated options start - DO NOT EDIT, instead edit fs.RegInfo in backend/union/union.go then run make backenddocs -->
### Standard Options

Here are the standard options specific to union (Union merges the contents of several upstream fs).
Here are the standard options specific to union (Union merges the contents of several remotes).

#### --union-upstreams
#### --union-remotes

List of space separated upstreams.
Can be 'upstreama:test/dir upstreamb:', '"upstreama:test/space:ro dir" upstreamb:', etc.
List of space separated remotes.
Can be 'remotea:test/dir remoteb:', '"remotea:test/space dir" remoteb:', etc.
The last remote is used to write to.

- Config: upstreams
- Env Var: RCLONE_UNION_UPSTREAMS
- Config: remotes
- Env Var: RCLONE_UNION_REMOTES
- Type: string
- Default: ""

#### --union-action-policy

Policy to choose upstream on ACTION class.

- Config: action_policy
- Env Var: RCLONE_UNION_ACTION_POLICY
- Type: string
- Default: "epall"

#### --union-create-policy

Policy to choose upstream on CREATE class.

- Config: create_policy
- Env Var: RCLONE_UNION_CREATE_POLICY
- Type: string
- Default: "epmfs"

#### --union-search-policy

Policy to choose upstream on SEARCH class.

- Config: search_policy
- Env Var: RCLONE_UNION_SEARCH_POLICY
- Type: string
- Default: "ff"

#### --union-cache-time

Cache time of usage and free space (in seconds)

- Config: cache_time
- Env Var: RCLONE_UNION_CACHE_TIME
- Type: int
- Default: 120

<!--- autogenerated options stop -->
@@ -61,10 +61,7 @@ func newAccountSizeName(stats *StatsInfo, in io.ReadCloser, size int64, name str
		exit:   make(chan struct{}),
		avg:    0,
		lpTime: time.Now(),
		max:    -1,
	}
	if fs.Config.CutoffMode == fs.CutoffModeHard {
		acc.max = int64((fs.Config.MaxTransfer))
		max:    int64(fs.Config.MaxTransfer),
	}
	go acc.averageLoop()
	stats.inProgress.set(acc.name, acc)
@@ -197,12 +197,9 @@ func TestAccountAccounter(t *testing.T) {

func TestAccountMaxTransfer(t *testing.T) {
	old := fs.Config.MaxTransfer
	oldMode := fs.Config.CutoffMode

	fs.Config.MaxTransfer = 15
	defer func() {
		fs.Config.MaxTransfer = old
		fs.Config.CutoffMode = oldMode
	}()

	in := ioutil.NopCloser(bytes.NewBuffer(make([]byte, 100)))
@@ -221,20 +218,6 @@ func TestAccountMaxTransfer(t *testing.T) {
	assert.Equal(t, 0, n)
	assert.Equal(t, ErrorMaxTransferLimitReached, err)
	assert.True(t, fserrors.IsFatalError(err))

	fs.Config.CutoffMode = fs.CutoffModeSoft
	stats = NewStats()
	acc = newAccountSizeName(stats, in, 1, "test")

	n, err = acc.Read(b)
	assert.Equal(t, 10, n)
	assert.NoError(t, err)
	n, err = acc.Read(b)
	assert.Equal(t, 10, n)
	assert.NoError(t, err)
	n, err = acc.Read(b)
	assert.Equal(t, 10, n)
	assert.NoError(t, err)
}

func TestShortenName(t *testing.T) {
@@ -1,94 +0,0 @@
package accounting

import (
	"github.com/prometheus/client_golang/prometheus"
)

var namespace = "rclone_"

// RcloneCollector is a Prometheus collector for Rclone
type RcloneCollector struct {
	bytesTransferred *prometheus.Desc
	transferSpeed    *prometheus.Desc
	numOfErrors      *prometheus.Desc
	numOfCheckFiles  *prometheus.Desc
	transferredFiles *prometheus.Desc
	deletes          *prometheus.Desc
	fatalError       *prometheus.Desc
	retryError       *prometheus.Desc
}

// NewRcloneCollector make a new RcloneCollector
func NewRcloneCollector() *RcloneCollector {
	return &RcloneCollector{
		bytesTransferred: prometheus.NewDesc(namespace+"bytes_transferred_total",
			"Total transferred bytes since the start of the Rclone process",
			nil, nil,
		),
		transferSpeed: prometheus.NewDesc(namespace+"speed",
			"Average speed in bytes/sec since the start of the Rclone process",
			nil, nil,
		),
		numOfErrors: prometheus.NewDesc(namespace+"errors_total",
			"Number of errors thrown",
			nil, nil,
		),
		numOfCheckFiles: prometheus.NewDesc(namespace+"checked_files_total",
			"Number of checked files",
			nil, nil,
		),
		transferredFiles: prometheus.NewDesc(namespace+"files_transferred_total",
			"Number of transferred files",
			nil, nil,
		),
		deletes: prometheus.NewDesc(namespace+"files_deleted_total",
			"Total number of files deleted",
			nil, nil,
		),
		fatalError: prometheus.NewDesc(namespace+"fatal_error",
			"Whether a fatal error has occurred",
			nil, nil,
		),
		retryError: prometheus.NewDesc(namespace+"retry_error",
			"Whether there has been an error that will be retried",
			nil, nil,
		),
	}
}

// Describe is part of the Collector interface: https://godoc.org/github.com/prometheus/client_golang/prometheus#Collector
func (c *RcloneCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.bytesTransferred
	ch <- c.transferSpeed
	ch <- c.numOfErrors
	ch <- c.numOfCheckFiles
	ch <- c.transferredFiles
	ch <- c.deletes
	ch <- c.fatalError
	ch <- c.retryError
}

// Collect is part of the Collector interface: https://godoc.org/github.com/prometheus/client_golang/prometheus#Collector
func (c *RcloneCollector) Collect(ch chan<- prometheus.Metric) {
	s := GlobalStats()
	s.mu.RLock()

	ch <- prometheus.MustNewConstMetric(c.bytesTransferred, prometheus.CounterValue, float64(s.bytes))
	ch <- prometheus.MustNewConstMetric(c.transferSpeed, prometheus.GaugeValue, s.Speed())
	ch <- prometheus.MustNewConstMetric(c.numOfErrors, prometheus.CounterValue, float64(s.errors))
	ch <- prometheus.MustNewConstMetric(c.numOfCheckFiles, prometheus.CounterValue, float64(s.checks))
	ch <- prometheus.MustNewConstMetric(c.transferredFiles, prometheus.CounterValue, float64(s.transfers))
	ch <- prometheus.MustNewConstMetric(c.deletes, prometheus.CounterValue, float64(s.deletes))
	ch <- prometheus.MustNewConstMetric(c.fatalError, prometheus.GaugeValue, bool2Float(s.fatalError))
	ch <- prometheus.MustNewConstMetric(c.retryError, prometheus.GaugeValue, bool2Float(s.retryError))

	s.mu.RUnlock()
}

// bool2Float is a small function to convert a boolean into a float64 value that can be used for Prometheus
func bool2Float(e bool) float64 {
	if e {
		return 1
	}
	return 0
}
@@ -56,7 +56,13 @@ func NewStats() *StatsInfo {
func (s *StatsInfo) RemoteStats() (out rc.Params, err error) {
	out = make(rc.Params)
	s.mu.RLock()
	out["speed"] = s.Speed()
	dt := s.totalDuration()
	dtSeconds := dt.Seconds()
	speed := 0.0
	if dt > 0 {
		speed = float64(s.bytes) / dtSeconds
	}
	out["speed"] = speed
	out["bytes"] = s.bytes
	out["errors"] = s.errors
	out["fatalError"] = s.fatalError
@@ -64,7 +70,7 @@ func (s *StatsInfo) RemoteStats() (out rc.Params, err error) {
	out["checks"] = s.checks
	out["transfers"] = s.transfers
	out["deletes"] = s.deletes
	out["elapsedTime"] = s.totalDuration().Seconds()
	out["elapsedTime"] = dtSeconds
	s.mu.RUnlock()
	if !s.checking.empty() {
		var c []string
@@ -95,17 +101,6 @@ func (s *StatsInfo) RemoteStats() (out rc.Params, err error) {
	return out, nil
}

// Speed returns the average speed of the transfer in bytes/second
func (s *StatsInfo) Speed() float64 {
	dt := s.totalDuration()
	dtSeconds := dt.Seconds()
	speed := 0.0
	if dt > 0 {
		speed = float64(s.bytes) / dtSeconds
	}
	return speed
}

func (s *StatsInfo) transferRemoteStats(name string) rc.Params {
	s.mu.RLock()
	defer s.mu.RUnlock()
@@ -387,22 +382,6 @@ func (s *StatsInfo) GetBytes() int64 {
	return s.bytes
}

// GetBytesWithPending returns the number of bytes transferred and remaining transfers
func (s *StatsInfo) GetBytesWithPending() int64 {
	s.mu.RLock()
	defer s.mu.RUnlock()
	pending := int64(0)
	for _, tr := range s.startedTransfers {
		if tr.acc != nil {
			bytes, size := tr.acc.progress()
			if bytes < size {
				pending += size - bytes
			}
		}
	}
	return s.bytes + pending
}

// Errors updates the stats for errors
func (s *StatsInfo) Errors(errors int64) {
	s.mu.Lock()
@@ -93,7 +93,6 @@ type ConfigInfo struct {
	UseServerModTime bool
	MaxTransfer      SizeSuffix
	MaxDuration      time.Duration
	CutoffMode       CutoffMode
	MaxBacklog       int
	MaxStatsGroups   int
	StatsOneLine     bool
@@ -94,7 +94,6 @@ func AddFlags(flagSet *pflag.FlagSet) {
	flags.FVarP(flagSet, &fs.Config.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList)
	flags.FVarP(flagSet, &fs.Config.MaxTransfer, "max-transfer", "", "Maximum size of data to transfer.")
	flags.DurationVarP(flagSet, &fs.Config.MaxDuration, "max-duration", "", 0, "Maximum duration rclone will transfer data for.")
	flags.FVarP(flagSet, &fs.Config.CutoffMode, "cutoff-mode", "", "Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS")
	flags.IntVarP(flagSet, &fs.Config.MaxBacklog, "max-backlog", "", fs.Config.MaxBacklog, "Maximum number of objects in sync or check backlog.")
	flags.IntVarP(flagSet, &fs.Config.MaxStatsGroups, "max-stats-groups", "", fs.Config.MaxStatsGroups, "Maximum number of stats groups to keep in memory. On max oldest is discarded.")
	flags.BoolVarP(flagSet, &fs.Config.StatsOneLine, "stats-one-line", "", fs.Config.StatsOneLine, "Make the stats fit on one line.")
@@ -1,49 +0,0 @@
package fs

import (
	"fmt"
	"strings"

	"github.com/pkg/errors"
)

// CutoffMode describes the possible delete modes in the config
type CutoffMode byte

// MaxTransferMode constants
const (
	CutoffModeHard CutoffMode = iota
	CutoffModeSoft
	CutoffModeCautious
	CutoffModeDefault = CutoffModeHard
)

var cutoffModeToString = []string{
	CutoffModeHard:     "HARD",
	CutoffModeSoft:     "SOFT",
	CutoffModeCautious: "CAUTIOUS",
}

// String turns a LogLevel into a string
func (m CutoffMode) String() string {
	if m >= CutoffMode(len(cutoffModeToString)) {
		return fmt.Sprintf("CutoffMode(%d)", m)
	}
	return cutoffModeToString[m]
}

// Set a LogLevel
func (m *CutoffMode) Set(s string) error {
	for n, name := range cutoffModeToString {
		if s != "" && name == strings.ToUpper(s) {
			*m = CutoffMode(n)
			return nil
		}
	}
	return errors.Errorf("Unknown cutoff mode %q", s)
}

// Type of the value
func (m *CutoffMode) Type() string {
	return "string"
}
@@ -1,6 +0,0 @@
package fs

import "github.com/spf13/pflag"

// Check it satisfies the interface
var _ pflag.Value = (*CutoffMode)(nil)
@@ -362,11 +362,11 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
	// Try server side copy first - if has optional interface and
	// is same underlying remote
	actionTaken = "Copied (server side copy)"
	if fs.Config.MaxTransfer >= 0 && (accounting.Stats(ctx).GetBytes() >= int64(fs.Config.MaxTransfer) ||
		(fs.Config.CutoffMode == fs.CutoffModeCautious && accounting.Stats(ctx).GetBytesWithPending()+src.Size() >= int64(fs.Config.MaxTransfer))) {
		return nil, accounting.ErrorMaxTransferLimitReached
	}
	if doCopy := f.Features().Copy; doCopy != nil && (SameConfig(src.Fs(), f) || (SameRemoteType(src.Fs(), f) && f.Features().ServerSideAcrossConfigs)) {
		// Check transfer limit for server side copies
		if fs.Config.MaxTransfer >= 0 && accounting.Stats(ctx).GetBytes() >= int64(fs.Config.MaxTransfer) {
			return nil, accounting.ErrorMaxTransferLimitReached
		}
		in := tr.Account(nil) // account the transfer
		in.ServerSideCopyStart()
		newDst, err = doCopy(ctx, src, remote)
@@ -39,7 +39,6 @@ import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/filter"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
@@ -1528,58 +1527,3 @@ func TestRcatSize(t *testing.T) {
	// Check files exist
	fstest.CheckItems(t, r.Fremote, file1, file2)
}

func TestCopyFileMaxTransfer(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	old := fs.Config.MaxTransfer
	oldMode := fs.Config.CutoffMode

	defer func() {
		fs.Config.MaxTransfer = old
		fs.Config.CutoffMode = oldMode
		accounting.Stats(context.Background()).ResetCounters()
	}()

	file1 := r.WriteFile("file1", "file1 contents", t1)
	file2 := r.WriteFile("file2", "file2 contents...........", t2)

	rfile1 := file1
	rfile1.Path = "sub/file1"
	rfile2 := file2
	rfile2.Path = "sub/file2"

	fs.Config.MaxTransfer = 15
	fs.Config.CutoffMode = fs.CutoffModeHard
	accounting.Stats(context.Background()).ResetCounters()

	err := operations.CopyFile(context.Background(), r.Fremote, r.Flocal, rfile1.Path, file1.Path)
	require.NoError(t, err)
	fstest.CheckItems(t, r.Flocal, file1, file2)
	fstest.CheckItems(t, r.Fremote, rfile1)

	accounting.Stats(context.Background()).ResetCounters()

	err = operations.CopyFile(context.Background(), r.Fremote, r.Flocal, rfile2.Path, file2.Path)
	fstest.CheckItems(t, r.Flocal, file1, file2)
	fstest.CheckItems(t, r.Fremote, rfile1)
	assert.Contains(t, err.Error(), "Max transfer limit reached")
	assert.True(t, fserrors.IsFatalError(err))

	fs.Config.CutoffMode = fs.CutoffModeCautious
	accounting.Stats(context.Background()).ResetCounters()

	err = operations.CopyFile(context.Background(), r.Fremote, r.Flocal, rfile2.Path, file2.Path)
	fstest.CheckItems(t, r.Flocal, file1, file2)
	fstest.CheckItems(t, r.Fremote, rfile1)
	assert.Contains(t, err.Error(), "Max transfer limit reached")
	assert.True(t, fserrors.IsFatalError(err))

	fs.Config.CutoffMode = fs.CutoffModeSoft
	accounting.Stats(context.Background()).ResetCounters()

	err = operations.CopyFile(context.Background(), r.Fremote, r.Flocal, rfile2.Path, file2.Path)
	require.NoError(t, err)
	fstest.CheckItems(t, r.Flocal, file1, file2)
	fstest.CheckItems(t, r.Fremote, rfile1, rfile2)
}
@@ -29,7 +29,6 @@ type Options struct {
	WebGUINoOpenBrowser      bool   // set to disable auto opening browser
	WebGUIFetchURL           string // set the default url for fetching webgui
	AccessControlAllowOrigin string // set the access control for CORS configuration
	EnableMetrics            bool   // set to enable prometheus metrics on /metrics
	JobExpireDuration        time.Duration
	JobExpireInterval        time.Duration
}
@@ -26,7 +26,6 @@ func AddFlags(flagSet *pflag.FlagSet) {
	flags.BoolVarP(flagSet, &Opt.WebGUINoOpenBrowser, "rc-web-gui-no-open-browser", "", false, "Don't open the browser automatically")
	flags.StringVarP(flagSet, &Opt.WebGUIFetchURL, "rc-web-fetch-url", "", "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest", "URL to fetch the releases for webgui.")
	flags.StringVarP(flagSet, &Opt.AccessControlAllowOrigin, "rc-allow-origin", "", "", "Set the allowed origin for CORS.")
	flags.BoolVarP(flagSet, &Opt.EnableMetrics, "rc-enable-metrics", "", false, "Enable prometheus metrics on /metrics")
	flags.DurationVarP(flagSet, &Opt.JobExpireDuration, "rc-job-expire-duration", "", Opt.JobExpireDuration, "expire finished async jobs older than this value")
	flags.DurationVarP(flagSet, &Opt.JobExpireInterval, "rc-job-expire-interval", "", Opt.JobExpireInterval, "interval to check for expired async jobs")
	httpflags.AddFlagsPrefix(flagSet, "rc-", &Opt.HTTPOptions)
@@ -16,14 +16,9 @@ import (
	"strings"

	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/skratchdot/open-golang/open"

	"github.com/rclone/rclone/cmd/serve/httplib"
	"github.com/rclone/rclone/cmd/serve/httplib/serve"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/list"
@@ -31,16 +26,9 @@ import (
	"github.com/rclone/rclone/fs/rc/jobs"
	"github.com/rclone/rclone/fs/rc/rcflags"
	"github.com/rclone/rclone/lib/random"
	"github.com/skratchdot/open-golang/open"
)

var promHandler http.Handler

func init() {
	rcloneCollector := accounting.NewRcloneCollector()
	prometheus.MustRegister(rcloneCollector)
	promHandler = promhttp.Handler()
}

// Start the remote control server if configured
//
// If the server wasn't configured the *Server returned may be nil
@@ -347,9 +335,6 @@ func (s *Server) handleGet(w http.ResponseWriter, r *http.Request, path string)
		// Serve /[fs]/remote files
		s.serveRemote(w, r, match[2], match[1])
		return
	case path == "metrics" && s.opt.EnableMetrics:
		promHandler.ServeHTTP(w, r)
		return
	case path == "*" && s.opt.Serve:
		// Serve /* as the remote listing
		s.serveRoot(w, r)
@@ -12,12 +12,10 @@ import (
	"testing"
	"time"

	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fs/rc"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/rc"
)

const (
@@ -483,59 +481,6 @@ func TestMethods(t *testing.T) {
	testServer(t, tests, &opt)
}

func TestMetrics(t *testing.T) {
	stats := accounting.GlobalStats()
	tests := makeMetricsTestCases(stats)
	opt := newTestOpt()
	opt.EnableMetrics = true
	testServer(t, tests, &opt)

	// Test changing a couple options
	stats.Bytes(500)
	stats.Deletes(30)
	stats.Errors(2)
	stats.Bytes(324)

	tests = makeMetricsTestCases(stats)
	testServer(t, tests, &opt)
}

func makeMetricsTestCases(stats *accounting.StatsInfo) (tests []testRun) {
	tests = []testRun{{
		Name:     "Bytes Transferred Metric",
		URL:      "/metrics",
		Method:   "GET",
		Status:   http.StatusOK,
		Contains: regexp.MustCompile(fmt.Sprintf("rclone_bytes_transferred_total %d", stats.GetBytes())),
	}, {
		Name:     "Checked Files Metric",
		URL:      "/metrics",
		Method:   "GET",
		Status:   http.StatusOK,
		Contains: regexp.MustCompile(fmt.Sprintf("rclone_checked_files_total %d", stats.GetChecks())),
	}, {
		Name:     "Errors Metric",
		URL:      "/metrics",
		Method:   "GET",
		Status:   http.StatusOK,
		Contains: regexp.MustCompile(fmt.Sprintf("rclone_errors_total %d", stats.GetErrors())),
	}, {
		Name:     "Deleted Files Metric",
		URL:      "/metrics",
		Method:   "GET",
		Status:   http.StatusOK,
		Contains: regexp.MustCompile(fmt.Sprintf("rclone_files_deleted_total %d", stats.Deletes(0))),
	}, {
		Name:     "Files Transferred Metric",
		URL:      "/metrics",
		Method:   "GET",
		Status:   http.StatusOK,
		Contains: regexp.MustCompile(fmt.Sprintf("rclone_files_transferred_total %d", stats.GetTransfers())),
	},
	}
	return
}

var matchRemoteDirListing = regexp.MustCompile(`<title>List of all rclone remotes.</title>`)

func TestServingRoot(t *testing.T) {
@@ -879,15 +879,11 @@ func (s *syncCopyMove) Match(ctx context.Context, dst, src fs.DirEntry) (recurse
	// Do the same thing to the entire contents of the directory
	_, ok := dst.(fs.Directory)
	if ok {
		// Only record matched (src & dst) empty dirs when performing move
		if s.DoMove {
			// Record the src directory for deletion
			s.srcEmptyDirsMu.Lock()
			s.srcParentDirCheck(src)
			s.srcEmptyDirs[src.Remote()] = src
			s.srcEmptyDirsMu.Unlock()
		}

		// Record the src directory for deletion
		s.srcEmptyDirsMu.Lock()
		s.srcParentDirCheck(src)
		s.srcEmptyDirs[src.Remote()] = src
		s.srcEmptyDirsMu.Unlock()
		return true
	}
	// FIXME src is dir, dst is file
go.mod
@@ -15,6 +15,7 @@ require (
	github.com/billziss-gh/cgofuse v1.2.0
	github.com/djherbis/times v1.2.0
	github.com/dropbox/dropbox-sdk-go-unofficial v5.6.0+incompatible
	github.com/etcd-io/bbolt v1.3.3
	github.com/google/go-querystring v1.0.0 // indirect
	github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f // indirect
	github.com/hanwen/go-fuse/v2 v2.0.3-0.20191108143333-152e6ac32d54
@@ -38,7 +39,6 @@ require (
	github.com/patrickmn/go-cache v2.1.0+incompatible
	github.com/pkg/errors v0.9.1
	github.com/pkg/sftp v1.11.0
	github.com/prometheus/client_golang v1.4.1
	github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8
	github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46
	github.com/sevlyar/go-daemon v0.1.5
@@ -53,7 +53,7 @@ require (
	github.com/xanzy/ssh-agent v0.2.1
	github.com/youmark/pkcs8 v0.0.0-20191102193632-94c173a94d60
	github.com/yunify/qingstor-sdk-go/v3 v3.2.0
	go.etcd.io/bbolt v1.3.3
	go.etcd.io/bbolt v1.3.3 // indirect
	goftp.io/server v0.3.2
	golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d
	golang.org/x/net v0.0.0-20200222125558-5a598a2470a0
go.sum
@@ -51,9 +51,7 @@ github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4/go.mod h1:FSdwKX97koS5efg
github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0=
github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/anacrolix/dms v1.1.0 h1:vbBXZS7T5FaZm+9p1pdmVVo9tN3qdc27bKSETdeT3xo=
github.com/anacrolix/dms v1.1.0/go.mod h1:msPKAoppoNRfrYplJqx63FZ+VipDZ4Xsj3KzIQxyU7k=
github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
@@ -68,19 +66,13 @@ github.com/atotto/clipboard v0.1.2/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn
github.com/aws/aws-sdk-go v1.29.9 h1:PHq9ddjfZYfCOXyqHKiCZ1CHRAk7nXhV7WTqj5l+bmQ=
github.com/aws/aws-sdk-go v1.29.9/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/billziss-gh/cgofuse v1.2.0 h1:FMdQSygSBpD4yEPENJcmvfCdmNWMVkPLlD7wWdl/7IA=
github.com/billziss-gh/cgofuse v1.2.0/go.mod h1:LJjoaUojlVjgo5GQoEJTcJNqZJeRU0nCR84CyxKt2YM=
github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -106,6 +98,8 @@ github.com/dropbox/dropbox-sdk-go-unofficial v5.6.0+incompatible/go.mod h1:lr+Lh
github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM=
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -114,7 +108,6 @@ github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1T
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
@@ -153,7 +146,6 @@ github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -190,8 +182,6 @@ github.com/jlaffaye/ftp v0.0.0-20191218041957-e1b8fdd0dcc3/go.mod h1:PwUeyujmhaG
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
@@ -235,15 +225,10 @@ github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHX
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-runewidth v0.0.8 h1:3tS41NlGYSmhhe/8fhGRzc+z3AYCw1Fe1WAyLuujKs0=
github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2 h1:VlXvEx6JbFp7F9iz92zXP2Ew+9VupSpfybr+TxmjdH0=
@@ -278,29 +263,14 @@ github.com/pkg/sftp v1.11.0/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZ
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.1 h1:FFSuS004yOQEtDdTq+TAOLP5xUq63KqAFYyOi8zA+Y8=
github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 h1:Y258uzXU/potCYnQd1r6wlAnoMB68BiCkCcCnKx1SH8=
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8/go.mod h1:bSJjRokAHHOhA+XFxplld8w2R/dXLH7Z3BZ532vhFwU=
@@ -433,7 +403,6 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -480,7 +449,6 @@ golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -576,7 +544,6 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@@ -587,8 +554,6 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
vendor/github.com/beorn7/perks/LICENSE (generated, vendored)
@@ -1,20 +0,0 @@
Copyright (C) 2013 Blake Mizerany

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
vendor/github.com/beorn7/perks/quantile/exampledata.txt (generated, vendored)
File diff suppressed because it is too large
vendor/github.com/beorn7/perks/quantile/stream.go (generated, vendored)
@@ -1,316 +0,0 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile

import (
	"math"
	"sort"
)

// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
	Value float64 `json:",string"`
	Width float64 `json:",string"`
	Delta float64 `json:",string"`
}

// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample

func (a Samples) Len() int           { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

type invariant func(s *stream, r float64) float64

// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * r
	}
	return newStream(ƒ)
}

// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * (s.n - r)
	}
	return newStream(ƒ)
}

// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targetMap map[float64]float64) *Stream {
	// Convert map to slice to avoid slow iterations on a map.
	// ƒ is called on the hot path, so converting the map to a slice
	// beforehand results in significant CPU savings.
	targets := targetMapToSlice(targetMap)

	ƒ := func(s *stream, r float64) float64 {
		var m = math.MaxFloat64
		var f float64
		for _, t := range targets {
			if t.quantile*s.n <= r {
				f = (2 * t.epsilon * r) / t.quantile
			} else {
				f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
			}
			if f < m {
				m = f
			}
		}
		return m
	}
	return newStream(ƒ)
}

type target struct {
	quantile float64
	epsilon  float64
}

func targetMapToSlice(targetMap map[float64]float64) []target {
	targets := make([]target, 0, len(targetMap))

	for quantile, epsilon := range targetMap {
		t := target{
			quantile: quantile,
			epsilon:  epsilon,
		}
		targets = append(targets, t)
	}

	return targets
}

// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
	*stream
	b      Samples
	sorted bool
}

func newStream(ƒ invariant) *Stream {
	x := &stream{ƒ: ƒ}
	return &Stream{x, make(Samples, 0, 500), true}
}

// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
	s.insert(Sample{Value: v, Width: 1})
}

func (s *Stream) insert(sample Sample) {
	s.b = append(s.b, sample)
	s.sorted = false
	if len(s.b) == cap(s.b) {
		s.flush()
	}
}

// Query returns the computed qth percentiles value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
	if !s.flushed() {
		// Fast path when there hasn't been enough data for a flush;
		// this also yields better accuracy for small sets of data.
		l := len(s.b)
		if l == 0 {
			return 0
		}
		i := int(math.Ceil(float64(l) * q))
		if i > 0 {
			i -= 1
		}
		s.maybeSort()
		return s.b[i].Value
	}
	s.flush()
	return s.stream.query(q)
}

// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
	sort.Sort(samples)
	s.stream.merge(samples)
}

// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
	s.stream.reset()
	s.b = s.b[:0]
}

// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
	if !s.flushed() {
		return s.b
	}
	s.flush()
	return s.stream.samples()
}

// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
	return len(s.b) + s.stream.count()
}

func (s *Stream) flush() {
	s.maybeSort()
	s.stream.merge(s.b)
	s.b = s.b[:0]
}

func (s *Stream) maybeSort() {
	if !s.sorted {
		s.sorted = true
		sort.Sort(s.b)
	}
}

func (s *Stream) flushed() bool {
	return len(s.stream.l) > 0
}

type stream struct {
	n float64
	l []Sample
	ƒ invariant
}

func (s *stream) reset() {
	s.l = s.l[:0]
	s.n = 0
}

func (s *stream) insert(v float64) {
	s.merge(Samples{{v, 1, 0}})
}

func (s *stream) merge(samples Samples) {
|
||||
// TODO(beorn7): This tries to merge not only individual samples, but
|
||||
// whole summaries. The paper doesn't mention merging summaries at
|
||||
// all. Unittests show that the merging is inaccurate. Find out how to
|
||||
// do merges properly.
|
||||
var r float64
|
||||
i := 0
|
||||
for _, sample := range samples {
|
||||
for ; i < len(s.l); i++ {
|
||||
c := s.l[i]
|
||||
if c.Value > sample.Value {
|
||||
// Insert at position i.
|
||||
s.l = append(s.l, Sample{})
|
||||
copy(s.l[i+1:], s.l[i:])
|
||||
s.l[i] = Sample{
|
||||
sample.Value,
|
||||
sample.Width,
|
||||
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
|
||||
// TODO(beorn7): How to calculate delta correctly?
|
||||
}
|
||||
i++
|
||||
goto inserted
|
||||
}
|
||||
r += c.Width
|
||||
}
|
||||
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
|
||||
i++
|
||||
inserted:
|
||||
s.n += sample.Width
|
||||
r += sample.Width
|
||||
}
|
||||
s.compress()
|
||||
}
|
||||
|
||||
func (s *stream) count() int {
|
||||
return int(s.n)
|
||||
}
|
||||
|
||||
func (s *stream) query(q float64) float64 {
|
||||
t := math.Ceil(q * s.n)
|
||||
t += math.Ceil(s.ƒ(s, t) / 2)
|
||||
p := s.l[0]
|
||||
var r float64
|
||||
for _, c := range s.l[1:] {
|
||||
r += p.Width
|
||||
if r+c.Width+c.Delta > t {
|
||||
return p.Value
|
||||
}
|
||||
p = c
|
||||
}
|
||||
return p.Value
|
||||
}
|
||||
|
||||
func (s *stream) compress() {
|
||||
if len(s.l) < 2 {
|
||||
return
|
||||
}
|
||||
x := s.l[len(s.l)-1]
|
||||
xi := len(s.l) - 1
|
||||
r := s.n - 1 - x.Width
|
||||
|
||||
for i := len(s.l) - 2; i >= 0; i-- {
|
||||
c := s.l[i]
|
||||
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
|
||||
x.Width += c.Width
|
||||
s.l[xi] = x
|
||||
// Remove element at i.
|
||||
copy(s.l[i:], s.l[i+1:])
|
||||
s.l = s.l[:len(s.l)-1]
|
||||
xi -= 1
|
||||
} else {
|
||||
x = c
|
||||
xi = i
|
||||
}
|
||||
r -= c.Width
|
||||
}
|
||||
}
|
||||
|
||||
func (s *stream) samples() Samples {
|
||||
samples := make(Samples, len(s.l))
|
||||
copy(samples, s.l)
|
||||
return samples
|
||||
}
|
||||
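The file above is the whole public surface of the quantile package. As a quick orientation aid (not part of the diff itself), here is a minimal usage sketch, assuming the upstream import path github.com/beorn7/perks/quantile that this vendored copy comes from:

```
package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// The map passed to NewTargeted maps quantile -> allowed absolute
	// error on the rank: a value reported for 0.99 is guaranteed to have
	// a true quantile within 0.99±0.001.
	s := quantile.NewTargeted(map[float64]float64{
		0.50: 0.05,
		0.90: 0.01,
		0.99: 0.001,
	})

	for i := 1; i <= 10000; i++ {
		s.Insert(float64(i))
	}

	fmt.Println("p50:", s.Query(0.50)) // approximately 5000
	fmt.Println("p99:", s.Query(0.99)) // approximately 9900
	fmt.Println("n:", s.Count())       // 10000
}
```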
8
vendor/github.com/cespare/xxhash/v2/.travis.yml
generated
vendored
@@ -1,8 +0,0 @@
language: go
go:
    - "1.x"
    - master
env:
    - TAGS=""
    - TAGS="-tags purego"
script: go test $TAGS -v ./...
22
vendor/github.com/cespare/xxhash/v2/LICENSE.txt
generated
vendored
@@ -1,22 +0,0 @@
Copyright (c) 2016 Caleb Spare

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
67
vendor/github.com/cespare/xxhash/v2/README.md
generated
vendored
@@ -1,67 +0,0 @@
# xxhash

[GoDoc](https://godoc.org/github.com/cespare/xxhash)
[Build Status](https://travis-ci.org/cespare/xxhash)

xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.

This package provides a straightforward API:

```
func Sum64(b []byte) uint64
func Sum64String(s string) uint64
type Digest struct{ ... }
func New() *Digest
```

The `Digest` type implements hash.Hash64. Its key methods are:

```
func (*Digest) Write([]byte) (int, error)
func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```

This implementation provides a fast pure-Go implementation and an even faster
assembly implementation for amd64.

## Compatibility

This package is in a module and the latest code is in version 2 of the module.
You need a version of Go with at least "minimal module compatibility" to use
github.com/cespare/xxhash/v2:

* 1.9.7+ for Go 1.9
* 1.10.3+ for Go 1.10
* Go 1.11 or later

I recommend using the latest release of Go.

## Benchmarks

Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.

| input size | purego        | asm           |
| ---------- | ------------- | ------------- |
| 5 B        | 979.66 MB/s   | 1291.17 MB/s  |
| 100 B      | 7475.26 MB/s  | 7973.40 MB/s  |
| 4 KB       | 17573.46 MB/s | 17602.65 MB/s |
| 10 MB      | 17131.46 MB/s | 17142.16 MB/s |

These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
the following commands under Go 1.11.2:

```
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
```

## Projects using this package

- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [FreeCache](https://github.com/coocood/freecache)
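As a quick illustration of the API the README documents (not part of the vendored file): hashing the same bytes one-shot and incrementally should yield the same digest. A minimal sketch, assuming the module path github.com/cespare/xxhash/v2:

```
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	data := []byte("hello, xxhash")

	// One-shot hashing.
	fmt.Printf("%016x\n", xxhash.Sum64(data))

	// Streaming: feeding the same bytes incrementally through a Digest
	// yields the same 64-bit result.
	d := xxhash.New()
	d.Write(data[:5])
	d.Write(data[5:])
	fmt.Printf("%016x\n", d.Sum64())
}
```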
3
vendor/github.com/cespare/xxhash/v2/go.mod
generated
vendored
@@ -1,3 +0,0 @@
module github.com/cespare/xxhash/v2

go 1.11
0
vendor/github.com/cespare/xxhash/v2/go.sum
generated
vendored
236
vendor/github.com/cespare/xxhash/v2/xxhash.go
generated
vendored
@@ -1,236 +0,0 @@
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
// at http://cyan4973.github.io/xxHash/.
package xxhash

import (
	"encoding/binary"
	"errors"
	"math/bits"
)

const (
	prime1 uint64 = 11400714785074694791
	prime2 uint64 = 14029467366897019727
	prime3 uint64 = 1609587929392839161
	prime4 uint64 = 9650029242287828579
	prime5 uint64 = 2870177450012600261
)

// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
// possible in the Go code is worth a small (but measurable) performance boost
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
// convenience in the Go code in a few places where we need to intentionally
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
// result overflows a uint64).
var (
	prime1v = prime1
	prime2v = prime2
	prime3v = prime3
	prime4v = prime4
	prime5v = prime5
)

// Digest implements hash.Hash64.
type Digest struct {
	v1    uint64
	v2    uint64
	v3    uint64
	v4    uint64
	total uint64
	mem   [32]byte
	n     int // how much of mem is used
}

// New creates a new Digest that computes the 64-bit xxHash algorithm.
func New() *Digest {
	var d Digest
	d.Reset()
	return &d
}

// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
	d.v1 = prime1v + prime2
	d.v2 = prime2
	d.v3 = 0
	d.v4 = -prime1v
	d.total = 0
	d.n = 0
}

// Size always returns 8 bytes.
func (d *Digest) Size() int { return 8 }

// BlockSize always returns 32 bytes.
func (d *Digest) BlockSize() int { return 32 }

// Write adds more data to d. It always returns len(b), nil.
func (d *Digest) Write(b []byte) (n int, err error) {
	n = len(b)
	d.total += uint64(n)

	if d.n+n < 32 {
		// This new data doesn't even fill the current block.
		copy(d.mem[d.n:], b)
		d.n += n
		return
	}

	if d.n > 0 {
		// Finish off the partial block.
		copy(d.mem[d.n:], b)
		d.v1 = round(d.v1, u64(d.mem[0:8]))
		d.v2 = round(d.v2, u64(d.mem[8:16]))
		d.v3 = round(d.v3, u64(d.mem[16:24]))
		d.v4 = round(d.v4, u64(d.mem[24:32]))
		b = b[32-d.n:]
		d.n = 0
	}

	if len(b) >= 32 {
		// One or more full blocks left.
		nw := writeBlocks(d, b)
		b = b[nw:]
	}

	// Store any remaining partial block.
	copy(d.mem[:], b)
	d.n = len(b)

	return
}

// Sum appends the current hash to b and returns the resulting slice.
func (d *Digest) Sum(b []byte) []byte {
	s := d.Sum64()
	return append(
		b,
		byte(s>>56),
		byte(s>>48),
		byte(s>>40),
		byte(s>>32),
		byte(s>>24),
		byte(s>>16),
		byte(s>>8),
		byte(s),
	)
}

// Sum64 returns the current hash.
func (d *Digest) Sum64() uint64 {
	var h uint64

	if d.total >= 32 {
		v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
		h = mergeRound(h, v1)
		h = mergeRound(h, v2)
		h = mergeRound(h, v3)
		h = mergeRound(h, v4)
	} else {
		h = d.v3 + prime5
	}

	h += d.total

	i, end := 0, d.n
	for ; i+8 <= end; i += 8 {
		k1 := round(0, u64(d.mem[i:i+8]))
		h ^= k1
		h = rol27(h)*prime1 + prime4
	}
	if i+4 <= end {
		h ^= uint64(u32(d.mem[i:i+4])) * prime1
		h = rol23(h)*prime2 + prime3
		i += 4
	}
	for i < end {
		h ^= uint64(d.mem[i]) * prime5
		h = rol11(h) * prime1
		i++
	}

	h ^= h >> 33
	h *= prime2
	h ^= h >> 29
	h *= prime3
	h ^= h >> 32

	return h
}

const (
	magic         = "xxh\x06"
	marshaledSize = len(magic) + 8*5 + 32
)

// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (d *Digest) MarshalBinary() ([]byte, error) {
	b := make([]byte, 0, marshaledSize)
	b = append(b, magic...)
	b = appendUint64(b, d.v1)
	b = appendUint64(b, d.v2)
	b = appendUint64(b, d.v3)
	b = appendUint64(b, d.v4)
	b = appendUint64(b, d.total)
	b = append(b, d.mem[:d.n]...)
	b = b[:len(b)+len(d.mem)-d.n]
	return b, nil
}

// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (d *Digest) UnmarshalBinary(b []byte) error {
	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
		return errors.New("xxhash: invalid hash state identifier")
	}
	if len(b) != marshaledSize {
		return errors.New("xxhash: invalid hash state size")
	}
	b = b[len(magic):]
	b, d.v1 = consumeUint64(b)
	b, d.v2 = consumeUint64(b)
	b, d.v3 = consumeUint64(b)
	b, d.v4 = consumeUint64(b)
	b, d.total = consumeUint64(b)
	copy(d.mem[:], b)
	b = b[len(d.mem):]
	d.n = int(d.total % uint64(len(d.mem)))
	return nil
}

func appendUint64(b []byte, x uint64) []byte {
	var a [8]byte
	binary.LittleEndian.PutUint64(a[:], x)
	return append(b, a[:]...)
}

func consumeUint64(b []byte) ([]byte, uint64) {
	x := u64(b)
	return b[8:], x
}

func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }

func round(acc, input uint64) uint64 {
	acc += input * prime2
	acc = rol31(acc)
	acc *= prime1
	return acc
}

func mergeRound(acc, val uint64) uint64 {
	val = round(0, val)
	acc ^= val
	acc = acc*prime1 + prime4
	return acc
}

func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
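The MarshalBinary/UnmarshalBinary pair in the file above lets a caller checkpoint an in-progress hash and resume it later. A minimal sketch of the round trip (illustrative only, again assuming the module path github.com/cespare/xxhash/v2):

```
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	d := xxhash.New()
	d.WriteString("part one, ")

	// Snapshot the in-progress state...
	state, err := d.MarshalBinary()
	if err != nil {
		panic(err)
	}

	// ...and resume it later in a fresh Digest.
	resumed := xxhash.New()
	if err := resumed.UnmarshalBinary(state); err != nil {
		panic(err)
	}
	resumed.WriteString("part two")

	// Continuing the original stream gives the same result.
	d.WriteString("part two")
	fmt.Println(d.Sum64() == resumed.Sum64()) // true
}
```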
13
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
generated
vendored
@@ -1,13 +0,0 @@
// +build !appengine
// +build gc
// +build !purego

package xxhash

// Sum64 computes the 64-bit xxHash digest of b.
//
//go:noescape
func Sum64(b []byte) uint64

//go:noescape
func writeBlocks(d *Digest, b []byte) int
215
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
generated
vendored
@@ -1,215 +0,0 @@
// +build !appengine
// +build gc
// +build !purego

#include "textflag.h"

// Register allocation:
// AX	h
// CX	pointer to advance through b
// DX	n
// BX	loop end
// R8	v1, k1
// R9	v2
// R10	v3
// R11	v4
// R12	tmp
// R13	prime1v
// R14	prime2v
// R15	prime4v

// round reads from and advances the buffer pointer in CX.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
	MOVQ  (CX), R12 \
	ADDQ  $8, CX    \
	IMULQ R14, R12  \
	ADDQ  R12, r    \
	ROLQ  $31, r    \
	IMULQ R13, r

// mergeRound applies a merge round on the two registers acc and val.
// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
#define mergeRound(acc, val) \
	IMULQ R14, val \
	ROLQ  $31, val \
	IMULQ R13, val \
	XORQ  val, acc \
	IMULQ R13, acc \
	ADDQ  R15, acc

// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
	// Load fixed primes.
	MOVQ ·prime1v(SB), R13
	MOVQ ·prime2v(SB), R14
	MOVQ ·prime4v(SB), R15

	// Load slice.
	MOVQ b_base+0(FP), CX
	MOVQ b_len+8(FP), DX
	LEAQ (CX)(DX*1), BX

	// The first loop limit will be len(b)-32.
	SUBQ $32, BX

	// Check whether we have at least one block.
	CMPQ DX, $32
	JLT  noBlocks

	// Set up initial state (v1, v2, v3, v4).
	MOVQ R13, R8
	ADDQ R14, R8
	MOVQ R14, R9
	XORQ R10, R10
	XORQ R11, R11
	SUBQ R13, R11

	// Loop until CX > BX.
blockLoop:
	round(R8)
	round(R9)
	round(R10)
	round(R11)

	CMPQ CX, BX
	JLE  blockLoop

	MOVQ R8, AX
	ROLQ $1, AX
	MOVQ R9, R12
	ROLQ $7, R12
	ADDQ R12, AX
	MOVQ R10, R12
	ROLQ $12, R12
	ADDQ R12, AX
	MOVQ R11, R12
	ROLQ $18, R12
	ADDQ R12, AX

	mergeRound(AX, R8)
	mergeRound(AX, R9)
	mergeRound(AX, R10)
	mergeRound(AX, R11)

	JMP afterBlocks

noBlocks:
	MOVQ ·prime5v(SB), AX

afterBlocks:
	ADDQ DX, AX

	// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
	ADDQ $24, BX

	CMPQ CX, BX
	JG   fourByte

wordLoop:
	// Calculate k1.
	MOVQ  (CX), R8
	ADDQ  $8, CX
	IMULQ R14, R8
	ROLQ  $31, R8
	IMULQ R13, R8

	XORQ  R8, AX
	ROLQ  $27, AX
	IMULQ R13, AX
	ADDQ  R15, AX

	CMPQ CX, BX
	JLE  wordLoop

fourByte:
	ADDQ $4, BX
	CMPQ CX, BX
	JG   singles

	MOVL  (CX), R8
	ADDQ  $4, CX
	IMULQ R13, R8
	XORQ  R8, AX

	ROLQ  $23, AX
	IMULQ R14, AX
	ADDQ  ·prime3v(SB), AX

singles:
	ADDQ $4, BX
	CMPQ CX, BX
	JGE  finalize

singlesLoop:
	MOVBQZX (CX), R12
	ADDQ    $1, CX
	IMULQ   ·prime5v(SB), R12
	XORQ    R12, AX

	ROLQ  $11, AX
	IMULQ R13, AX

	CMPQ CX, BX
	JL   singlesLoop

finalize:
	MOVQ  AX, R12
	SHRQ  $33, R12
	XORQ  R12, AX
	IMULQ R14, AX
	MOVQ  AX, R12
	SHRQ  $29, R12
	XORQ  R12, AX
	IMULQ ·prime3v(SB), AX
	MOVQ  AX, R12
	SHRQ  $32, R12
	XORQ  R12, AX

	MOVQ AX, ret+24(FP)
	RET

// writeBlocks uses the same registers as above except that it uses AX to store
// the d pointer.

// func writeBlocks(d *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
	// Load fixed primes needed for round.
	MOVQ ·prime1v(SB), R13
	MOVQ ·prime2v(SB), R14

	// Load slice.
	MOVQ b_base+8(FP), CX
	MOVQ b_len+16(FP), DX
	LEAQ (CX)(DX*1), BX
	SUBQ $32, BX

	// Load vN from d.
	MOVQ d+0(FP), AX
	MOVQ 0(AX), R8   // v1
	MOVQ 8(AX), R9   // v2
	MOVQ 16(AX), R10 // v3
	MOVQ 24(AX), R11 // v4

	// We don't need to check the loop condition here; this function is
	// always called with at least one block of data to process.
blockLoop:
	round(R8)
	round(R9)
	round(R10)
	round(R11)

	CMPQ CX, BX
	JLE  blockLoop

	// Copy vN back to d.
	MOVQ R8, 0(AX)
	MOVQ R9, 8(AX)
	MOVQ R10, 16(AX)
	MOVQ R11, 24(AX)

	// The number of bytes written is CX minus the old base pointer.
	SUBQ b_base+8(FP), CX
	MOVQ CX, ret+32(FP)

	RET
76
vendor/github.com/cespare/xxhash/v2/xxhash_other.go
generated
vendored
@@ -1,76 +0,0 @@
// +build !amd64 appengine !gc purego

package xxhash

// Sum64 computes the 64-bit xxHash digest of b.
func Sum64(b []byte) uint64 {
	// A simpler version would be
	//   d := New()
	//   d.Write(b)
	//   return d.Sum64()
	// but this is faster, particularly for small inputs.

	n := len(b)
	var h uint64

	if n >= 32 {
		v1 := prime1v + prime2
		v2 := prime2
		v3 := uint64(0)
		v4 := -prime1v
		for len(b) >= 32 {
			v1 = round(v1, u64(b[0:8:len(b)]))
			v2 = round(v2, u64(b[8:16:len(b)]))
			v3 = round(v3, u64(b[16:24:len(b)]))
			v4 = round(v4, u64(b[24:32:len(b)]))
			b = b[32:len(b):len(b)]
		}
		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
		h = mergeRound(h, v1)
		h = mergeRound(h, v2)
		h = mergeRound(h, v3)
		h = mergeRound(h, v4)
	} else {
		h = prime5
	}

	h += uint64(n)

	i, end := 0, len(b)
	for ; i+8 <= end; i += 8 {
		k1 := round(0, u64(b[i:i+8:len(b)]))
		h ^= k1
		h = rol27(h)*prime1 + prime4
	}
	if i+4 <= end {
		h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
		h = rol23(h)*prime2 + prime3
		i += 4
	}
	for ; i < end; i++ {
		h ^= uint64(b[i]) * prime5
		h = rol11(h) * prime1
	}

	h ^= h >> 33
	h *= prime2
	h ^= h >> 29
	h *= prime3
	h ^= h >> 32

	return h
}

func writeBlocks(d *Digest, b []byte) int {
	v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
	n := len(b)
	for len(b) >= 32 {
		v1 = round(v1, u64(b[0:8:len(b)]))
		v2 = round(v2, u64(b[8:16:len(b)]))
		v3 = round(v3, u64(b[16:24:len(b)]))
		v4 = round(v4, u64(b[24:32:len(b)]))
		b = b[32:len(b):len(b)]
	}
	d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
	return n - len(b)
}
15
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
generated
vendored
@@ -1,15 +0,0 @@
// +build appengine

// This file contains the safe implementations of otherwise unsafe-using code.

package xxhash

// Sum64String computes the 64-bit xxHash digest of s.
func Sum64String(s string) uint64 {
	return Sum64([]byte(s))
}

// WriteString adds more data to d. It always returns len(s), nil.
func (d *Digest) WriteString(s string) (n int, err error) {
	return d.Write([]byte(s))
}
46
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
@@ -1,46 +0,0 @@
// +build !appengine

// This file encapsulates usage of unsafe.
// xxhash_safe.go contains the safe implementations.

package xxhash

import (
	"reflect"
	"unsafe"
)

// Notes:
//
// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
// for some discussion about these unsafe conversions.
//
// In the future it's possible that compiler optimizations will make these
// unsafe operations unnecessary: https://golang.org/issue/2205.
//
// Both of these wrapper functions still incur function call overhead since they
// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
// for strings to squeeze out a bit more speed. Mid-stack inlining should
// eventually fix this.

// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
	var b []byte
	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
	bh.Len = len(s)
	bh.Cap = len(s)
	return Sum64(b)
}

// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
func (d *Digest) WriteString(s string) (n int, err error) {
	var b []byte
	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
	bh.Len = len(s)
	bh.Cap = len(s)
	return d.Write(b)
}
0
vendor/go.etcd.io/bbolt/.gitignore → vendor/github.com/etcd-io/bbolt/.gitignore
generated
vendored
0
vendor/go.etcd.io/bbolt/.travis.yml → vendor/github.com/etcd-io/bbolt/.travis.yml
generated
vendored
0
vendor/go.etcd.io/bbolt/LICENSE → vendor/github.com/etcd-io/bbolt/LICENSE
generated
vendored
0
vendor/go.etcd.io/bbolt/Makefile → vendor/github.com/etcd-io/bbolt/Makefile
generated
vendored
0
vendor/go.etcd.io/bbolt/README.md → vendor/github.com/etcd-io/bbolt/README.md
generated
vendored
0
vendor/go.etcd.io/bbolt/bolt_386.go → vendor/github.com/etcd-io/bbolt/bolt_386.go
generated
vendored
0
vendor/go.etcd.io/bbolt/bolt_arm.go → vendor/github.com/etcd-io/bbolt/bolt_arm.go
generated
vendored
0
vendor/go.etcd.io/bbolt/bolt_ppc.go → vendor/github.com/etcd-io/bbolt/bolt_ppc.go
generated
vendored
0
vendor/go.etcd.io/bbolt/bolt_unix.go → vendor/github.com/etcd-io/bbolt/bolt_unix.go
generated
vendored
0
vendor/go.etcd.io/bbolt/bucket.go → vendor/github.com/etcd-io/bbolt/bucket.go
generated
vendored
0
vendor/go.etcd.io/bbolt/cursor.go → vendor/github.com/etcd-io/bbolt/cursor.go
generated
vendored
Some files were not shown because too many files have changed in this diff.