Mirror of https://github.com/rclone/rclone.git, synced 2026-01-22 20:33:17 +00:00

Compare commits: press...vfs-refact (1 commit)
Commit 40b9e312c6
@@ -28,7 +28,6 @@ import (
 	_ "github.com/rclone/rclone/backend/opendrive"
 	_ "github.com/rclone/rclone/backend/pcloud"
 	_ "github.com/rclone/rclone/backend/premiumizeme"
-	_ "github.com/rclone/rclone/backend/press"
 	_ "github.com/rclone/rclone/backend/putio"
 	_ "github.com/rclone/rclone/backend/qingstor"
 	_ "github.com/rclone/rclone/backend/s3"
backend/cache/storage_persistent.go (vendored, 2 changes)
@@ -16,10 +16,10 @@ import (
 	"sync"
 	"time"

-	bolt "github.com/etcd-io/bbolt"
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/walk"
+	bolt "go.etcd.io/bbolt"
 )

 // Constants
@@ -799,12 +799,10 @@ func (f *ftpReadCloser) Close() error {
 		f.f.putFtpConnection(&f.c, nil)
 	}
 	// mask the error if it was caused by a premature close
-	// NB StatusAboutToSend is to work around a bug in pureftpd
-	// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
 	switch errX := err.(type) {
 	case *textproto.Error:
 		switch errX.Code {
-		case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
+		case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable:
 			err = nil
 		}
 	}
@@ -1003,9 +1003,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

	// Add upload to internal storage
	if pattern.isUpload {
		o.fs.uploadedMu.Lock()
		o.fs.uploaded.AddEntry(o)
		o.fs.uploadedMu.Unlock()
	}
	return nil
}
backend/press/.gitignore (vendored, 1 change)
@@ -1 +0,0 @@
-test
@@ -1,75 +0,0 @@
package press

import (
	"bufio"
	"io"

	"github.com/klauspost/compress/gzip"
)

// AlgGzip represents the gzip compression algorithm
type AlgGzip struct {
	level     int
	blockSize uint32
}

// InitializeGzip initializes the gzip compression Algorithm
func InitializeGzip(bs uint32, level int) Algorithm {
	a := new(AlgGzip)
	a.blockSize = bs
	a.level = level
	return a
}

// GetFileExtension returns the file extension
func (a *AlgGzip) GetFileExtension() string {
	return ".gz"
}

// GetHeader returns the gzip compression header (empty for gzip)
func (a *AlgGzip) GetHeader() []byte {
	return []byte{}
}

// GetFooter returns the gzip compression footer (empty for gzip)
func (a *AlgGzip) GetFooter() []byte {
	return []byte{}
}

// CompressBlock compresses a block using gzip
func (a *AlgGzip) CompressBlock(in []byte, out io.Writer) (compressedSize uint32, uncompressedSize uint64, err error) {
	// Initialize buffer
	bufw := bufio.NewWriterSize(out, int(a.blockSize+(a.blockSize)>>4))

	// Initialize block writer
	outw, err := gzip.NewWriterLevel(bufw, a.level)
	if err != nil {
		return 0, 0, err
	}

	// Compress block
	_, err = outw.Write(in)
	if err != nil {
		return 0, 0, err
	}

	// Finalize the gzip stream, flush buffer and return
	err = outw.Close()
	if err != nil {
		return 0, 0, err
	}
	blockSize := uint32(bufw.Buffered())
	err = bufw.Flush()

	return blockSize, uint64(len(in)), err
}

// DecompressBlock decompresses a gzip compressed block
func (a *AlgGzip) DecompressBlock(in io.Reader, out io.Writer, BlockSize uint32) (n int, err error) {
	gzipReader, err := gzip.NewReader(in)
	if err != nil {
		return 0, err
	}
	written, err := io.Copy(out, gzipReader)
	return int(written), err
}
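To make the Algorithm interface above concrete, here is a minimal round-trip sketch, written as if it were a test inside the (now deleted) press package. The sizes match the gzip preset used elsewhere in this diff; the test itself is not part of the original tree:

```go
package press

import (
	"bytes"
	"testing"
)

// TestGzipRoundTrip is a sketch showing how one block flows through the
// Algorithm interface: compress into a buffer, then inflate it back.
func TestGzipRoundTrip(t *testing.T) {
	alg := InitializeGzip(131072, 6) // 128 KiB blocks, default gzip level

	in := bytes.Repeat([]byte("hello "), 100)
	var compressed, restored bytes.Buffer

	// CompressBlock reports the compressed and uncompressed sizes.
	if _, _, err := alg.CompressBlock(in, &compressed); err != nil {
		t.Fatal(err)
	}
	// DecompressBlock inflates it again; BlockSize bounds the output.
	if _, err := alg.DecompressBlock(&compressed, &restored, 131072); err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(in, restored.Bytes()) {
		t.Fatal("round-trip mismatch")
	}
}
```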
@@ -1,223 +0,0 @@
package press

// This file implements the LZ4 algorithm.
import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"math/bits"

	"github.com/buengese/xxh32"
	lz4 "github.com/pierrec/lz4"
)

/*
Structure of LZ4 header:
Flags:
	Version = 01
	Independent = 1
	Block Checksum = 1
	Content Size = 0
	Content Checksum = 0
	Reserved = 0
	Dictionary ID = 0

BD byte:
	Reserved = 0
	Block Max Size = 101 (or 5; 256kb)
	Reserved = 0000

Header checksum byte: (xxhash(flags and bd byte) >> 8) & 0xff
*/

// LZ4Header - Header of our LZ4 file
//var LZ4Header = []byte{0x04, 0x22, 0x4d, 0x18, 0x70, 0x50, 0x84}

// LZ4Footer - Footer of our LZ4 file
var LZ4Footer = []byte{0x00, 0x00, 0x00, 0x00} // This is just an empty block

const (
	frameMagic uint32 = 0x184D2204

	compressedBlockFlag = 1 << 31
	compressedBlockMask = compressedBlockFlag - 1
)

// AlgLz4 is the Lz4 Compression algorithm
type AlgLz4 struct {
	Header lz4.Header
	buf    [19]byte // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes
}

// InitializeLz4 creates an Lz4 compression algorithm
func InitializeLz4(bs uint32, blockChecksum bool) Algorithm {
	a := new(AlgLz4)
	a.Header.Reset()
	a.Header = lz4.Header{
		BlockChecksum: blockChecksum,
		BlockMaxSize:  int(bs),
	}
	return a
}

// GetFileExtension returns the file extension
func (a *AlgLz4) GetFileExtension() string {
	return ".lz4"
}

// GetHeader returns the Lz4 compression header
func (a *AlgLz4) GetHeader() []byte {
	// Size is optional.
	buf := a.buf[:]

	// Set the fixed size data: magic number, block max size and flags.
	binary.LittleEndian.PutUint32(buf[0:], frameMagic)
	flg := byte(lz4.Version << 6)
	flg |= 1 << 5 // No block dependency.
	if a.Header.BlockChecksum {
		flg |= 1 << 4
	}
	if a.Header.Size > 0 {
		flg |= 1 << 3
	}
	buf[4] = flg
	buf[5] = blockSizeValueToIndex(a.Header.BlockMaxSize) << 4

	// Current buffer size: magic(4) + flags(1) + block max size (1).
	n := 6
	if a.Header.Size > 0 {
		binary.LittleEndian.PutUint64(buf[n:], a.Header.Size)
		n += 8
	}

	// The header checksum includes the flags, block max size and optional Size.
	buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF)

	// Header ready, write it out.
	return buf[0 : n+1]
}

// GetFooter returns the LZ4 frame footer (an empty end-of-frame block)
func (a *AlgLz4) GetFooter() []byte {
	return LZ4Footer
}

// CompressBlock compresses a block using lz4
func (a *AlgLz4) CompressBlock(in []byte, out io.Writer) (compressedSize uint32, uncompressedSize uint64, err error) {
	if len(in) > 0 {
		n, err := a.compressBlock(in, out)
		if err != nil {
			return 0, 0, err
		}
		return n, uint64(len(in)), nil
	}

	return 0, 0, nil
}

// compressBlock compresses a block.
func (a *AlgLz4) compressBlock(data []byte, dst io.Writer) (uint32, error) {
	zdata := make([]byte, a.Header.BlockMaxSize) // The compressed block size cannot exceed the input's.
	var zn int
	if level := a.Header.CompressionLevel; level != 0 {
		zn, _ = lz4.CompressBlockHC(data, zdata, level)
	} else {
		var hashTable [1 << 16]int
		zn, _ = lz4.CompressBlock(data, zdata, hashTable[:])
	}

	var bLen uint32
	if zn > 0 && zn < len(data) {
		// Compressible and compressed size smaller than uncompressed: ok!
		bLen = uint32(zn)
		zdata = zdata[:zn]
	} else {
		// Uncompressed block.
		bLen = uint32(len(data)) | compressedBlockFlag
		zdata = data
	}

	// Write the block.
	if err := a.writeUint32(bLen, dst); err != nil {
		return 0, err
	}
	_, err := dst.Write(zdata)
	if err != nil {
		return 0, err
	}

	if !a.Header.BlockChecksum {
		return bLen, nil
	}
	checksum := xxh32.ChecksumZero(zdata)
	if err := a.writeUint32(checksum, dst); err != nil {
		return 0, err
	}
	return bLen, nil
}

// writeUint32 writes a uint32 to the underlying writer.
func (a *AlgLz4) writeUint32(x uint32, dst io.Writer) error {
	buf := make([]byte, 4)
	binary.LittleEndian.PutUint32(buf, x)
	_, err := dst.Write(buf)
	return err
}

func blockSizeValueToIndex(size int) byte {
	return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2)
}

// DecompressBlock decompresses an Lz4 compressed block
func (a *AlgLz4) DecompressBlock(in io.Reader, out io.Writer, BlockSize uint32) (n int, err error) {
	// Get our compressed data
	var b bytes.Buffer
	_, err = io.Copy(&b, in)
	if err != nil {
		return 0, err
	}
	zdata := b.Bytes()
	bLen := binary.LittleEndian.Uint32(zdata[:4])

	if bLen&compressedBlockFlag > 0 {
		// Uncompressed block.
		bLen &= compressedBlockMask

		if bLen > BlockSize {
			return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
		}
		data := zdata[4 : bLen+4]

		if a.Header.BlockChecksum {
			checksum := binary.LittleEndian.Uint32(zdata[4+bLen:])

			if h := xxh32.ChecksumZero(data); h != checksum {
				return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
			}
		}
		_, err := out.Write(data)
		return len(data), err
	}

	// compressed block
	if bLen > BlockSize {
		return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
	}

	if a.Header.BlockChecksum {
		checksum := binary.LittleEndian.Uint32(zdata[4+bLen:])

		if h := xxh32.ChecksumZero(zdata[4 : bLen+4]); h != checksum {
			return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
		}
	}

	data := make([]byte, BlockSize)
	n, err = lz4.UncompressBlock(zdata[4:bLen+4], data)
	if err != nil {
		return 0, err
	}
	_, err = out.Write(data[:n])
	return n, err
}
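As a sanity check on `blockSizeValueToIndex` and the frame-header comment above, here is a small self-contained sketch (not from the original tree) that reproduces the BD-byte arithmetic for the 256 KiB block size the lz4 preset uses:

```go
package main

import (
	"fmt"
	"math/bits"
)

// Same arithmetic as blockSizeValueToIndex in the deleted file above.
func blockSizeValueToIndex(size int) byte {
	return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2)
}

func main() {
	// 262144 bytes = 256 KiB. 262144>>16 = 4, TrailingZeros(4) = 2, 2/2 = 1, 4+1 = 5.
	idx := blockSizeValueToIndex(262144)
	fmt.Printf("index = %d (binary %03b)\n", idx, idx) // 5, matching "Block Max Size = 101" above
	// The index occupies bits 4-6 of the BD byte, giving 0x50 — the same value
	// that appears in the commented-out LZ4Header bytes.
	fmt.Printf("BD byte = 0x%02x\n", idx<<4)
}
```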
@@ -1,75 +0,0 @@
package press

import (
	"bufio"
	"io"

	"github.com/ulikunitz/xz"
)

// AlgXZ represents the XZ compression algorithm
type AlgXZ struct {
	blockSize uint32
	config    xz.WriterConfig
}

// InitializeXZ creates an XZ compression algorithm
func InitializeXZ(bs uint32) Algorithm {
	a := new(AlgXZ)
	a.blockSize = bs
	a.config = xz.WriterConfig{}
	return a
}

// GetFileExtension returns the file extension
func (a *AlgXZ) GetFileExtension() string {
	return ".xz"
}

// GetHeader returns the XZ compression header (empty for xz)
func (a *AlgXZ) GetHeader() []byte {
	return []byte{}
}

// GetFooter returns the XZ compression footer (empty for xz)
func (a *AlgXZ) GetFooter() []byte {
	return []byte{}
}

// CompressBlock compresses a block using xz
func (a *AlgXZ) CompressBlock(in []byte, out io.Writer) (compressedSize uint32, uncompressedSize uint64, err error) {
	// Initialize buffer
	bufw := bufio.NewWriterSize(out, int(a.blockSize+(a.blockSize)>>4))

	// Initialize block writer
	outw, err := a.config.NewWriter(bufw)
	if err != nil {
		return 0, 0, err
	}

	// Compress block
	_, err = outw.Write(in)
	if err != nil {
		return 0, 0, err
	}

	// Finalize the xz stream, flush buffer and return
	err = outw.Close()
	if err != nil {
		return 0, 0, err
	}
	blockSize := uint32(bufw.Buffered())
	err = bufw.Flush()

	return blockSize, uint64(len(in)), err
}

// DecompressBlock decompresses an XZ compressed block
func (a *AlgXZ) DecompressBlock(in io.Reader, out io.Writer, BlockSize uint32) (n int, err error) {
	xzReader, err := xz.NewReader(in)
	if err != nil {
		return 0, err
	}
	written, err := io.Copy(out, xzReader)
	return int(written), err
}
@@ -1,526 +0,0 @@
// Package press provides wrappers for Fs and Object which implement compression.
// This file is the backend implementation for seekable compression.
package press

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
)

// Compression modes
const (
	Uncompressed = -1
	LZ4          = 2
	Gzip         = 4
	XZ           = 8
)

// Errors
var (
	ErrMetadataCorrupted = errors.New("metadata may have been corrupted")
)

// DEBUG - flag for debug mode
const DEBUG = false

// Compression is a struct containing configurable variables (what used to be constants)
type Compression struct {
	CompressionMode int // Compression mode
	Algorithm       Algorithm
	BlockSize       uint32 // Size of blocks. A higher block size means better compression but more download bandwidth is needed for small downloads
	// ~1MB is recommended for xz, while ~128KB is recommended for gzip and lz4
	HeuristicBytes      int64   // Bytes to perform the compression heuristic on to determine whether a file should be compressed
	NumThreads          int     // Number of threads to use for compression
	MaxCompressionRatio float64 // Maximum compression ratio for a file to be considered compressible
	BinPath             string  // Path to compression binary. This is used for all non-gzip compression.
}

// Algorithm is the main compression algorithm interface
type Algorithm interface {
	GetHeader() []byte

	GetFileExtension() string

	CompressBlock(in []byte, out io.Writer) (compressedSize uint32, uncompressedSize uint64, err error)

	DecompressBlock(in io.Reader, out io.Writer, BlockSize uint32) (n int, err error)

	GetFooter() []byte
}

// NewCompressionPreset creates a Compression object with a preset mode/bs
func NewCompressionPreset(preset string) (*Compression, error) {
	switch preset {
	case "lz4":
		alg := InitializeLz4(262144, true)
		return NewCompression(LZ4, alg, 262144) // LZ4 compression (very fast)
	case "gzip":
		alg := InitializeGzip(131072, 6)
		return NewCompression(Gzip, alg, 131070) // GZIP-default compression (medium)
	case "xz":
		alg := InitializeXZ(1048576)
		return NewCompression(XZ, alg, 1048576) // XZ compression (strong compression)
	}
	return nil, errors.New("Compression mode doesn't exist")
}

// NewCompressionPresetNumber creates a Compression object with a preset mode/bs
func NewCompressionPresetNumber(preset int) (*Compression, error) {
	switch preset {
	case LZ4:
		alg := InitializeLz4(262144, true)
		return NewCompression(LZ4, alg, 262144) // LZ4 compression (very fast)
	case Gzip:
		alg := InitializeGzip(131072, 6)
		return NewCompression(Gzip, alg, 131070) // GZIP-default compression (medium)
	case XZ:
		alg := InitializeXZ(1048576)
		return NewCompression(XZ, alg, 1048576) // XZ compression (strong compression)
	}
	return nil, errors.New("Compression mode doesn't exist")
}

// NewCompression creates a Compression object with some default configuration values
func NewCompression(mode int, alg Algorithm, bs uint32) (*Compression, error) {
	return NewCompressionAdvanced(mode, alg, bs, 1048576, 12, 0.9)
}

// NewCompressionAdvanced creates a Compression object
func NewCompressionAdvanced(mode int, alg Algorithm, bs uint32, hb int64, threads int, mcr float64) (c *Compression, err error) {
	// Set vars
	c = new(Compression)
	c.Algorithm = alg
	c.CompressionMode = mode
	c.BlockSize = bs
	c.HeuristicBytes = hb
	c.NumThreads = threads
	c.MaxCompressionRatio = mcr
	return c, err
}

/*** UTILITY FUNCTIONS ***/

// GetFileExtension gets a file extension for the current compression mode
func (c *Compression) GetFileExtension() string {
	return c.Algorithm.GetFileExtension()
}

// GetFileCompressionInfo gets a file extension along with the compressibility of the file
func (c *Compression) GetFileCompressionInfo(reader io.Reader) (compressible bool, extension string, err error) {
	// Use our compression algorithm to do a heuristic on the first few bytes
	var emulatedBlock, emulatedBlockCompressed bytes.Buffer
	_, err = io.CopyN(&emulatedBlock, reader, c.HeuristicBytes)
	if err != nil && err != io.EOF {
		return false, "", err
	}
	compressedSize, uncompressedSize, err := c.Algorithm.CompressBlock(emulatedBlock.Bytes(), &emulatedBlockCompressed)
	if err != nil {
		return false, "", err
	}
	compressionRatio := float64(compressedSize) / float64(uncompressedSize)

	// If the data is not compressible, return so
	if compressionRatio > c.MaxCompressionRatio {
		return false, ".bin", nil
	}

	// If the file is compressible, select file extension based on compression mode
	return true, c.Algorithm.GetFileExtension(), nil
}

/*** MAIN COMPRESSION INTERFACE ***/

// compressionResult represents the result of compression for a single block (produced by a single thread)
type compressionResult struct {
	buffer *bytes.Buffer
	n      uint64
	err    error
}

// CompressFileReturningBlockData compresses a file, returning the block data for that file.
func (c *Compression) CompressFileReturningBlockData(in io.Reader, out io.Writer) (blockData []uint32, err error) {
	// Initialize buffered writer
	bufw := bufio.NewWriterSize(out, int((c.BlockSize+(c.BlockSize)>>4)*uint32(c.NumThreads)))

	// Get blockData, copy over header, add length of header to blockData
	blockData = make([]uint32, 0)
	header := c.Algorithm.GetHeader()
	_, err = bufw.Write(header)
	if err != nil {
		return nil, err
	}
	blockData = append(blockData, uint32(len(header)))

	// Compress blocks
	for {
		// Loop through threads, spawning a goroutine for each thread. If we get EOF on one thread, set eofAt to that thread and break
		compressionResults := make([]chan compressionResult, c.NumThreads)
		eofAt := -1
		for i := 0; i < c.NumThreads; i++ {
			// Create thread channel and allocate buffer to pass to thread
			compressionResults[i] = make(chan compressionResult)
			var inputBuffer bytes.Buffer
			_, err = io.CopyN(&inputBuffer, in, int64(c.BlockSize))
			if err == io.EOF {
				eofAt = i
			} else if err != nil {
				return nil, err
			}
			// Run thread
			go func(i int, in []byte) {
				// Initialize thread writer and result struct
				var res compressionResult
				var buffer bytes.Buffer

				// Compress block
				_, n, err := c.Algorithm.CompressBlock(in, &buffer)
				if err != nil && err != io.EOF { // This errored out.
					res.buffer = nil
					res.n = 0
					res.err = err
					compressionResults[i] <- res
					return
				}
				// Pass our data back to the main thread as a compression result
				res.buffer = &buffer
				res.n = n
				res.err = err
				compressionResults[i] <- res
			}(i, inputBuffer.Bytes())
			// If we have reached EOF, we don't need more threads
			if eofAt != -1 {
				break
			}
		}

		// Process writers in order
		for i := 0; i < c.NumThreads; i++ {
			if compressionResults[i] != nil {
				// Get current compression result, get buffer, and copy buffer over to output
				res := <-compressionResults[i]
				close(compressionResults[i])
				if res.buffer == nil {
					return nil, res.err
				}
				blockSize := uint32(res.buffer.Len())

				_, err = io.Copy(bufw, res.buffer)
				if err != nil {
					return nil, err
				}
				if DEBUG {
					fmt.Printf("%d %d\n", res.n, blockSize)
				}

				// Append block size to block data
				blockData = append(blockData, blockSize)

				// If this is the last block, add the raw size of the last block to the end of blockData and break
				if eofAt == i {
					if DEBUG {
						log.Printf("%d %d %d\n", res.n, byte(res.n%256), byte(res.n/256))
					}

					blockData = append(blockData, uint32(res.n))
					break
				}
			}
		}

		// The bytes written in this batch should all be in the bufio buffer, so flush it
		err = bufw.Flush()
		if err != nil {
			return nil, err
		}

		// If EOF happened, break
		if eofAt != -1 {
			if DEBUG {
				log.Printf("%d", eofAt)
				log.Printf("%v", blockData)
			}
			break
		}
	}

	// Write footer and flush
	footer := c.Algorithm.GetFooter()

	_, err = bufw.Write(footer)
	if err != nil {
		return nil, err
	}
	err = bufw.Flush()

	// Return
	return blockData, err
}

/*** BLOCK DECOMPRESSION FUNCTIONS ***/

// decompressionResult represents the result of decompressing a block
type decompressionResult struct {
	err    error
	buffer *bytes.Buffer
}

// decompressBlockRangeMultithreaded is a wrapper around DecompressBlock that decompresses a range of blocks using multiple threads
func (d *Decompressor) decompressBlockRangeMultithreaded(in io.Reader, out io.Writer, startingBlock uint32) (n int, err error) {
	// First, use bufio.Reader to reduce the number of reads and bufio.Writer to reduce the number of writes
	bufin := bufio.NewReader(in)
	bufout := bufio.NewWriter(out)

	// Decompress each block individually.
	currBatch := startingBlock // Block # of start of current batch of blocks
	totalBytesCopied := 0
	for {
		// Loop through threads
		eofAt := -1
		decompressionResults := make([]chan decompressionResult, d.c.NumThreads)

		for i := 0; i < d.c.NumThreads; i++ {
			// Get currBlock
			currBlock := currBatch + uint32(i)

			// Create channel
			decompressionResults[i] = make(chan decompressionResult)

			// Check if we've reached EOF
			if currBlock >= d.numBlocks {
				eofAt = i
				break
			}

			// Get block to decompress
			var compressedBlock bytes.Buffer
			var err error
			n, err := io.CopyN(&compressedBlock, bufin, d.blockStarts[currBlock+1]-d.blockStarts[currBlock])
			if err != nil || n == 0 { // End of stream
				eofAt = i
				break
			}

			// Spawn thread to decompress block
			if DEBUG {
				log.Printf("Spawning %d", i)
			}
			go func(i int, currBlock uint32, in io.Reader) {
				var block bytes.Buffer
				var res decompressionResult

				// Decompress block
				_, res.err = d.c.Algorithm.DecompressBlock(in, &block, d.c.BlockSize)
				res.buffer = &block
				decompressionResults[i] <- res
			}(i, currBlock, &compressedBlock)
		}
		if DEBUG {
			log.Printf("Eof at %d", eofAt)
		}

		// Process results
		for i := 0; i < d.c.NumThreads; i++ {
			// If we got EOF, return
			if eofAt == i {
				return totalBytesCopied, bufout.Flush() // Flushing bufout is needed to prevent us from getting all nulls
			}

			// Get result and close
			res := <-decompressionResults[i]
			close(decompressionResults[i])
			if res.err != nil {
				return totalBytesCopied, res.err
			}

			// Copy to output and add to total bytes copied
			n, err := io.Copy(bufout, res.buffer)
			totalBytesCopied += int(n)
			if err != nil {
				return totalBytesCopied, err
			}
		}

		// Add NumThreads to currBatch
		currBatch += uint32(d.c.NumThreads)
	}
}

/*** MAIN DECOMPRESSION INTERFACE ***/

// Decompressor is the ReadSeeker implementation for decompression
type Decompressor struct {
	cursorPos        *int64        // The current location we have seeked to
	blockStarts      []int64       // The start of each block. These will be recovered from the block sizes
	numBlocks        uint32        // Number of blocks
	decompressedSize int64         // Decompressed size of the file.
	in               io.ReadSeeker // Input
	c                *Compression  // Compression options
}

// parseBlockData parses the block data. It returns the number of blocks, the block start locations for each block, and the decompressed size of the entire file.
func parseBlockData(blockData []uint32, BlockSize uint32) (numBlocks uint32, blockStarts []int64, decompressedSize int64) {
	// Parse the block data
	blockDataLen := len(blockData)
	numBlocks = uint32(blockDataLen - 1)
	if DEBUG {
		log.Printf("%v\n", blockData)
		log.Printf("metadata len, numblocks = %d, %d", blockDataLen, numBlocks)
	}
	blockStarts = make([]int64, numBlocks+1) // Starts with start of first block (and end of header), ends with end of last block
	currentBlockPosition := int64(0)
	for i := uint32(0); i < numBlocks; i++ { // Loop through block data, getting starts of blocks.
		currentBlockSize := blockData[i]
		currentBlockPosition += int64(currentBlockSize)
		blockStarts[i] = currentBlockPosition
	}
	blockStarts[numBlocks] = currentBlockPosition // End of last block

	//log.Printf("Block Starts: %v\n", d.blockStarts)

	numBlocks-- // Subtract 1 from number of blocks because our header technically isn't a block

	// Get uncompressed size of last block and derive uncompressed size of file
	lastBlockRawSize := blockData[blockDataLen-1]
	decompressedSize = int64(numBlocks-1)*int64(BlockSize) + int64(lastBlockRawSize)
	if DEBUG {
		log.Printf("Decompressed size = %d", decompressedSize)
	}

	return numBlocks, blockStarts, decompressedSize
}

// initWithBlockData initializes the decompressor with the block data specified.
func (d *Decompressor) initWithBlockData(c *Compression, in io.ReadSeeker, size int64, blockData []uint32) (err error) {
	// Copy over compression object
	d.c = c

	// Initialize cursor position
	d.cursorPos = new(int64)

	// Parse the block data
	d.numBlocks, d.blockStarts, d.decompressedSize = parseBlockData(blockData, d.c.BlockSize)

	// Initialize cursor position value and copy over reader
	*d.cursorPos = 0
	_, err = in.Seek(0, io.SeekStart)
	d.in = in

	return err
}

// Read reads data using a decompressor
func (d Decompressor) Read(p []byte) (int, error) {
	if DEBUG {
		log.Printf("Cursor pos before: %d\n", *d.cursorPos)
	}
	// Check if we're at the end of the file or before the beginning of the file
	if *d.cursorPos >= d.decompressedSize || *d.cursorPos < 0 {
		if DEBUG {
			log.Println("Out of bounds EOF")
		}
		return 0, io.EOF
	}

	// Get block range to read
	blockNumber := *d.cursorPos / int64(d.c.BlockSize)
	blockStart := d.blockStarts[blockNumber]          // Start position of blocks to read
	dataOffset := *d.cursorPos % int64(d.c.BlockSize) // Offset of data to read in blocks to read
	bytesToRead := len(p)                             // Number of bytes to read
	blocksToRead := (int64(bytesToRead)+dataOffset)/int64(d.c.BlockSize) + 1 // Number of blocks to read
	returnEOF := false
	if blockNumber+blocksToRead > int64(d.numBlocks) { // Overflowed the last block
		blocksToRead = int64(d.numBlocks) - blockNumber
		returnEOF = true
	}
	blockEnd := d.blockStarts[blockNumber+blocksToRead] // Start of the block after the last block we want to get is the end of the last block we want to get
	blockLen := blockEnd - blockStart

	// Read compressed block range into buffer
	var compressedBlocks bytes.Buffer
	_, err := d.in.Seek(blockStart, io.SeekStart)
	if err != nil {
		return 0, err
	}
	n1, err := io.CopyN(&compressedBlocks, d.in, blockLen)
	if DEBUG {
		log.Printf("block # = %d @ %d <- %d, len %d, copied %d bytes", blockNumber, blockStart, *d.cursorPos, blockLen, n1)
	}
	if err != nil {
		if DEBUG {
			log.Println("Copy Error")
		}
		return 0, err
	}

	// Decompress block range
	var b bytes.Buffer
	n, err := d.decompressBlockRangeMultithreaded(&compressedBlocks, &b, uint32(blockNumber))
	if err != nil {
		log.Println("Decompression error")
		return n, err
	}

	// Calculate bytes read
	readOverflow := *d.cursorPos + int64(bytesToRead) - d.decompressedSize
	if readOverflow < 0 {
		readOverflow = 0
	}
	bytesRead := int64(bytesToRead) - readOverflow
	if DEBUG {
		log.Printf("Read offset = %d, overflow = %d", dataOffset, readOverflow)
		log.Printf("Decompressed %d bytes; read %d out of %d bytes\n", n, bytesRead, bytesToRead)
		// log.Printf("%v", b.Bytes())
	}

	// If we read 0 bytes, we reached the end of the file
	if bytesRead == 0 {
		log.Println("EOF")
		return 0, io.EOF
	}

	// Copy from buffer+offset to p
	_, err = io.CopyN(ioutil.Discard, &b, dataOffset)
	if err != nil {
		return 0, err
	}
	n, err = b.Read(p) // Note: everything after bytesToRead bytes will be discarded; we are returning bytesToRead instead of n
	if err != nil {
		return n, err
	}

	// Increment cursor position and return
	*d.cursorPos += bytesRead
	if returnEOF {
		if DEBUG {
			log.Println("EOF")
		}
		return int(bytesRead), io.EOF
	}
	return int(bytesRead), nil
}

// Seek seeks to a location in the compressed stream
func (d Decompressor) Seek(offset int64, whence int) (int64, error) {
	// Seek to offset in cursorPos
	if whence == io.SeekStart {
		*d.cursorPos = offset
	} else if whence == io.SeekCurrent {
		*d.cursorPos += offset
	} else if whence == io.SeekEnd {
		*d.cursorPos = d.decompressedSize + offset
	}

	// Return
	return offset, nil
}

// DecompressFileExtData decompresses a file using external block data. Argument "size" is very useful here.
func (c *Compression) DecompressFileExtData(in io.ReadSeeker, size int64, blockData []uint32) (FileHandle io.ReadSeeker, decompressedSize int64, err error) {
	var decompressor Decompressor
	err = decompressor.initWithBlockData(c, in, size, blockData)
	return decompressor, decompressor.decompressedSize, err
}
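To make the blockData bookkeeping concrete, here is a small self-contained sketch (not from the original tree) that mirrors parseBlockData's arithmetic for a hypothetical file compressed into two blocks behind a 7-byte header:

```go
package main

import "fmt"

func main() {
	// blockData as produced by CompressFileReturningBlockData:
	// [header length, compressed size of each block..., raw size of last block]
	blockData := []uint32{7, 1000, 900, 50000}
	blockSize := uint32(131072) // 128 KiB raw blocks

	numBlocks := uint32(len(blockData) - 1) // 3, still counting the header entry
	blockStarts := make([]int64, numBlocks+1)
	pos := int64(0)
	for i := uint32(0); i < numBlocks; i++ {
		pos += int64(blockData[i])
		blockStarts[i] = pos
	}
	blockStarts[numBlocks] = pos
	numBlocks-- // the header entry isn't a data block

	// Every block but the last decompresses to blockSize bytes;
	// the final blockData entry is the last block's raw size.
	decompressedSize := int64(numBlocks-1)*int64(blockSize) + int64(blockData[len(blockData)-1])

	fmt.Println(blockStarts)      // [7 1007 1907 1907]
	fmt.Println(decompressedSize) // 1*131072 + 50000 = 181072
}
```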
@@ -1,131 +0,0 @@
package press

import (
	"bufio"
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"io"
	"io/ioutil"
	"math/rand"
	"os"
	"strings"
	"testing"
)

const TestStringSmall = "The quick brown fox jumps over the lazy dog."
const TestSizeLarge = 2097152 // 2 megabytes

// Tests compression and decompression for a preset
func testCompressDecompress(t *testing.T, preset string, testString string) {
	// Create compression instance
	comp, err := NewCompressionPreset(preset)
	if err != nil {
		t.Fatal(err)
	}

	// Open files and hashers
	testFile := strings.NewReader(testString)
	testFileHasher := md5.New()
	if err != nil {
		t.Fatal(err)
	}
	compressedFile, err := ioutil.TempFile(os.TempDir(), "rclone_compression_test")
	if err != nil {
		t.Fatal(err)
	}
	outHasher := md5.New()

	// Compress file and hash it (size doesn't matter here)
	testFileReader, testFileWriter := io.Pipe()
	go func() {
		_, err := io.Copy(io.MultiWriter(testFileHasher, testFileWriter), testFile)
		if err != nil {
			t.Fatal("Failed to write compressed file")
		}
		err = testFileWriter.Close()
		if err != nil {
			t.Log("Failed to close compressed file")
		}
	}()
	var blockData []uint32
	blockData, err = comp.CompressFileReturningBlockData(testFileReader, compressedFile)
	if err != nil {
		t.Fatalf("Compression failed with error: %v", err)
	}
	testFileHash := testFileHasher.Sum(nil)

	// Get the size, seek to the beginning of the compressed file
	size, err := compressedFile.Seek(0, io.SeekEnd)
	if err != nil {
		t.Fatal(err)
	}
	_, err = compressedFile.Seek(0, io.SeekStart)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("Compressed size: %d\n", size)

	// Decompress file into a hasher
	var FileHandle io.ReadSeeker
	var decompressedSize int64
	FileHandle, decompressedSize, err = comp.DecompressFileExtData(compressedFile, size, blockData)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("Decompressed size: %d\n", decompressedSize)
	bufr := bufio.NewReaderSize(FileHandle, 12345678)
	_, err = io.Copy(outHasher, bufr)
	if err != nil && err != io.EOF {
		t.Fatal(err)
	}
	decompressedFileHash := outHasher.Sum(nil)

	// Clean up
	err = compressedFile.Close()
	if err != nil {
		t.Log("Warning: cannot close compressed test file")
	}
	err = os.Remove(compressedFile.Name())
	if err != nil {
		t.Log("Warning: cannot remove compressed test file")
	}

	// Compare hashes
	if !bytes.Equal(testFileHash, decompressedFileHash) {
		t.Logf("Hash of original file: %x\n", testFileHash)
		t.Logf("Hash of recovered file: %x\n", decompressedFileHash)
		t.Fatal("Hashes do not match!")
	}
}

// Tests both small and large strings for a preset
func testSmallLarge(t *testing.T, preset string) {
	testStringLarge := getCompressibleString(TestSizeLarge)
	t.Run("TestSmall", func(t *testing.T) {
		testCompressDecompress(t, preset, TestStringSmall)
	})
	t.Run("TestLarge", func(t *testing.T) {
		testCompressDecompress(t, preset, testStringLarge)
	})
}

// Gets a compressible string
func getCompressibleString(size int) string {
	// Get pseudorandom bytes
	prbytes := make([]byte, size*3/4+16)
	prsource := rand.New(rand.NewSource(0))
	prsource.Read(prbytes)
	// Encode in base64
	encoding := base64.NewEncoding("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/")
	return encoding.EncodeToString(prbytes)[:size]
}

func TestCompression(t *testing.T) {
	testCases := []string{"lz4", "gzip", "xz"}
	for _, tc := range testCases {
		t.Run(tc, func(t *testing.T) {
			testSmallLarge(t, tc)
		})
	}
}
(File diff suppressed because it is too large.)
@@ -1,129 +0,0 @@
// Test the press filesystem interface
package press

import (
	"os"
	"path/filepath"
	"testing"

	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	if *fstest.RemoteName == "" {
		t.Skip("Skipping as -remote not set")
	}
	fstests.Run(t, &fstests.Opt{
		RemoteName: *fstest.RemoteName,
		NilObject:  (*Object)(nil),
		UnimplementableFsMethods: []string{
			"OpenWriterAt",
			"MergeDirs",
			"DirCacheFlush",
			"PutUnchecked",
			"PutStream",
			"UserInfo",
			"Disconnect",
		},
		UnimplementableObjectMethods: []string{
			"GetTier",
			"SetTier",
		},
	})
}

// TestRemoteLz4 tests LZ4 compression
func TestRemoteLz4(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	tempdir := filepath.Join(os.TempDir(), "rclone-press-test-lz4")
	name := "TestPressLz4"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
		NilObject:  (*Object)(nil),
		UnimplementableFsMethods: []string{
			"OpenWriterAt",
			"MergeDirs",
			"DirCacheFlush",
			"PutUnchecked",
			"PutStream",
			"UserInfo",
			"Disconnect",
		},
		UnimplementableObjectMethods: []string{
			"GetTier",
			"SetTier",
		},
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "press"},
			{Name: name, Key: "remote", Value: tempdir},
			{Name: name, Key: "compression_mode", Value: "lz4"},
		},
	})
}

// TestRemoteGzip tests GZIP compression
func TestRemoteGzip(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	tempdir := filepath.Join(os.TempDir(), "rclone-press-test-gzip")
	name := "TestPressGzip"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
		NilObject:  (*Object)(nil),
		UnimplementableFsMethods: []string{
			"OpenWriterAt",
			"MergeDirs",
			"DirCacheFlush",
			"PutUnchecked",
			"PutStream",
			"UserInfo",
			"Disconnect",
		},
		UnimplementableObjectMethods: []string{
			"GetTier",
			"SetTier",
		},
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "press"},
			{Name: name, Key: "remote", Value: tempdir},
			{Name: name, Key: "compression_mode", Value: "gzip"},
		},
	})
}

// TestRemoteXz tests XZ compression
func TestRemoteXz(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	tempdir := filepath.Join(os.TempDir(), "rclone-press-test-xz")
	name := "TestPressXz"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
		NilObject:  (*Object)(nil),
		UnimplementableFsMethods: []string{
			"OpenWriterAt",
			"MergeDirs",
			"DirCacheFlush",
			"PutUnchecked",
			"PutStream",
			"UserInfo",
			"Disconnect",
		},
		UnimplementableObjectMethods: []string{
			"GetTier",
			"SetTier",
		},
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "press"},
			{Name: name, Key: "remote", Value: tempdir},
			{Name: name, Key: "compression_mode", Value: "xz"},
		},
	})
}
@@ -110,7 +110,7 @@ def read_doc(doc):
     # Remove icons
     contents = re.sub(r'<i class="fa.*?</i>\s*', "", contents)
     # Make [...](/links/) absolute
-    contents = re.sub(r'\]\((\/.*?\/(#.*)?)\)', r"](https://rclone.org\1)", contents)
+    contents = re.sub(r'\((\/.*?\/)\)', r"(https://rclone.org\1)", contents)
     # Interpret provider shortcode
     # {{< provider name="Amazon S3" home="https://aws.amazon.com/s3/" config="/s3/" >}}
     contents = re.sub(r'\{\{<\s+provider.*?name="(.*?)".*?>\}\}', r"\1", contents)
@@ -371,12 +371,7 @@ func (fsys *FS) Write(path string, buff []byte, ofst int64, fh uint64) (n int) {
 	if errc != 0 {
 		return errc
 	}
-	var err error
-	if fsys.VFS.Opt.CacheMode < vfs.CacheModeWrites || handle.Node().Mode()&os.ModeAppend == 0 {
-		n, err = handle.WriteAt(buff, ofst)
-	} else {
-		n, err = handle.Write(buff)
-	}
+	n, err := handle.WriteAt(buff, ofst)
 	if err != nil {
 		return translateError(err)
 	}
@@ -5,7 +5,6 @@ package mount
 import (
 	"context"
 	"io"
-	"os"

 	"bazil.org/fuse"
 	fusefs "bazil.org/fuse/fs"
@@ -42,12 +41,7 @@ var _ fusefs.HandleWriter = (*FileHandle)(nil)
 // Write data to the file handle
 func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
 	defer log.Trace(fh, "len=%d, offset=%d", len(req.Data), req.Offset)("written=%d, err=%v", &resp.Size, &err)
-	var n int
-	if fh.Handle.Node().VFS().Opt.CacheMode < vfs.CacheModeWrites || fh.Handle.Node().Mode()&os.ModeAppend == 0 {
-		n, err = fh.Handle.WriteAt(req.Data, req.Offset)
-	} else {
-		n, err = fh.Handle.Write(req.Data)
-	}
+	n, err := fh.Handle.WriteAt(req.Data, req.Offset)
 	if err != nil {
 		return translateError(err)
 	}
@@ -6,7 +6,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"os"
 	"syscall"

 	fusefs "github.com/hanwen/go-fuse/v2/fs"
@@ -74,11 +73,7 @@ func (f *FileHandle) Write(ctx context.Context, data []byte, off int64) (written
 	var n int
 	var err error
 	defer log.Trace(f, "off=%d", off)("n=%d, off=%d, errno=%v", &n, &off, &errno)
-	if f.h.Node().VFS().Opt.CacheMode < vfs.CacheModeWrites || f.h.Node().Mode()&os.ModeAppend == 0 {
-		n, err = f.h.WriteAt(data, off)
-	} else {
-		n, err = f.h.Write(data)
-	}
+	n, err = f.h.WriteAt(data, off)
 	return uint32(n), translateError(err)
 }
@@ -25,6 +25,7 @@ import (
 	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/lib/file"
 	"github.com/rclone/rclone/vfs"
+	"github.com/rclone/rclone/vfs/vfscommon"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -44,11 +45,11 @@ var (
 func RunTests(t *testing.T, fn MountFn) {
 	mountFn = fn
 	flag.Parse()
-	cacheModes := []vfs.CacheMode{
-		vfs.CacheModeOff,
-		vfs.CacheModeMinimal,
-		vfs.CacheModeWrites,
-		vfs.CacheModeFull,
+	cacheModes := []vfscommon.CacheMode{
+		vfscommon.CacheModeOff,
+		vfscommon.CacheModeMinimal,
+		vfscommon.CacheModeWrites,
+		vfscommon.CacheModeFull,
 	}
 	run = newRun()
 	for _, cacheMode := range cacheModes {
@@ -207,7 +208,7 @@ func (r *Run) umount() {
 }

 // cacheMode flushes the VFS and changes the CacheMode
-func (r *Run) cacheMode(cacheMode vfs.CacheMode) {
+func (r *Run) cacheMode(cacheMode vfscommon.CacheMode) {
 	if r.skip {
 		log.Printf("FUSE not found so skipping cacheMode")
 		return
@@ -5,10 +5,9 @@ import (
 	"runtime"
 	"testing"

+	"github.com/rclone/rclone/vfs/vfscommon"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-
-	"github.com/rclone/rclone/vfs"
 )

 // TestWriteFileNoWrite tests writing a file with no write()'s to it
@@ -91,7 +90,7 @@ func TestWriteFileFsync(t *testing.T) {
 func TestWriteFileDup(t *testing.T) {
 	run.skipIfNoFUSE(t)

-	if run.vfs.Opt.CacheMode < vfs.CacheModeWrites {
+	if run.vfs.Opt.CacheMode < vfscommon.CacheModeWrites {
 		t.Skip("not supported on vfs-cache-mode < writes")
 		return
 	}
@@ -136,7 +135,7 @@ func TestWriteFileDup(t *testing.T) {
 func TestWriteFileAppend(t *testing.T) {
 	run.skipIfNoFUSE(t)

-	if run.vfs.Opt.CacheMode < vfs.CacheModeWrites {
+	if run.vfs.Opt.CacheMode < vfscommon.CacheModeWrites {
 		t.Skip("not supported on vfs-cache-mode < writes")
 		return
 	}
@@ -6,10 +6,9 @@ import (
 	"runtime"
 	"testing"

+	"github.com/rclone/rclone/vfs/vfscommon"
 	"github.com/stretchr/testify/assert"
 	"golang.org/x/sys/unix"
-
-	"github.com/rclone/rclone/vfs"
 )

 // TestWriteFileDoubleClose tests double close on write
@@ -45,7 +44,7 @@ func TestWriteFileDoubleClose(t *testing.T) {

 	// write to the other dup
 	_, err = unix.Write(fd2, buf)
-	if run.vfs.Opt.CacheMode < vfs.CacheModeWrites {
+	if run.vfs.Opt.CacheMode < vfscommon.CacheModeWrites {
 		// produces an error if cache mode < writes
 		assert.Error(t, err, "input/output error")
 	} else {
@@ -72,7 +72,6 @@ Features
   * [Cache](/cache/) backend
   * [Chunking](/chunker/) backend
   * [Union](/union/) backend
-  * Experimental [Compression](/press/) backend
   * Optional FUSE mount ([rclone mount](/commands/rclone_mount/))
   * Multi-threaded downloads to local disk
   * Can [serve](/commands/rclone_serve/) local or remote files over [HTTP](/commands/rclone_serve_http/)/[WebDav](/commands/rclone_serve_webdav/)/[FTP](/commands/rclone_serve_ftp/)/[SFTP](/commands/rclone_serve_sftp/)/[dlna](/commands/rclone_serve_dlna/)
@@ -339,7 +339,3 @@ Contributors
  * Tim Gallant <me@timgallant.us>
  * Frederick Zhang <frederick888@tsundere.moe>
  * valery1707 <valery1707@gmail.com>
  * Yves G <theYinYeti@yalis.fr>
  * Shing Kit Chan <chanshingkit@gmail.com>
  * Franklyn Tackitt <franklyn@tackitt.net>
  * Robert-André Mauchin <zebob.m@gmail.com>
@@ -9,8 +9,8 @@ date: "2020-02-01"
 ## v1.51.0 - 2020-02-01

 * New backends
-    * [Memory](/memory/) (Nick Craig-Wood)
-    * [Sugarsync](/sugarsync/) (Nick Craig-Wood)
+    * [Memory](/memory) (Nick Craig-Wood)
+    * [Sugarsync](/sugarsync) (Nick Craig-Wood)
 * New Features
     * Adjust all backends to have `--backend-encoding` parameter (Nick Craig-Wood)
         * this enables the encoding for special characters to be adjusted or disabled
@@ -165,9 +165,9 @@ date: "2020-02-01"
 ## v1.50.0 - 2019-10-26

 * New backends
-    * [Citrix Sharefile](/sharefile/) (Nick Craig-Wood)
-    * [Chunker](/chunker/) - an overlay backend to split files into smaller parts (Ivan Andreev)
-    * [Mail.ru Cloud](/mailru/) (Ivan Andreev)
+    * [Citrix Sharefile](/sharefile) (Nick Craig-Wood)
+    * [Chunker](/chunker) - an overlay backend to split files into smaller parts (Ivan Andreev)
+    * [Mail.ru Cloud](/mailru) (Ivan Andreev)
 * New Features
     * encodings (Fabian Möller & Nick Craig-Wood)
         * All backends now use file name encoding to ensure any file name can be written to any backend.
@@ -320,7 +320,7 @@ date: "2020-02-01"

 * New backends
     * [1fichier](/fichier/) (Laura Hausmann)
-    * [Google Photos](/googlephotos/) (Nick Craig-Wood)
+    * [Google Photos](/googlephotos) (Nick Craig-Wood)
     * [Putio](/putio/) (Cenk Alti)
     * [premiumize.me](/premiumizeme/) (Nick Craig-Wood)
 * New Features
@@ -736,20 +736,6 @@ When the limit is reached all transfers will stop immediately.

 Rclone will exit with exit code 8 if the transfer limit is reached.

-### --cutoff-mode=hard|soft|cautious ###
-
-This modifies the behavior of `--max-transfer`.
-Defaults to `--cutoff-mode=hard`.
-
-Specifying `--cutoff-mode=hard` will stop transferring immediately
-when Rclone reaches the limit.
-
-Specifying `--cutoff-mode=soft` will stop starting new transfers
-when Rclone reaches the limit.
-
-Specifying `--cutoff-mode=cautious` will try to prevent Rclone
-from reaching the limit.
-
 ### --modify-window=TIME ###

 When checking whether a file has been modified, this is the maximum
@@ -1,81 +0,0 @@
---
title: "Press"
description: "Compression Remote"
date: "2019-05-12"
---

Press (Experimental)
-----------------------------------------

The `press` remote adds compression to another remote. It is best used with remotes containing
many large compressible files or on top of other remotes like crypt.

Please read the [warnings](#warnings) before using this remote.

To use this remote, all you need to do is specify another remote and a compression mode to use:

```
$ rclone config
Current remotes:

Name                 Type
====                 ====
remote_to_press      sometype

e) Edit existing remote
n) New remote
d) Delete remote
r) Rename remote
c) Copy remote
s) Set configuration password
q) Quit config
e/n/d/r/c/s/q> n
name> press
...
 8 / Compress a remote
   \ "press"
...
Storage> press
** See help for press backend at: https://rclone.org/press/ **

Remote to compress.
Enter a string value. Press Enter for the default ("")
remote> remote_to_press
Compression mode. XZ compression mode requires the xz binary to be in PATH.
Enter a string value. Press Enter for the default ("gzip-min").
Choose a number from below, or type in your own value
 1 / Fast, real-time compression with reasonable compression ratios.
   \ "lz4"
 2 / Google's compression algorithm. Slightly faster and larger than LZ4.
   \ "snappy"
 3 / Standard gzip compression with fastest parameters.
   \ "gzip-min"
 4 / Standard gzip compression with default parameters.
   \ "gzip-default"
 5 / Slow but powerful compression with reasonable speed.
   \ "xz-min"
 6 / Slowest but best compression.
   \ "xz-default"
compression_mode> gzip-min
```

### Compression Modes
Currently four compression algorithms are supported: lz4, snappy, gzip, and xz.
Gzip and xz are further divided into two modes: "min" with less compression and "default" with more.
Currently, xz modes are only supported if there is an xz binary in your system's $PATH.
Depending on your operating system, the methods of installing this binary vary. This may be changed in
future updates.

### Warnings

#### Filetype
If you open a remote wrapped by press, you will see that there are many files with an extension corresponding to
the compression algorithm you chose. These files, with the exception of snappy files, are standard files that
can be opened by various archive programs, but they have some hidden metadata that allows them to be used by rclone.
While you may download and decompress these files at will, do **not** upload any compressed files to a wrapped remote
through any other means than rclone. That would upload files that do not contain metadata and **will** cause unexpected behavior.

#### Experimental
This remote is currently **experimental**. Things may break and data may be lost. Anything you do with this remote is
at your own risk. Please understand the risks associated with using experimental code and don't use this remote in
critical applications.
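For reference, a completed configuration produces a section in `rclone.conf` along these lines. This is a sketch inferred from the config keys the backend's own tests set (`type`, `remote`, `compression_mode`), not a capture of an actual config file; the values depend on your answers above:

```
[press]
type = press
remote = remote_to_press
compression_mode = gzip-min
```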
@@ -9,7 +9,6 @@ date: "2018-03-05"
 If rclone is run with the `--rc` flag then it starts an http server
 which can be used to remote control rclone.

-
 If you just want to run a remote control then see the [rcd command](/commands/rclone_rcd/).

 **NB** this is experimental and everything here is subject to change!
@@ -86,12 +85,6 @@ style.

 Default Off.

-### --rc-enable-metrics
-
-Enable OpenMetrics/Prometheus compatible endpoint at `/metrics`.
-
-Default Off.
-
 ### --rc-web-gui

 Set this flag to serve the default web gui on the same port as rclone.
@@ -61,10 +61,7 @@ func newAccountSizeName(stats *StatsInfo, in io.ReadCloser, size int64, name str
 		exit:   make(chan struct{}),
 		avg:    0,
 		lpTime: time.Now(),
-		max:    -1,
+		max:    int64(fs.Config.MaxTransfer),
 	}
-	if fs.Config.CutoffMode == fs.CutoffModeHard {
-		acc.max = int64((fs.Config.MaxTransfer))
-	}
 	go acc.averageLoop()
 	stats.inProgress.set(acc.name, acc)
@@ -197,12 +197,9 @@ func TestAccountAccounter(t *testing.T) {

 func TestAccountMaxTransfer(t *testing.T) {
 	old := fs.Config.MaxTransfer
-	oldMode := fs.Config.CutoffMode
-
 	fs.Config.MaxTransfer = 15
 	defer func() {
 		fs.Config.MaxTransfer = old
-		fs.Config.CutoffMode = oldMode
 	}()

 	in := ioutil.NopCloser(bytes.NewBuffer(make([]byte, 100)))
@@ -221,20 +218,6 @@ func TestAccountMaxTransfer(t *testing.T) {
 	assert.Equal(t, 0, n)
 	assert.Equal(t, ErrorMaxTransferLimitReached, err)
 	assert.True(t, fserrors.IsFatalError(err))
-
-	fs.Config.CutoffMode = fs.CutoffModeSoft
-	stats = NewStats()
-	acc = newAccountSizeName(stats, in, 1, "test")
-
-	n, err = acc.Read(b)
-	assert.Equal(t, 10, n)
-	assert.NoError(t, err)
-	n, err = acc.Read(b)
-	assert.Equal(t, 10, n)
-	assert.NoError(t, err)
-	n, err = acc.Read(b)
-	assert.Equal(t, 10, n)
-	assert.NoError(t, err)
 }

 func TestShortenName(t *testing.T) {
@@ -1,94 +0,0 @@
-package accounting
-
-import (
-	"github.com/prometheus/client_golang/prometheus"
-)
-
-var namespace = "rclone_"
-
-// RcloneCollector is a Prometheus collector for Rclone
-type RcloneCollector struct {
-	bytesTransferred *prometheus.Desc
-	transferSpeed    *prometheus.Desc
-	numOfErrors      *prometheus.Desc
-	numOfCheckFiles  *prometheus.Desc
-	transferredFiles *prometheus.Desc
-	deletes          *prometheus.Desc
-	fatalError       *prometheus.Desc
-	retryError       *prometheus.Desc
-}
-
-// NewRcloneCollector make a new RcloneCollector
-func NewRcloneCollector() *RcloneCollector {
-	return &RcloneCollector{
-		bytesTransferred: prometheus.NewDesc(namespace+"bytes_transferred_total",
-			"Total transferred bytes since the start of the Rclone process",
-			nil, nil,
-		),
-		transferSpeed: prometheus.NewDesc(namespace+"speed",
-			"Average speed in bytes/sec since the start of the Rclone process",
-			nil, nil,
-		),
-		numOfErrors: prometheus.NewDesc(namespace+"errors_total",
-			"Number of errors thrown",
-			nil, nil,
-		),
-		numOfCheckFiles: prometheus.NewDesc(namespace+"checked_files_total",
-			"Number of checked files",
-			nil, nil,
-		),
-		transferredFiles: prometheus.NewDesc(namespace+"files_transferred_total",
-			"Number of transferred files",
-			nil, nil,
-		),
-		deletes: prometheus.NewDesc(namespace+"files_deleted_total",
-			"Total number of files deleted",
-			nil, nil,
-		),
-		fatalError: prometheus.NewDesc(namespace+"fatal_error",
-			"Whether a fatal error has occurred",
-			nil, nil,
-		),
-		retryError: prometheus.NewDesc(namespace+"retry_error",
-			"Whether there has been an error that will be retried",
-			nil, nil,
-		),
-	}
-}
-
-// Describe is part of the Collector interface: https://godoc.org/github.com/prometheus/client_golang/prometheus#Collector
-func (c *RcloneCollector) Describe(ch chan<- *prometheus.Desc) {
-	ch <- c.bytesTransferred
-	ch <- c.transferSpeed
-	ch <- c.numOfErrors
-	ch <- c.numOfCheckFiles
-	ch <- c.transferredFiles
-	ch <- c.deletes
-	ch <- c.fatalError
-	ch <- c.retryError
-}
-
-// Collect is part of the Collector interface: https://godoc.org/github.com/prometheus/client_golang/prometheus#Collector
-func (c *RcloneCollector) Collect(ch chan<- prometheus.Metric) {
-	s := GlobalStats()
-	s.mu.RLock()
-
-	ch <- prometheus.MustNewConstMetric(c.bytesTransferred, prometheus.CounterValue, float64(s.bytes))
-	ch <- prometheus.MustNewConstMetric(c.transferSpeed, prometheus.GaugeValue, s.Speed())
-	ch <- prometheus.MustNewConstMetric(c.numOfErrors, prometheus.CounterValue, float64(s.errors))
-	ch <- prometheus.MustNewConstMetric(c.numOfCheckFiles, prometheus.CounterValue, float64(s.checks))
-	ch <- prometheus.MustNewConstMetric(c.transferredFiles, prometheus.CounterValue, float64(s.transfers))
-	ch <- prometheus.MustNewConstMetric(c.deletes, prometheus.CounterValue, float64(s.deletes))
-	ch <- prometheus.MustNewConstMetric(c.fatalError, prometheus.GaugeValue, bool2Float(s.fatalError))
-	ch <- prometheus.MustNewConstMetric(c.retryError, prometheus.GaugeValue, bool2Float(s.retryError))
-
-	s.mu.RUnlock()
-}
-
-// bool2Float is a small function to convert a boolean into a float64 value that can be used for Prometheus
-func bool2Float(e bool) float64 {
-	if e {
-		return 1
-	}
-	return 0
-}

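The collector deleted above follows the standard custom-collector pattern from the prometheus client library: one `prometheus.Desc` per metric, and fresh const metrics emitted on every `Collect` call. A runnable toy version in the same shape follows; `demoCollector` and the metric name are illustrative, and the constant `42` stands in for the value the real collector reads from `GlobalStats()` under a lock.

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// demoCollector is a toy collector in the same shape as the deleted
// RcloneCollector: one Desc per metric, const metrics built on Collect.
type demoCollector struct {
	bytesDone *prometheus.Desc
}

func newDemoCollector() *demoCollector {
	return &demoCollector{
		bytesDone: prometheus.NewDesc("demo_bytes_transferred_total",
			"Total transferred bytes", nil, nil),
	}
}

func (c *demoCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.bytesDone
}

func (c *demoCollector) Collect(ch chan<- prometheus.Metric) {
	// In the real collector this value comes from GlobalStats()
	// under a read lock; a constant stands in here.
	ch <- prometheus.MustNewConstMetric(c.bytesDone, prometheus.CounterValue, 42)
}

func main() {
	prometheus.MustRegister(newDemoCollector())
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":9090", nil))
}
```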
@@ -56,7 +56,13 @@ func NewStats() *StatsInfo {
 func (s *StatsInfo) RemoteStats() (out rc.Params, err error) {
 	out = make(rc.Params)
 	s.mu.RLock()
-	out["speed"] = s.Speed()
+	dt := s.totalDuration()
+	dtSeconds := dt.Seconds()
+	speed := 0.0
+	if dt > 0 {
+		speed = float64(s.bytes) / dtSeconds
+	}
+	out["speed"] = speed
 	out["bytes"] = s.bytes
 	out["errors"] = s.errors
 	out["fatalError"] = s.fatalError

@@ -64,7 +70,7 @@ func (s *StatsInfo) RemoteStats() (out rc.Params, err error) {
 	out["checks"] = s.checks
 	out["transfers"] = s.transfers
 	out["deletes"] = s.deletes
-	out["elapsedTime"] = s.totalDuration().Seconds()
+	out["elapsedTime"] = dtSeconds
 	s.mu.RUnlock()
 	if !s.checking.empty() {
 		var c []string

@@ -95,17 +101,6 @@ func (s *StatsInfo) RemoteStats() (out rc.Params, err error) {
 	return out, nil
 }
 
-// Speed returns the average speed of the transfer in bytes/second
-func (s *StatsInfo) Speed() float64 {
-	dt := s.totalDuration()
-	dtSeconds := dt.Seconds()
-	speed := 0.0
-	if dt > 0 {
-		speed = float64(s.bytes) / dtSeconds
-	}
-	return speed
-}
-
 func (s *StatsInfo) transferRemoteStats(name string) rc.Params {
 	s.mu.RLock()
 	defer s.mu.RUnlock()

@@ -387,22 +382,6 @@ func (s *StatsInfo) GetBytes() int64 {
 	return s.bytes
 }
 
-// GetBytesWithPending returns the number of bytes transferred and remaining transfers
-func (s *StatsInfo) GetBytesWithPending() int64 {
-	s.mu.RLock()
-	defer s.mu.RUnlock()
-	pending := int64(0)
-	for _, tr := range s.startedTransfers {
-		if tr.acc != nil {
-			bytes, size := tr.acc.progress()
-			if bytes < size {
-				pending += size - bytes
-			}
-		}
-	}
-	return s.bytes + pending
-}
-
 // Errors updates the stats for errors
 func (s *StatsInfo) Errors(errors int64) {
 	s.mu.Lock()

@@ -93,7 +93,6 @@ type ConfigInfo struct {
 	UseServerModTime bool
 	MaxTransfer      SizeSuffix
 	MaxDuration      time.Duration
-	CutoffMode       CutoffMode
 	MaxBacklog       int
 	MaxStatsGroups   int
 	StatsOneLine     bool

@@ -94,7 +94,6 @@ func AddFlags(flagSet *pflag.FlagSet) {
 	flags.FVarP(flagSet, &fs.Config.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList)
 	flags.FVarP(flagSet, &fs.Config.MaxTransfer, "max-transfer", "", "Maximum size of data to transfer.")
 	flags.DurationVarP(flagSet, &fs.Config.MaxDuration, "max-duration", "", 0, "Maximum duration rclone will transfer data for.")
-	flags.FVarP(flagSet, &fs.Config.CutoffMode, "cutoff-mode", "", "Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS")
 	flags.IntVarP(flagSet, &fs.Config.MaxBacklog, "max-backlog", "", fs.Config.MaxBacklog, "Maximum number of objects in sync or check backlog.")
 	flags.IntVarP(flagSet, &fs.Config.MaxStatsGroups, "max-stats-groups", "", fs.Config.MaxStatsGroups, "Maximum number of stats groups to keep in memory. On max oldest is discarded.")
 	flags.BoolVarP(flagSet, &fs.Config.StatsOneLine, "stats-one-line", "", fs.Config.StatsOneLine, "Make the stats fit on one line.")

@@ -1,49 +0,0 @@
-package fs
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/pkg/errors"
-)
-
-// CutoffMode describes the possible delete modes in the config
-type CutoffMode byte
-
-// MaxTransferMode constants
-const (
-	CutoffModeHard CutoffMode = iota
-	CutoffModeSoft
-	CutoffModeCautious
-	CutoffModeDefault = CutoffModeHard
-)
-
-var cutoffModeToString = []string{
-	CutoffModeHard:     "HARD",
-	CutoffModeSoft:     "SOFT",
-	CutoffModeCautious: "CAUTIOUS",
-}
-
-// String turns a LogLevel into a string
-func (m CutoffMode) String() string {
-	if m >= CutoffMode(len(cutoffModeToString)) {
-		return fmt.Sprintf("CutoffMode(%d)", m)
-	}
-	return cutoffModeToString[m]
-}
-
-// Set a LogLevel
-func (m *CutoffMode) Set(s string) error {
-	for n, name := range cutoffModeToString {
-		if s != "" && name == strings.ToUpper(s) {
-			*m = CutoffMode(n)
-			return nil
-		}
-	}
-	return errors.Errorf("Unknown cutoff mode %q", s)
-}
-
-// Type of the value
-func (m *CutoffMode) Type() string {
-	return "string"
-}

@@ -1,6 +0,0 @@
-package fs
-
-import "github.com/spf13/pflag"
-
-// Check it satisfies the interface
-var _ pflag.Value = (*CutoffMode)(nil)

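The deleted `CutoffMode` type implements the three-method `pflag.Value` interface (`String`, `Set`, `Type`), which is how rclone binds an enum to a command-line flag, and the one-line test file above is the standard Go compile-time assertion that the interface really is satisfied. A self-contained sketch of the same pattern with a hypothetical `colorMode` enum (not an rclone type):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/pflag"
)

// colorMode is a toy enum flag in the same pattern as CutoffMode.
type colorMode byte

const (
	colorAuto colorMode = iota
	colorAlways
	colorNever
)

var colorModeToString = []string{"AUTO", "ALWAYS", "NEVER"}

func (m colorMode) String() string { return colorModeToString[m] }

// Set parses a string into the enum, case-insensitively.
func (m *colorMode) Set(s string) error {
	for n, name := range colorModeToString {
		if strings.ToUpper(s) == name {
			*m = colorMode(n)
			return nil
		}
	}
	return fmt.Errorf("unknown color mode %q", s)
}

func (m *colorMode) Type() string { return "string" }

// Compile-time check that *colorMode satisfies pflag.Value, the same
// idiom the deleted cutoffmode_test.go uses for *CutoffMode.
var _ pflag.Value = (*colorMode)(nil)

func main() {
	var mode colorMode
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.Var(&mode, "color", "Color mode AUTO|ALWAYS|NEVER")
	_ = fs.Parse([]string{"--color", "never"})
	fmt.Println(mode) // NEVER
}
```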
@@ -362,11 +362,11 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
 	// Try server side copy first - if has optional interface and
 	// is same underlying remote
 	actionTaken = "Copied (server side copy)"
-	if fs.Config.MaxTransfer >= 0 && (accounting.Stats(ctx).GetBytes() >= int64(fs.Config.MaxTransfer) ||
-		(fs.Config.CutoffMode == fs.CutoffModeCautious && accounting.Stats(ctx).GetBytesWithPending()+src.Size() >= int64(fs.Config.MaxTransfer))) {
-		return nil, accounting.ErrorMaxTransferLimitReached
-	}
 	if doCopy := f.Features().Copy; doCopy != nil && (SameConfig(src.Fs(), f) || (SameRemoteType(src.Fs(), f) && f.Features().ServerSideAcrossConfigs)) {
+		// Check transfer limit for server side copies
+		if fs.Config.MaxTransfer >= 0 && accounting.Stats(ctx).GetBytes() >= int64(fs.Config.MaxTransfer) {
+			return nil, accounting.ErrorMaxTransferLimitReached
+		}
 		in := tr.Account(nil) // account the transfer
 		in.ServerSideCopyStart()
 		newDst, err = doCopy(ctx, src, remote)

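The richer condition in the hunk above refuses a copy either when the cap is already spent or, in CAUTIOUS mode, when the bytes already transferred plus bytes still pending plus the size of this source would cross the cap. A small sketch of that decision in isolation; `wouldExceedLimit` is a hypothetical helper, with `done` and `pending` standing in for `accounting.Stats(ctx).GetBytes()` and the pending portion of `GetBytesWithPending()`:

```go
package main

import "fmt"

// wouldExceedLimit reports whether starting a transfer of size bytes
// should be refused under a max-transfer cap. A sketch, not rclone's
// exact logic.
func wouldExceedLimit(done, pending, size, max int64) bool {
	if max < 0 {
		return false // no limit configured
	}
	if done >= max {
		return true // the cap is already spent
	}
	// cautious mode: refuse transfers that would cross the cap
	return done+pending+size >= max
}

func main() {
	fmt.Println(wouldExceedLimit(10, 0, 4, 15)) // false: 14 < 15
	fmt.Println(wouldExceedLimit(10, 2, 4, 15)) // true: 16 >= 15
}
```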
@@ -39,7 +39,6 @@ import (
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/filter"
-	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/operations"

@@ -1528,58 +1527,3 @@ func TestRcatSize(t *testing.T) {
 	// Check files exist
 	fstest.CheckItems(t, r.Fremote, file1, file2)
 }
-
-func TestCopyFileMaxTransfer(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	old := fs.Config.MaxTransfer
-	oldMode := fs.Config.CutoffMode
-
-	defer func() {
-		fs.Config.MaxTransfer = old
-		fs.Config.CutoffMode = oldMode
-		accounting.Stats(context.Background()).ResetCounters()
-	}()
-
-	file1 := r.WriteFile("file1", "file1 contents", t1)
-	file2 := r.WriteFile("file2", "file2 contents...........", t2)
-
-	rfile1 := file1
-	rfile1.Path = "sub/file1"
-	rfile2 := file2
-	rfile2.Path = "sub/file2"
-
-	fs.Config.MaxTransfer = 15
-	fs.Config.CutoffMode = fs.CutoffModeHard
-	accounting.Stats(context.Background()).ResetCounters()
-
-	err := operations.CopyFile(context.Background(), r.Fremote, r.Flocal, rfile1.Path, file1.Path)
-	require.NoError(t, err)
-	fstest.CheckItems(t, r.Flocal, file1, file2)
-	fstest.CheckItems(t, r.Fremote, rfile1)
-
-	accounting.Stats(context.Background()).ResetCounters()
-
-	err = operations.CopyFile(context.Background(), r.Fremote, r.Flocal, rfile2.Path, file2.Path)
-	fstest.CheckItems(t, r.Flocal, file1, file2)
-	fstest.CheckItems(t, r.Fremote, rfile1)
-	assert.Contains(t, err.Error(), "Max transfer limit reached")
-	assert.True(t, fserrors.IsFatalError(err))
-
-	fs.Config.CutoffMode = fs.CutoffModeCautious
-	accounting.Stats(context.Background()).ResetCounters()
-
-	err = operations.CopyFile(context.Background(), r.Fremote, r.Flocal, rfile2.Path, file2.Path)
-	fstest.CheckItems(t, r.Flocal, file1, file2)
-	fstest.CheckItems(t, r.Fremote, rfile1)
-	assert.Contains(t, err.Error(), "Max transfer limit reached")
-	assert.True(t, fserrors.IsFatalError(err))
-
-	fs.Config.CutoffMode = fs.CutoffModeSoft
-	accounting.Stats(context.Background()).ResetCounters()
-
-	err = operations.CopyFile(context.Background(), r.Fremote, r.Flocal, rfile2.Path, file2.Path)
-	require.NoError(t, err)
-	fstest.CheckItems(t, r.Flocal, file1, file2)
-	fstest.CheckItems(t, r.Fremote, rfile1, rfile2)
-}

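Taken together, the deleted `TestCopyFileMaxTransfer` above documents the behaviour of the `--cutoff-mode` flag being removed in this diff: HARD aborts with a fatal "Max transfer limit reached" error once `--max-transfer` is hit, CAUTIOUS additionally refuses to start a copy whose size would cross the limit, and SOFT allows the copy to run to completion.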
@@ -29,7 +29,6 @@ type Options struct {
 	WebGUINoOpenBrowser      bool   // set to disable auto opening browser
 	WebGUIFetchURL           string // set the default url for fetching webgui
 	AccessControlAllowOrigin string // set the access control for CORS configuration
-	EnableMetrics            bool   // set to enable prometheus metrics on /metrics
 	JobExpireDuration        time.Duration
 	JobExpireInterval        time.Duration
 }

@@ -26,7 +26,6 @@ func AddFlags(flagSet *pflag.FlagSet) {
 	flags.BoolVarP(flagSet, &Opt.WebGUINoOpenBrowser, "rc-web-gui-no-open-browser", "", false, "Don't open the browser automatically")
 	flags.StringVarP(flagSet, &Opt.WebGUIFetchURL, "rc-web-fetch-url", "", "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest", "URL to fetch the releases for webgui.")
 	flags.StringVarP(flagSet, &Opt.AccessControlAllowOrigin, "rc-allow-origin", "", "", "Set the allowed origin for CORS.")
-	flags.BoolVarP(flagSet, &Opt.EnableMetrics, "rc-enable-metrics", "", false, "Enable prometheus metrics on /metrics")
 	flags.DurationVarP(flagSet, &Opt.JobExpireDuration, "rc-job-expire-duration", "", Opt.JobExpireDuration, "expire finished async jobs older than this value")
 	flags.DurationVarP(flagSet, &Opt.JobExpireInterval, "rc-job-expire-interval", "", Opt.JobExpireInterval, "interval to check for expired async jobs")
 	httpflags.AddFlagsPrefix(flagSet, "rc-", &Opt.HTTPOptions)

@@ -16,14 +16,9 @@ import (
 	"strings"
 
 	"github.com/pkg/errors"
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/promhttp"
-	"github.com/skratchdot/open-golang/open"
-
 	"github.com/rclone/rclone/cmd/serve/httplib"
 	"github.com/rclone/rclone/cmd/serve/httplib/serve"
 	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/cache"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/list"

@@ -31,16 +26,9 @@ import (
 	"github.com/rclone/rclone/fs/rc/jobs"
 	"github.com/rclone/rclone/fs/rc/rcflags"
 	"github.com/rclone/rclone/lib/random"
+	"github.com/skratchdot/open-golang/open"
 )
 
-var promHandler http.Handler
-
-func init() {
-	rcloneCollector := accounting.NewRcloneCollector()
-	prometheus.MustRegister(rcloneCollector)
-	promHandler = promhttp.Handler()
-}
-
 // Start the remote control server if configured
 //
 // If the server wasn't configured the *Server returned may be nil

@@ -347,9 +335,6 @@ func (s *Server) handleGet(w http.ResponseWriter, r *http.Request, path string)
 		// Serve /[fs]/remote files
 		s.serveRemote(w, r, match[2], match[1])
 		return
-	case path == "metrics" && s.opt.EnableMetrics:
-		promHandler.ServeHTTP(w, r)
-		return
 	case path == "*" && s.opt.Serve:
 		// Serve /* as the remote listing
 		s.serveRoot(w, r)

@@ -12,12 +12,10 @@ import (
 	"testing"
 	"time"
 
+	_ "github.com/rclone/rclone/backend/local"
+	"github.com/rclone/rclone/fs/rc"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-
-	_ "github.com/rclone/rclone/backend/local"
-	"github.com/rclone/rclone/fs/accounting"
-	"github.com/rclone/rclone/fs/rc"
 )
 
 const (

@@ -483,59 +481,6 @@ func TestMethods(t *testing.T) {
 	testServer(t, tests, &opt)
 }
 
-func TestMetrics(t *testing.T) {
-	stats := accounting.GlobalStats()
-	tests := makeMetricsTestCases(stats)
-	opt := newTestOpt()
-	opt.EnableMetrics = true
-	testServer(t, tests, &opt)
-
-	// Test changing a couple options
-	stats.Bytes(500)
-	stats.Deletes(30)
-	stats.Errors(2)
-	stats.Bytes(324)
-
-	tests = makeMetricsTestCases(stats)
-	testServer(t, tests, &opt)
-}
-
-func makeMetricsTestCases(stats *accounting.StatsInfo) (tests []testRun) {
-	tests = []testRun{{
-		Name:     "Bytes Transferred Metric",
-		URL:      "/metrics",
-		Method:   "GET",
-		Status:   http.StatusOK,
-		Contains: regexp.MustCompile(fmt.Sprintf("rclone_bytes_transferred_total %d", stats.GetBytes())),
-	}, {
-		Name:     "Checked Files Metric",
-		URL:      "/metrics",
-		Method:   "GET",
-		Status:   http.StatusOK,
-		Contains: regexp.MustCompile(fmt.Sprintf("rclone_checked_files_total %d", stats.GetChecks())),
-	}, {
-		Name:     "Errors Metric",
-		URL:      "/metrics",
-		Method:   "GET",
-		Status:   http.StatusOK,
-		Contains: regexp.MustCompile(fmt.Sprintf("rclone_errors_total %d", stats.GetErrors())),
-	}, {
-		Name:     "Deleted Files Metric",
-		URL:      "/metrics",
-		Method:   "GET",
-		Status:   http.StatusOK,
-		Contains: regexp.MustCompile(fmt.Sprintf("rclone_files_deleted_total %d", stats.Deletes(0))),
-	}, {
-		Name:     "Files Transferred Metric",
-		URL:      "/metrics",
-		Method:   "GET",
-		Status:   http.StatusOK,
-		Contains: regexp.MustCompile(fmt.Sprintf("rclone_files_transferred_total %d", stats.GetTransfers())),
-	},
-	}
-	return
-}
-
 var matchRemoteDirListing = regexp.MustCompile(`<title>List of all rclone remotes.</title>`)
 
 func TestServingRoot(t *testing.T) {

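The deleted `TestMetrics` asserts on the plain-text exposition format of the `/metrics` endpoint with regular expressions. The same style of check can be reproduced outside the test harness with `httptest` and a private registry; everything here other than the prometheus/promhttp APIs (the counter name, the value `824` = 500 + 324 as in the deleted test) is illustrative.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"regexp"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A registry with one counter stands in for rclone's collector.
	reg := prometheus.NewRegistry()
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "rclone_bytes_transferred_total",
		Help: "Total transferred bytes",
	})
	reg.MustRegister(c)
	c.Add(824)

	srv := httptest.NewServer(promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	defer srv.Close()

	resp, _ := http.Get(srv.URL)
	body, _ := ioutil.ReadAll(resp.Body)
	resp.Body.Close()

	re := regexp.MustCompile(`rclone_bytes_transferred_total (\d+)`)
	fmt.Println(re.FindString(string(body))) // rclone_bytes_transferred_total 824
}
```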
@@ -879,15 +879,11 @@ func (s *syncCopyMove) Match(ctx context.Context, dst, src fs.DirEntry) (recurse
 		// Do the same thing to the entire contents of the directory
 		_, ok := dst.(fs.Directory)
 		if ok {
-			// Only record matched (src & dst) empty dirs when performing move
-			if s.DoMove {
-				// Record the src directory for deletion
-				s.srcEmptyDirsMu.Lock()
-				s.srcParentDirCheck(src)
-				s.srcEmptyDirs[src.Remote()] = src
-				s.srcEmptyDirsMu.Unlock()
-			}
-
+			// Record the src directory for deletion
+			s.srcEmptyDirsMu.Lock()
+			s.srcParentDirCheck(src)
+			s.srcEmptyDirs[src.Remote()] = src
+			s.srcEmptyDirsMu.Unlock()
 			return true
 		}
 		// FIXME src is dir, dst is file

@@ -262,4 +262,4 @@ backends:
  - backend: "mailru"
    remote: "TestMailru:"
    subdir: false
-   fastlist: false
+   fastlist: false

13
go.mod
@@ -13,17 +13,15 @@ require (
 	github.com/atotto/clipboard v0.1.2
 	github.com/aws/aws-sdk-go v1.29.9
 	github.com/billziss-gh/cgofuse v1.2.0
-	github.com/buengese/xxh32 v1.0.1
 	github.com/djherbis/times v1.2.0
 	github.com/dropbox/dropbox-sdk-go-unofficial v5.6.0+incompatible
-	github.com/gabriel-vasile/mimetype v1.0.2
+	github.com/etcd-io/bbolt v1.3.3
 	github.com/google/go-querystring v1.0.0 // indirect
 	github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f // indirect
 	github.com/hanwen/go-fuse/v2 v2.0.3-0.20191108143333-152e6ac32d54
 	github.com/jlaffaye/ftp v0.0.0-20191218041957-e1b8fdd0dcc3
 	github.com/jzelinskie/whirlpool v0.0.0-20170603002051-c19460b8caa6
 	github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
-	github.com/klauspost/compress v1.10.1
 	github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
 	github.com/koofr/go-httpclient v0.0.0-20190818202018-e0dc8fd921dc
 	github.com/koofr/go-koofrclient v0.0.0-20190724113126-8e5366da203a

@@ -36,13 +34,11 @@ require (
 	github.com/ncw/swift v1.0.50
 	github.com/nsf/termbox-go v0.0.0-20200204031403-4d2b513ad8be
 	github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd
-	github.com/onsi/ginkgo v1.12.0 // indirect
-	github.com/onsi/gomega v1.9.0 // indirect
+	github.com/onsi/ginkgo v1.9.0 // indirect
+	github.com/onsi/gomega v1.6.0 // indirect
 	github.com/patrickmn/go-cache v2.1.0+incompatible
-	github.com/pierrec/lz4 v2.4.1+incompatible
 	github.com/pkg/errors v0.9.1
 	github.com/pkg/sftp v1.11.0
-	github.com/prometheus/client_golang v1.4.1
 	github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8
 	github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46
 	github.com/sevlyar/go-daemon v0.1.5

@@ -54,11 +50,10 @@ require (
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.5.1
 	github.com/t3rm1n4l/go-mega v0.0.0-20200117211730-79a813bb328d
-	github.com/ulikunitz/xz v0.5.7
 	github.com/xanzy/ssh-agent v0.2.1
 	github.com/youmark/pkcs8 v0.0.0-20191102193632-94c173a94d60
 	github.com/yunify/qingstor-sdk-go/v3 v3.2.0
-	go.etcd.io/bbolt v1.3.3
+	go.etcd.io/bbolt v1.3.3 // indirect
 	goftp.io/server v0.3.2
 	golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d
 	golang.org/x/net v0.0.0-20200222125558-5a598a2470a0

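Note the bbolt dependency flips between the two import paths here: one side of the diff depends on the canonical module path `go.etcd.io/bbolt` directly, while the other pulls in the legacy `github.com/etcd-io/bbolt` path and keeps `go.etcd.io/bbolt` only as an indirect dependency. Both paths appear to point at the same BoltDB fork maintained by the etcd project, with `go.etcd.io/bbolt` being the path the project itself recommends.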
66
go.sum
@@ -51,9 +51,7 @@ github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4/go.mod h1:FSdwKX97koS5efg
 github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0=
 github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/anacrolix/dms v1.1.0 h1:vbBXZS7T5FaZm+9p1pdmVVo9tN3qdc27bKSETdeT3xo=
 github.com/anacrolix/dms v1.1.0/go.mod h1:msPKAoppoNRfrYplJqx63FZ+VipDZ4Xsj3KzIQxyU7k=
 github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=

@@ -68,21 +66,13 @@ github.com/atotto/clipboard v0.1.2/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn
 github.com/aws/aws-sdk-go v1.29.9 h1:PHq9ddjfZYfCOXyqHKiCZ1CHRAk7nXhV7WTqj5l+bmQ=
 github.com/aws/aws-sdk-go v1.29.9/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/billziss-gh/cgofuse v1.2.0 h1:FMdQSygSBpD4yEPENJcmvfCdmNWMVkPLlD7wWdl/7IA=
 github.com/billziss-gh/cgofuse v1.2.0/go.mod h1:LJjoaUojlVjgo5GQoEJTcJNqZJeRU0nCR84CyxKt2YM=
 github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
 github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
-github.com/buengese/xxh32 v1.0.1 h1:aNZNg2XxotiTr6JD+R4bzmL1uzMZ2KEKvxyj4P1Z1Xw=
-github.com/buengese/xxh32 v1.0.1/go.mod h1:Q5GTtu7m/GuqzCc8YZ0n+oetaGFwW7oy291HvqLTZFk=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=

@@ -108,19 +98,16 @@ github.com/dropbox/dropbox-sdk-go-unofficial v5.6.0+incompatible/go.mod h1:lr+Lh
 github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/frankban/quicktest v1.7.3 h1:kV0lw0TH1j1hozahVmcpFCsbV5hcS4ZalH+U7UoeTow=
-github.com/frankban/quicktest v1.7.3/go.mod h1:V1d2J5pfxYH6EjBAgSK7YNXcXlTWxUHdE1sVDXkjnig=
+github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM=
+github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/gabriel-vasile/mimetype v1.0.2 h1:GKCo1TUCg0pV0R4atTcaLv/9SI2W9xPgMySZxUxcJOE=
-github.com/gabriel-vasile/mimetype v1.0.2/go.mod h1:6CDPel/o/3/s4+bp6kIbsWATq8pmgOisOPG40CJa6To=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
 github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=

@@ -146,7 +133,6 @@ github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I=
 github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=

@@ -160,7 +146,6 @@ github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=

@@ -197,8 +182,6 @@ github.com/jlaffaye/ftp v0.0.0-20191218041957-e1b8fdd0dcc3/go.mod h1:PwUeyujmhaG
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
 github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=

@@ -211,8 +194,6 @@ github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uia
 github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.10.1 h1:a/QY0o9S6wCi0XhxaMX/QmusicNUqCqFugR6WKPOSoQ=
-github.com/klauspost/compress v1.10.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=

@@ -244,15 +225,10 @@ github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHX
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
 github.com/mattn/go-runewidth v0.0.8 h1:3tS41NlGYSmhhe/8fhGRzc+z3AYCw1Fe1WAyLuujKs0=
 github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2 h1:VlXvEx6JbFp7F9iz92zXP2Ew+9VupSpfybr+TxmjdH0=

@@ -267,19 +243,16 @@ github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn
 github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd h1:+iAPaTbi1gZpcpDwe/BW1fx7Xoesv69hLNGPheoyhBs=
 github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd/go.mod h1:4soZNh0zW0LtYGdQ416i0jO0EIqMGcbtaspRS4BDvRQ=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU=
-github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg=
-github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/onsi/ginkgo v1.9.0 h1:SZjF721BByVj8QH636/8S2DnX4n0Re3SteMmw3N+tzc=
+github.com/onsi/ginkgo v1.9.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.6.0 h1:8XTW0fcJZEq9q+Upcyws4JSGua2MFysCL5xkaSgHc+M=
+github.com/onsi/gomega v1.6.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
 github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
 github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 h1:XeOYlK9W1uCmhjJSsY78Mcuh7MVkNjTzmHx1yBzizSU=
 github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14/go.mod h1:jVblp62SafmidSkvWrXyxAme3gaTfEtWwRPGz5cpvHg=
 github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
-github.com/pierrec/lz4 v2.4.1+incompatible h1:mFe7ttWaflA46Mhqh+jUfjp2qTbPYxLB2/OyBppH9dg=
-github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=

@@ -290,29 +263,14 @@ github.com/pkg/sftp v1.11.0/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZ
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8=
 github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.4.1 h1:FFSuS004yOQEtDdTq+TAOLP5xUq63KqAFYyOi8zA+Y8=
-github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM=
 github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY=
 github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
 github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 h1:Y258uzXU/potCYnQd1r6wlAnoMB68BiCkCcCnKx1SH8=
 github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8/go.mod h1:bSJjRokAHHOhA+XFxplld8w2R/dXLH7Z3BZ532vhFwU=

@@ -370,8 +328,6 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1
 github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ=
 github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
 github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/ulikunitz/xz v0.5.7 h1:YvTNdFzX6+W5m9msiYg/zpkSURPPtOlzbqYjrFn7Yt4=
-github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
 github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
 github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=
 github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=

@@ -447,7 +403,6 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn
 golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
 golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=

@@ -489,13 +444,11 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ=
 golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8=
 golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

@@ -516,7 +469,6 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384 h1:TFlARGu6Czu1z7q93HTxcP1P+/ZFC/IKythI5RzrnRg=
 golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=

@@ -592,8 +544,6 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=

@@ -604,10 +554,6 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

20
vendor/github.com/beorn7/perks/LICENSE
generated
vendored
@@ -1,20 +0,0 @@
-Copyright (C) 2013 Blake Mizerany
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

2388
vendor/github.com/beorn7/perks/quantile/exampledata.txt
generated
vendored
File diff suppressed because it is too large
316
vendor/github.com/beorn7/perks/quantile/stream.go
generated
vendored
@@ -1,316 +0,0 @@
-// Package quantile computes approximate quantiles over an unbounded data
-// stream within low memory and CPU bounds.
-//
-// A small amount of accuracy is traded to achieve the above properties.
-//
-// Multiple streams can be merged before calling Query to generate a single set
-// of results. This is meaningful when the streams represent the same type of
-// data. See Merge and Samples.
-//
-// For more detailed information about the algorithm used, see:
-//
-// Effective Computation of Biased Quantiles over Data Streams
-//
-// http://www.cs.rutgers.edu/~muthu/bquant.pdf
-package quantile
-
-import (
-	"math"
-	"sort"
-)
-
-// Sample holds an observed value and meta information for compression. JSON
-// tags have been added for convenience.
-type Sample struct {
-	Value float64 `json:",string"`
-	Width float64 `json:",string"`
-	Delta float64 `json:",string"`
-}
-
-// Samples represents a slice of samples. It implements sort.Interface.
-type Samples []Sample
-
-func (a Samples) Len() int           { return len(a) }
-func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
-func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-
-type invariant func(s *stream, r float64) float64
-
-// NewLowBiased returns an initialized Stream for low-biased quantiles
-// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
-// error guarantees can still be given even for the lower ranks of the data
-// distribution.
-//
-// The provided epsilon is a relative error, i.e. the true quantile of a value
-// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
-// properties.
-func NewLowBiased(epsilon float64) *Stream {
-	ƒ := func(s *stream, r float64) float64 {
-		return 2 * epsilon * r
-	}
-	return newStream(ƒ)
-}
-
-// NewHighBiased returns an initialized Stream for high-biased quantiles
-// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
-// error guarantees can still be given even for the higher ranks of the data
-// distribution.
-//
-// The provided epsilon is a relative error, i.e. the true quantile of a value
-// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
-// properties.
-func NewHighBiased(epsilon float64) *Stream {
-	ƒ := func(s *stream, r float64) float64 {
-		return 2 * epsilon * (s.n - r)
-	}
-	return newStream(ƒ)
-}
-
-// NewTargeted returns an initialized Stream concerned with a particular set of
-// quantile values that are supplied a priori. Knowing these a priori reduces
-// space and computation time. The targets map maps the desired quantiles to
-// their absolute errors, i.e. the true quantile of a value returned by a query
-// is guaranteed to be within (Quantile±Epsilon).
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
-func NewTargeted(targetMap map[float64]float64) *Stream {
-	// Convert map to slice to avoid slow iterations on a map.
-	// ƒ is called on the hot path, so converting the map to a slice
-	// beforehand results in significant CPU savings.
-	targets := targetMapToSlice(targetMap)
-
-	ƒ := func(s *stream, r float64) float64 {
-		var m = math.MaxFloat64
-		var f float64
-		for _, t := range targets {
-			if t.quantile*s.n <= r {
-				f = (2 * t.epsilon * r) / t.quantile
-			} else {
-				f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
-			}
-			if f < m {
-				m = f
-			}
-		}
-		return m
-	}
-	return newStream(ƒ)
-}
-
-type target struct {
-	quantile float64
-	epsilon  float64
-}
-
-func targetMapToSlice(targetMap map[float64]float64) []target {
-	targets := make([]target, 0, len(targetMap))
-
-	for quantile, epsilon := range targetMap {
-		t := target{
-			quantile: quantile,
-			epsilon:  epsilon,
-		}
-		targets = append(targets, t)
-	}
-
-	return targets
-}
-
-// Stream computes quantiles for a stream of float64s. It is not thread-safe by
-// design. Take care when using across multiple goroutines.
-type Stream struct {
-	*stream
-	b      Samples
-	sorted bool
-}
-
-func newStream(ƒ invariant) *Stream {
-	x := &stream{ƒ: ƒ}
-	return &Stream{x, make(Samples, 0, 500), true}
-}
-
-// Insert inserts v into the stream.
-func (s *Stream) Insert(v float64) {
-	s.insert(Sample{Value: v, Width: 1})
-}
-
-func (s *Stream) insert(sample Sample) {
-	s.b = append(s.b, sample)
-	s.sorted = false
-	if len(s.b) == cap(s.b) {
-		s.flush()
-	}
-}
-
-// Query returns the computed qth percentiles value. If s was created with
-// NewTargeted, and q is not in the set of quantiles provided a priori, Query
-// will return an unspecified result.
-func (s *Stream) Query(q float64) float64 {
-	if !s.flushed() {
-		// Fast path when there hasn't been enough data for a flush;
-		// this also yields better accuracy for small sets of data.
-		l := len(s.b)
-		if l == 0 {
-			return 0
-		}
-		i := int(math.Ceil(float64(l) * q))
-		if i > 0 {
-			i -= 1
-		}
-		s.maybeSort()
-		return s.b[i].Value
-	}
-	s.flush()
-	return s.stream.query(q)
-}
-
-// Merge merges samples into the underlying streams samples. This is handy when
-// merging multiple streams from separate threads, database shards, etc.
-//
-// ATTENTION: This method is broken and does not yield correct results. The
-// underlying algorithm is not capable of merging streams correctly.
-func (s *Stream) Merge(samples Samples) {
-	sort.Sort(samples)
-	s.stream.merge(samples)
-}
-
-// Reset reinitializes and clears the list reusing the samples buffer memory.
-func (s *Stream) Reset() {
-	s.stream.reset()
-	s.b = s.b[:0]
-}
-
-// Samples returns stream samples held by s.
-func (s *Stream) Samples() Samples {
-	if !s.flushed() {
-		return s.b
-	}
-	s.flush()
-	return s.stream.samples()
-}
-
-// Count returns the total number of samples observed in the stream
-// since initialization.
-func (s *Stream) Count() int {
-	return len(s.b) + s.stream.count()
-}
-
-func (s *Stream) flush() {
-	s.maybeSort()
-	s.stream.merge(s.b)
-	s.b = s.b[:0]
-}
-
-func (s *Stream) maybeSort() {
-	if !s.sorted {
-		s.sorted = true
-		sort.Sort(s.b)
-	}
-}
-
-func (s *Stream) flushed() bool {
-	return len(s.stream.l) > 0
-}
-
-type stream struct {
-	n float64
-	l []Sample
-	ƒ invariant
-}
-
-func (s *stream) reset() {
-	s.l = s.l[:0]
-	s.n = 0
-}
-
-func (s *stream) insert(v float64) {
-	s.merge(Samples{{v, 1, 0}})
-}
-
-func (s *stream) merge(samples Samples) {
-	// TODO(beorn7): This tries to merge not only individual samples, but
-	// whole summaries. The paper doesn't mention merging summaries at
-	// all. Unittests show that the merging is inaccurate. Find out how to
-	// do merges properly.
-	var r float64
-	i := 0
-	for _, sample := range samples {
-		for ; i < len(s.l); i++ {
-			c := s.l[i]
-			if c.Value > sample.Value {
-				// Insert at position i.
-				s.l = append(s.l, Sample{})
-				copy(s.l[i+1:], s.l[i:])
-				s.l[i] = Sample{
-					sample.Value,
-					sample.Width,
-					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
-					// TODO(beorn7): How to calculate delta correctly?
-				}
-				i++
-				goto inserted
-			}
-			r += c.Width
-		}
-		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
-		i++
-	inserted:
-		s.n += sample.Width
-		r += sample.Width
-	}
-	s.compress()
-}
-
-func (s *stream) count() int {
-	return int(s.n)
-}
-
-func (s *stream) query(q float64) float64 {
-	t := math.Ceil(q * s.n)
-	t += math.Ceil(s.ƒ(s, t) / 2)
-	p := s.l[0]
-	var r float64
-	for _, c := range s.l[1:] {
-		r += p.Width
-		if r+c.Width+c.Delta > t {
-			return p.Value
-		}
-		p = c
-	}
-	return p.Value
-}
-
-func (s *stream) compress() {
-	if len(s.l) < 2 {
-		return
-	}
-	x := s.l[len(s.l)-1]
-	xi := len(s.l) - 1
-	r := s.n - 1 - x.Width
-
-	for i := len(s.l) - 2; i >= 0; i-- {
-		c := s.l[i]
-		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
-			x.Width += c.Width
-			s.l[xi] = x
-			// Remove element at i.
-			copy(s.l[i:], s.l[i+1:])
-			s.l = s.l[:len(s.l)-1]
-			xi -= 1
-		} else {
-			x = c
-			xi = i
-		}
-		r -= c.Width
-	}
-}
-
-func (s *stream) samples() Samples {
-	samples := make(Samples, len(s.l))
-	copy(samples, s.l)
-	return samples
-}

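This vendored package (a dependency of the Prometheus client removed in this diff) implements the Cormode–Korn–Muthukrishnan–Srivastava streaming-quantile algorithm. A short usage sketch of the `NewTargeted`/`Insert`/`Query` API defined in the file above, with arbitrary target quantiles and data chosen for illustration:

```go
package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median and 99th percentile with given absolute errors,
	// using the NewTargeted API from the vendored file above.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.99: 0.001,
	})
	for i := 1; i <= 1000; i++ {
		q.Insert(float64(i))
	}
	fmt.Println("p50 ~", q.Query(0.50))
	fmt.Println("p99 ~", q.Query(0.99))
}
```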
5
vendor/github.com/buengese/xxh32/go.mod
generated
vendored
@@ -1,5 +0,0 @@
-module github.com/buengese/xxh32
-
-go 1.13
-
-require github.com/frankban/quicktest v1.7.3

11
vendor/github.com/buengese/xxh32/go.sum
generated
vendored
@@ -1,11 +0,0 @@
-github.com/frankban/quicktest v1.7.3 h1:kV0lw0TH1j1hozahVmcpFCsbV5hcS4ZalH+U7UoeTow=
-github.com/frankban/quicktest v1.7.3/go.mod h1:V1d2J5pfxYH6EjBAgSK7YNXcXlTWxUHdE1sVDXkjnig=
-github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

223
vendor/github.com/buengese/xxh32/xxh32zero.go
generated
vendored
@@ -1,223 +0,0 @@
-// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version).
-// (https://github.com/Cyan4973/XXH/)
-package xxh32
-
-import (
-	"encoding/binary"
-)
-
-const (
-	prime1 uint32 = 2654435761
-	prime2 uint32 = 2246822519
-	prime3 uint32 = 3266489917
-	prime4 uint32 = 668265263
-	prime5 uint32 = 374761393
-
-	primeMask   = 0xFFFFFFFF
-	prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984
-	prime1minus = uint32((-int64(prime1)) & primeMask)                  // 1640531535
-)
-
-// XXHZero represents an xxhash32 object with seed 0.
-type XXHZero struct {
-	v1       uint32
-	v2       uint32
-	v3       uint32
-	v4       uint32
-	totalLen uint64
-	buf      [16]byte
-	bufused  int
-}
-
-// Sum appends the current hash to b and returns the resulting slice.
-// It does not change the underlying hash state.
-func (xxh XXHZero) Sum(b []byte) []byte {
-	h32 := xxh.Sum32()
-	return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
-}
-
-// Reset resets the Hash to its initial state.
-func (xxh *XXHZero) Reset() {
-	xxh.v1 = prime1plus2
-	xxh.v2 = prime2
-	xxh.v3 = 0
-	xxh.v4 = prime1minus
-	xxh.totalLen = 0
-	xxh.bufused = 0
-}
-
-// Size returns the number of bytes returned by Sum().
-func (xxh *XXHZero) Size() int {
-	return 4
-}
-
-// BlockSize gives the minimum number of bytes accepted by Write().
-func (xxh *XXHZero) BlockSize() int {
-	return 1
-}
-
-// Write adds input bytes to the Hash.
-// It never returns an error.
-func (xxh *XXHZero) Write(input []byte) (int, error) {
-	if xxh.totalLen == 0 {
-		xxh.Reset()
-	}
-	n := len(input)
-	m := xxh.bufused
-
-	xxh.totalLen += uint64(n)
-
-	r := len(xxh.buf) - m
-	if n < r {
-		copy(xxh.buf[m:], input)
-		xxh.bufused += len(input)
-		return n, nil
-	}
-
-	p := 0
-	// Causes compiler to work directly from registers instead of stack:
-	v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4
-	if m > 0 {
-		// some data left from previous update
-		copy(xxh.buf[xxh.bufused:], input[:r])
-		xxh.bufused += len(input) - r
-
-		// fast rotl(13)
-		buf := xxh.buf[:16] // BCE hint.
-		v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1
-		v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1
-		v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1
-		v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1
-		p = r
-		xxh.bufused = 0
-	}
-
-	for n := n - 16; p <= n; p += 16 {
-		sub := input[p:][:16] //BCE hint for compiler
-		v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
-		v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
-		v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
-		v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
-	}
-	xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4
-
-	copy(xxh.buf[xxh.bufused:], input[p:])
-	xxh.bufused += len(input) - p
-
-	return n, nil
-}
-
-// Sum32 returns the 32 bits Hash value.
-func (xxh *XXHZero) Sum32() uint32 {
-	h32 := uint32(xxh.totalLen)
-	if h32 >= 16 {
-		h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4)
-	} else {
-		h32 += prime5
-	}
-
-	p := 0
-	n := xxh.bufused
-	buf := xxh.buf
-	for n := n - 4; p <= n; p += 4 {
-		h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3
-		h32 = rol17(h32) * prime4
-	}
-	for ; p < n; p++ {
-		h32 += uint32(buf[p]) * prime5
-		h32 = rol11(h32) * prime1
-	}
-
-	h32 ^= h32 >> 15
-	h32 *= prime2
-	h32 ^= h32 >> 13
-	h32 *= prime3
-	h32 ^= h32 >> 16
-
-	return h32
-}
-
-// ChecksumZero returns the 32bits Hash value.
-func ChecksumZero(input []byte) uint32 {
-	n := len(input)
-	h32 := uint32(n)
-
-	if n < 16 {
-		h32 += prime5
-	} else {
-		v1 := prime1plus2
-		v2 := prime2
-		v3 := uint32(0)
-		v4 := prime1minus
-		p := 0
-		for n := n - 16; p <= n; p += 16 {
-			sub := input[p:][:16] //BCE hint for compiler
-			v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
-			v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
-			v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
-			v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
-		}
-		input = input[p:]
|
||||
n -= p
|
||||
h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
|
||||
}
|
||||
|
||||
p := 0
|
||||
for n := n - 4; p <= n; p += 4 {
|
||||
h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3
|
||||
h32 = rol17(h32) * prime4
|
||||
}
|
||||
for p < n {
|
||||
h32 += uint32(input[p]) * prime5
|
||||
h32 = rol11(h32) * prime1
|
||||
p++
|
||||
}
|
||||
|
||||
h32 ^= h32 >> 15
|
||||
h32 *= prime2
|
||||
h32 ^= h32 >> 13
|
||||
h32 *= prime3
|
||||
h32 ^= h32 >> 16
|
||||
|
||||
return h32
|
||||
}
|
||||
|
||||
// Uint32Zero hashes x with seed 0.
|
||||
func Uint32Zero(x uint32) uint32 {
|
||||
h := prime5 + 4 + x*prime3
|
||||
h = rol17(h) * prime4
|
||||
h ^= h >> 15
|
||||
h *= prime2
|
||||
h ^= h >> 13
|
||||
h *= prime3
|
||||
h ^= h >> 16
|
||||
return h
|
||||
}
|
||||
|
||||
func rol1(u uint32) uint32 {
|
||||
return u<<1 | u>>31
|
||||
}
|
||||
|
||||
func rol7(u uint32) uint32 {
|
||||
return u<<7 | u>>25
|
||||
}
|
||||
|
||||
func rol11(u uint32) uint32 {
|
||||
return u<<11 | u>>21
|
||||
}
|
||||
|
||||
func rol12(u uint32) uint32 {
|
||||
return u<<12 | u>>20
|
||||
}
|
||||
|
||||
func rol13(u uint32) uint32 {
|
||||
return u<<13 | u>>19
|
||||
}
|
||||
|
||||
func rol17(u uint32) uint32 {
|
||||
return u<<17 | u>>15
|
||||
}
|
||||
|
||||
func rol18(u uint32) uint32 {
|
||||
return u<<18 | u>>14
|
||||
}
|
||||
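For context on what this removal drops, a minimal usage sketch of the xxh32 API above (one-shot ChecksumZero versus the streaming, hash.Hash32-compatible XXHZero); the input bytes are illustrative:

```go
package main

import (
    "fmt"

    "github.com/buengese/xxh32"
)

func main() {
    data := []byte("some bytes to hash")

    // One-shot checksum of the whole slice.
    fmt.Printf("%08x\n", xxh32.ChecksumZero(data))

    // Streaming digest; feeding the same bytes in pieces
    // yields the same 32-bit value.
    var h xxh32.XXHZero
    h.Reset()
    h.Write(data[:5])
    h.Write(data[5:])
    fmt.Printf("%08x\n", h.Sum32())
}
```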
8 vendor/github.com/cespare/xxhash/v2/.travis.yml (generated, vendored)
@@ -1,8 +0,0 @@
language: go
go:
  - "1.x"
  - master
env:
  - TAGS=""
  - TAGS="-tags purego"
script: go test $TAGS -v ./...
22 vendor/github.com/cespare/xxhash/v2/LICENSE.txt (generated, vendored)
@@ -1,22 +0,0 @@
Copyright (c) 2016 Caleb Spare

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
67 vendor/github.com/cespare/xxhash/v2/README.md (generated, vendored)
@@ -1,67 +0,0 @@
# xxhash

[](https://godoc.org/github.com/cespare/xxhash)
[](https://travis-ci.org/cespare/xxhash)

xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.

This package provides a straightforward API:

```
func Sum64(b []byte) uint64
func Sum64String(s string) uint64
type Digest struct{ ... }
func New() *Digest
```

The `Digest` type implements hash.Hash64. Its key methods are:

```
func (*Digest) Write([]byte) (int, error)
func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```

This implementation provides a fast pure-Go implementation and an even faster
assembly implementation for amd64.

## Compatibility

This package is in a module and the latest code is in version 2 of the module.
You need a version of Go with at least "minimal module compatibility" to use
github.com/cespare/xxhash/v2:

* 1.9.7+ for Go 1.9
* 1.10.3+ for Go 1.10
* Go 1.11 or later

I recommend using the latest release of Go.

## Benchmarks

Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.

| input size | purego | asm |
| --- | --- | --- |
| 5 B | 979.66 MB/s | 1291.17 MB/s |
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |

These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
the following commands under Go 1.11.2:

```
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
```

## Projects using this package

- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [FreeCache](https://github.com/coocood/freecache)
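Complementing the API listing in the removed README, a short hedged sketch of one-shot versus incremental hashing with this package:

```go
package main

import (
    "fmt"

    "github.com/cespare/xxhash/v2"
)

func main() {
    // One-shot hashing.
    fmt.Println(xxhash.Sum64String("hello world"))

    // Incremental hashing via the hash.Hash64-compatible Digest;
    // the result matches the one-shot call.
    d := xxhash.New()
    d.WriteString("hello ")
    d.WriteString("world")
    fmt.Println(d.Sum64())
}
```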
3 vendor/github.com/cespare/xxhash/v2/go.mod (generated, vendored)
@@ -1,3 +0,0 @@
module github.com/cespare/xxhash/v2

go 1.11
0 vendor/github.com/cespare/xxhash/v2/go.sum (generated, vendored)
236 vendor/github.com/cespare/xxhash/v2/xxhash.go (generated, vendored)
@@ -1,236 +0,0 @@
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
// at http://cyan4973.github.io/xxHash/.
package xxhash

import (
    "encoding/binary"
    "errors"
    "math/bits"
)

const (
    prime1 uint64 = 11400714785074694791
    prime2 uint64 = 14029467366897019727
    prime3 uint64 = 1609587929392839161
    prime4 uint64 = 9650029242287828579
    prime5 uint64 = 2870177450012600261
)

// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
// possible in the Go code is worth a small (but measurable) performance boost
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
// convenience in the Go code in a few places where we need to intentionally
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
// result overflows a uint64).
var (
    prime1v = prime1
    prime2v = prime2
    prime3v = prime3
    prime4v = prime4
    prime5v = prime5
)

// Digest implements hash.Hash64.
type Digest struct {
    v1    uint64
    v2    uint64
    v3    uint64
    v4    uint64
    total uint64
    mem   [32]byte
    n     int // how much of mem is used
}

// New creates a new Digest that computes the 64-bit xxHash algorithm.
func New() *Digest {
    var d Digest
    d.Reset()
    return &d
}

// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
    d.v1 = prime1v + prime2
    d.v2 = prime2
    d.v3 = 0
    d.v4 = -prime1v
    d.total = 0
    d.n = 0
}

// Size always returns 8 bytes.
func (d *Digest) Size() int { return 8 }

// BlockSize always returns 32 bytes.
func (d *Digest) BlockSize() int { return 32 }

// Write adds more data to d. It always returns len(b), nil.
func (d *Digest) Write(b []byte) (n int, err error) {
    n = len(b)
    d.total += uint64(n)

    if d.n+n < 32 {
        // This new data doesn't even fill the current block.
        copy(d.mem[d.n:], b)
        d.n += n
        return
    }

    if d.n > 0 {
        // Finish off the partial block.
        copy(d.mem[d.n:], b)
        d.v1 = round(d.v1, u64(d.mem[0:8]))
        d.v2 = round(d.v2, u64(d.mem[8:16]))
        d.v3 = round(d.v3, u64(d.mem[16:24]))
        d.v4 = round(d.v4, u64(d.mem[24:32]))
        b = b[32-d.n:]
        d.n = 0
    }

    if len(b) >= 32 {
        // One or more full blocks left.
        nw := writeBlocks(d, b)
        b = b[nw:]
    }

    // Store any remaining partial block.
    copy(d.mem[:], b)
    d.n = len(b)

    return
}

// Sum appends the current hash to b and returns the resulting slice.
func (d *Digest) Sum(b []byte) []byte {
    s := d.Sum64()
    return append(
        b,
        byte(s>>56),
        byte(s>>48),
        byte(s>>40),
        byte(s>>32),
        byte(s>>24),
        byte(s>>16),
        byte(s>>8),
        byte(s),
    )
}

// Sum64 returns the current hash.
func (d *Digest) Sum64() uint64 {
    var h uint64

    if d.total >= 32 {
        v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
        h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
        h = mergeRound(h, v1)
        h = mergeRound(h, v2)
        h = mergeRound(h, v3)
        h = mergeRound(h, v4)
    } else {
        h = d.v3 + prime5
    }

    h += d.total

    i, end := 0, d.n
    for ; i+8 <= end; i += 8 {
        k1 := round(0, u64(d.mem[i:i+8]))
        h ^= k1
        h = rol27(h)*prime1 + prime4
    }
    if i+4 <= end {
        h ^= uint64(u32(d.mem[i:i+4])) * prime1
        h = rol23(h)*prime2 + prime3
        i += 4
    }
    for i < end {
        h ^= uint64(d.mem[i]) * prime5
        h = rol11(h) * prime1
        i++
    }

    h ^= h >> 33
    h *= prime2
    h ^= h >> 29
    h *= prime3
    h ^= h >> 32

    return h
}

const (
    magic         = "xxh\x06"
    marshaledSize = len(magic) + 8*5 + 32
)

// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (d *Digest) MarshalBinary() ([]byte, error) {
    b := make([]byte, 0, marshaledSize)
    b = append(b, magic...)
    b = appendUint64(b, d.v1)
    b = appendUint64(b, d.v2)
    b = appendUint64(b, d.v3)
    b = appendUint64(b, d.v4)
    b = appendUint64(b, d.total)
    b = append(b, d.mem[:d.n]...)
    b = b[:len(b)+len(d.mem)-d.n]
    return b, nil
}

// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (d *Digest) UnmarshalBinary(b []byte) error {
    if len(b) < len(magic) || string(b[:len(magic)]) != magic {
        return errors.New("xxhash: invalid hash state identifier")
    }
    if len(b) != marshaledSize {
        return errors.New("xxhash: invalid hash state size")
    }
    b = b[len(magic):]
    b, d.v1 = consumeUint64(b)
    b, d.v2 = consumeUint64(b)
    b, d.v3 = consumeUint64(b)
    b, d.v4 = consumeUint64(b)
    b, d.total = consumeUint64(b)
    copy(d.mem[:], b)
    b = b[len(d.mem):]
    d.n = int(d.total % uint64(len(d.mem)))
    return nil
}

func appendUint64(b []byte, x uint64) []byte {
    var a [8]byte
    binary.LittleEndian.PutUint64(a[:], x)
    return append(b, a[:]...)
}

func consumeUint64(b []byte) ([]byte, uint64) {
    x := u64(b)
    return b[8:], x
}

func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }

func round(acc, input uint64) uint64 {
    acc += input * prime2
    acc = rol31(acc)
    acc *= prime1
    return acc
}

func mergeRound(acc, val uint64) uint64 {
    val = round(0, val)
    acc ^= val
    acc = acc*prime1 + prime4
    return acc
}

func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
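The removed xxhash.go also implements encoding.BinaryMarshaler/BinaryUnmarshaler on Digest, so partially-written hash state can be checkpointed and resumed later; a minimal sketch under that API:

```go
package main

import (
    "fmt"

    "github.com/cespare/xxhash/v2"
)

func main() {
    d := xxhash.New()
    d.WriteString("hello ")

    // Checkpoint the partially-written state.
    state, err := d.MarshalBinary()
    if err != nil {
        panic(err)
    }

    // Resume in a fresh Digest and finish writing.
    d2 := xxhash.New()
    if err := d2.UnmarshalBinary(state); err != nil {
        panic(err)
    }
    d2.WriteString("world")

    // Same value as hashing "hello world" in one shot.
    fmt.Println(d2.Sum64() == xxhash.Sum64String("hello world"))
}
```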
13 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go (generated, vendored)
@@ -1,13 +0,0 @@
// +build !appengine
// +build gc
// +build !purego

package xxhash

// Sum64 computes the 64-bit xxHash digest of b.
//
//go:noescape
func Sum64(b []byte) uint64

//go:noescape
func writeBlocks(d *Digest, b []byte) int
215 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s (generated, vendored)
@@ -1,215 +0,0 @@
// +build !appengine
// +build gc
// +build !purego

#include "textflag.h"

// Register allocation:
// AX   h
// CX   pointer to advance through b
// DX   n
// BX   loop end
// R8   v1, k1
// R9   v2
// R10  v3
// R11  v4
// R12  tmp
// R13  prime1v
// R14  prime2v
// R15  prime4v

// round reads from and advances the buffer pointer in CX.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
    MOVQ (CX), R12 \
    ADDQ $8, CX \
    IMULQ R14, R12 \
    ADDQ R12, r \
    ROLQ $31, r \
    IMULQ R13, r

// mergeRound applies a merge round on the two registers acc and val.
// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
#define mergeRound(acc, val) \
    IMULQ R14, val \
    ROLQ $31, val \
    IMULQ R13, val \
    XORQ val, acc \
    IMULQ R13, acc \
    ADDQ R15, acc

// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
    // Load fixed primes.
    MOVQ ·prime1v(SB), R13
    MOVQ ·prime2v(SB), R14
    MOVQ ·prime4v(SB), R15

    // Load slice.
    MOVQ b_base+0(FP), CX
    MOVQ b_len+8(FP), DX
    LEAQ (CX)(DX*1), BX

    // The first loop limit will be len(b)-32.
    SUBQ $32, BX

    // Check whether we have at least one block.
    CMPQ DX, $32
    JLT noBlocks

    // Set up initial state (v1, v2, v3, v4).
    MOVQ R13, R8
    ADDQ R14, R8
    MOVQ R14, R9
    XORQ R10, R10
    XORQ R11, R11
    SUBQ R13, R11

    // Loop until CX > BX.
blockLoop:
    round(R8)
    round(R9)
    round(R10)
    round(R11)

    CMPQ CX, BX
    JLE blockLoop

    MOVQ R8, AX
    ROLQ $1, AX
    MOVQ R9, R12
    ROLQ $7, R12
    ADDQ R12, AX
    MOVQ R10, R12
    ROLQ $12, R12
    ADDQ R12, AX
    MOVQ R11, R12
    ROLQ $18, R12
    ADDQ R12, AX

    mergeRound(AX, R8)
    mergeRound(AX, R9)
    mergeRound(AX, R10)
    mergeRound(AX, R11)

    JMP afterBlocks

noBlocks:
    MOVQ ·prime5v(SB), AX

afterBlocks:
    ADDQ DX, AX

    // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
    ADDQ $24, BX

    CMPQ CX, BX
    JG fourByte

wordLoop:
    // Calculate k1.
    MOVQ (CX), R8
    ADDQ $8, CX
    IMULQ R14, R8
    ROLQ $31, R8
    IMULQ R13, R8

    XORQ R8, AX
    ROLQ $27, AX
    IMULQ R13, AX
    ADDQ R15, AX

    CMPQ CX, BX
    JLE wordLoop

fourByte:
    ADDQ $4, BX
    CMPQ CX, BX
    JG singles

    MOVL (CX), R8
    ADDQ $4, CX
    IMULQ R13, R8
    XORQ R8, AX

    ROLQ $23, AX
    IMULQ R14, AX
    ADDQ ·prime3v(SB), AX

singles:
    ADDQ $4, BX
    CMPQ CX, BX
    JGE finalize

singlesLoop:
    MOVBQZX (CX), R12
    ADDQ $1, CX
    IMULQ ·prime5v(SB), R12
    XORQ R12, AX

    ROLQ $11, AX
    IMULQ R13, AX

    CMPQ CX, BX
    JL singlesLoop

finalize:
    MOVQ AX, R12
    SHRQ $33, R12
    XORQ R12, AX
    IMULQ R14, AX
    MOVQ AX, R12
    SHRQ $29, R12
    XORQ R12, AX
    IMULQ ·prime3v(SB), AX
    MOVQ AX, R12
    SHRQ $32, R12
    XORQ R12, AX

    MOVQ AX, ret+24(FP)
    RET

// writeBlocks uses the same registers as above except that it uses AX to store
// the d pointer.

// func writeBlocks(d *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
    // Load fixed primes needed for round.
    MOVQ ·prime1v(SB), R13
    MOVQ ·prime2v(SB), R14

    // Load slice.
    MOVQ b_base+8(FP), CX
    MOVQ b_len+16(FP), DX
    LEAQ (CX)(DX*1), BX
    SUBQ $32, BX

    // Load vN from d.
    MOVQ d+0(FP), AX
    MOVQ 0(AX), R8   // v1
    MOVQ 8(AX), R9   // v2
    MOVQ 16(AX), R10 // v3
    MOVQ 24(AX), R11 // v4

    // We don't need to check the loop condition here; this function is
    // always called with at least one block of data to process.
blockLoop:
    round(R8)
    round(R9)
    round(R10)
    round(R11)

    CMPQ CX, BX
    JLE blockLoop

    // Copy vN back to d.
    MOVQ R8, 0(AX)
    MOVQ R9, 8(AX)
    MOVQ R10, 16(AX)
    MOVQ R11, 24(AX)

    // The number of bytes written is CX minus the old base pointer.
    SUBQ b_base+8(FP), CX
    MOVQ CX, ret+32(FP)

    RET
76 vendor/github.com/cespare/xxhash/v2/xxhash_other.go (generated, vendored)
@@ -1,76 +0,0 @@
// +build !amd64 appengine !gc purego

package xxhash

// Sum64 computes the 64-bit xxHash digest of b.
func Sum64(b []byte) uint64 {
    // A simpler version would be
    //   d := New()
    //   d.Write(b)
    //   return d.Sum64()
    // but this is faster, particularly for small inputs.

    n := len(b)
    var h uint64

    if n >= 32 {
        v1 := prime1v + prime2
        v2 := prime2
        v3 := uint64(0)
        v4 := -prime1v
        for len(b) >= 32 {
            v1 = round(v1, u64(b[0:8:len(b)]))
            v2 = round(v2, u64(b[8:16:len(b)]))
            v3 = round(v3, u64(b[16:24:len(b)]))
            v4 = round(v4, u64(b[24:32:len(b)]))
            b = b[32:len(b):len(b)]
        }
        h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
        h = mergeRound(h, v1)
        h = mergeRound(h, v2)
        h = mergeRound(h, v3)
        h = mergeRound(h, v4)
    } else {
        h = prime5
    }

    h += uint64(n)

    i, end := 0, len(b)
    for ; i+8 <= end; i += 8 {
        k1 := round(0, u64(b[i:i+8:len(b)]))
        h ^= k1
        h = rol27(h)*prime1 + prime4
    }
    if i+4 <= end {
        h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
        h = rol23(h)*prime2 + prime3
        i += 4
    }
    for ; i < end; i++ {
        h ^= uint64(b[i]) * prime5
        h = rol11(h) * prime1
    }

    h ^= h >> 33
    h *= prime2
    h ^= h >> 29
    h *= prime3
    h ^= h >> 32

    return h
}

func writeBlocks(d *Digest, b []byte) int {
    v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
    n := len(b)
    for len(b) >= 32 {
        v1 = round(v1, u64(b[0:8:len(b)]))
        v2 = round(v2, u64(b[8:16:len(b)]))
        v3 = round(v3, u64(b[16:24:len(b)]))
        v4 = round(v4, u64(b[24:32:len(b)]))
        b = b[32:len(b):len(b)]
    }
    d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
    return n - len(b)
}
15 vendor/github.com/cespare/xxhash/v2/xxhash_safe.go (generated, vendored)
@@ -1,15 +0,0 @@
// +build appengine

// This file contains the safe implementations of otherwise unsafe-using code.

package xxhash

// Sum64String computes the 64-bit xxHash digest of s.
func Sum64String(s string) uint64 {
    return Sum64([]byte(s))
}

// WriteString adds more data to d. It always returns len(s), nil.
func (d *Digest) WriteString(s string) (n int, err error) {
    return d.Write([]byte(s))
}
46 vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go (generated, vendored)
@@ -1,46 +0,0 @@
// +build !appengine

// This file encapsulates usage of unsafe.
// xxhash_safe.go contains the safe implementations.

package xxhash

import (
    "reflect"
    "unsafe"
)

// Notes:
//
// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
// for some discussion about these unsafe conversions.
//
// In the future it's possible that compiler optimizations will make these
// unsafe operations unnecessary: https://golang.org/issue/2205.
//
// Both of these wrapper functions still incur function call overhead since they
// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
// for strings to squeeze out a bit more speed. Mid-stack inlining should
// eventually fix this.

// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
    var b []byte
    bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
    bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
    bh.Len = len(s)
    bh.Cap = len(s)
    return Sum64(b)
}

// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
func (d *Digest) WriteString(s string) (n int, err error) {
    var b []byte
    bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
    bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
    bh.Len = len(s)
    bh.Cap = len(s)
    return d.Write(b)
}
0 vendor/go.etcd.io/bbolt/.gitignore → vendor/github.com/etcd-io/bbolt/.gitignore (generated, vendored)
0 vendor/go.etcd.io/bbolt/.travis.yml → vendor/github.com/etcd-io/bbolt/.travis.yml (generated, vendored)
0 vendor/go.etcd.io/bbolt/LICENSE → vendor/github.com/etcd-io/bbolt/LICENSE (generated, vendored)
0 vendor/go.etcd.io/bbolt/Makefile → vendor/github.com/etcd-io/bbolt/Makefile (generated, vendored)
0 vendor/go.etcd.io/bbolt/README.md → vendor/github.com/etcd-io/bbolt/README.md (generated, vendored)
0 vendor/go.etcd.io/bbolt/bolt_386.go → vendor/github.com/etcd-io/bbolt/bolt_386.go (generated, vendored)
0 vendor/go.etcd.io/bbolt/bolt_arm.go → vendor/github.com/etcd-io/bbolt/bolt_arm.go (generated, vendored)
0 vendor/go.etcd.io/bbolt/bolt_ppc.go → vendor/github.com/etcd-io/bbolt/bolt_ppc.go (generated, vendored)
0 vendor/go.etcd.io/bbolt/bolt_unix.go → vendor/github.com/etcd-io/bbolt/bolt_unix.go (generated, vendored)
0 vendor/go.etcd.io/bbolt/bucket.go → vendor/github.com/etcd-io/bbolt/bucket.go (generated, vendored)
0 vendor/go.etcd.io/bbolt/cursor.go → vendor/github.com/etcd-io/bbolt/cursor.go (generated, vendored)
0 vendor/go.etcd.io/bbolt/db.go → vendor/github.com/etcd-io/bbolt/db.go (generated, vendored)
0 vendor/go.etcd.io/bbolt/doc.go → vendor/github.com/etcd-io/bbolt/doc.go (generated, vendored)
0 vendor/go.etcd.io/bbolt/errors.go → vendor/github.com/etcd-io/bbolt/errors.go (generated, vendored)
0 vendor/go.etcd.io/bbolt/freelist.go → vendor/github.com/etcd-io/bbolt/freelist.go (generated, vendored)
0 vendor/go.etcd.io/bbolt/node.go → vendor/github.com/etcd-io/bbolt/node.go (generated, vendored)
0 vendor/go.etcd.io/bbolt/page.go → vendor/github.com/etcd-io/bbolt/page.go (generated, vendored)
0 vendor/go.etcd.io/bbolt/tx.go → vendor/github.com/etcd-io/bbolt/tx.go (generated, vendored)
1 vendor/github.com/gabriel-vasile/mimetype/.gitattributes (generated, vendored)
@@ -1 +0,0 @@
testdata/* linguist-vendored
14 vendor/github.com/gabriel-vasile/mimetype/.travis.yml (generated, vendored)
@@ -1,14 +0,0 @@
language: go
go:
  - "1.12"
  - "master"
before_install:
  - go get github.com/mattn/goveralls
  - go get github.com/client9/misspell/cmd/misspell
before_script:
  - go vet .
script:
  - diff -u <(echo -n) <(gofmt -d ./)
  - go test -v
  - $GOPATH/bin/goveralls -service=travis-ci
  - misspell -locale US -error *.md *.go
76 vendor/github.com/gabriel-vasile/mimetype/CODE_OF_CONDUCT.md (generated, vendored)
@@ -1,76 +0,0 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment
include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or
  advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
  address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at vasile.gabriel@email.com. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq
12 vendor/github.com/gabriel-vasile/mimetype/CONTRIBUTING.md (generated, vendored)
@@ -1,12 +0,0 @@
## Contribute
Contributions to **mimetype** are welcome. If you find an issue and you consider
contributing, you can use the [Github issues tracker](https://github.com/gabriel-vasile/mimetype/issues)
in order to report it, or better yet, open a pull request.

Code contributions must respect these rules:
 - code must be test covered
 - code must be formatted using gofmt tool
 - exported names must be documented

**Important**: By submitting a pull request, you agree to allow the project
owner to license your work under the same license as that used by the project.
79 vendor/github.com/gabriel-vasile/mimetype/EXAMPLES.md (generated, vendored)
@@ -1,79 +0,0 @@
## Examples
 - [Detect MIME type](#detect)
 - [Check against MIME type](#check)
 - [Check base MIME type](#check-parent)
 - [Binary file vs text file](#binary-file-vs-text-file)

### Detect
Get the MIME type from a slice of bytes, from a reader and from a file.
```go
// Detect the MIME type of a file stored as a byte slice.
file := "testdata/pdf.pdf"
// Detect the MIME type of a file.
mime, ferr := mimetype.DetectFile(file)
fmt.Println(mime, ferr)
// Output: application/pdf nil

// Detect the MIME type of a reader.
reader, _ := os.Open(file) // ignoring error for brevity's sake
mime, rerr := mimetype.DetectReader(reader)
fmt.Println(mime, rerr)
// Output: application/pdf nil

mime := mimetype.Detect(data)
fmt.Println(mime)
// Output: application/pdf
```

### Check
Test if a file has a specific MIME type. Also accepts MIME type aliases.
```go
mime, err := mimetype.DetectFile("testdata/zip.zip")
// application/x-zip is an alias of application/zip,
// therefore Is returns true both times.
fmt.Println(mime.Is("application/zip"), mime.Is("application/x-zip"), err)

// Output: true true <nil>
```

### Check parent
Test if a file has a specific base MIME type. First perform a detect on the
input and then navigate the parents until the base MIME type is found.

Considering JAR files are just ZIPs containing some metadata files,
if, for example, you need to tell if the input can be unzipped, go up the
MIME hierarchy until zip is found or the root is reached.
```go
detectedMIME, err := mimetype.DetectFile("testdata/jar.jar")

zip := false
for mime := detectedMIME; mime != nil; mime = mime.Parent() {
    if mime.Is("application/zip") {
        zip = true
    }
}

// zip is true, even if the detected MIME was application/jar.
fmt.Println(zip, detectedMIME, err)

// Output: true application/jar <nil>
```

### Binary file vs text file
Considering the definition of a binary file as "a computer file that is not
a text file", the two can be differentiated by searching for the text/plain MIME
in its MIME hierarchy.
```go
detectedMIME, err := mimetype.DetectFile("testdata/xml.xml")

isBinary := true
for mime := detectedMIME; mime != nil; mime = mime.Parent() {
    if mime.Is("text/plain") {
        isBinary = false
    }
}

fmt.Println(isBinary, detectedMIME, err)

// Output: false text/xml; charset=utf-8 <nil>
```
21 vendor/github.com/gabriel-vasile/mimetype/LICENSE (generated, vendored)
@@ -1,21 +0,0 @@
MIT License

Copyright (c) 2018, 2019 Gabriel Vasile

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
74 vendor/github.com/gabriel-vasile/mimetype/README.md (generated, vendored)
@@ -1,74 +0,0 @@
<h1 align="center">
  mimetype
</h1>

<h4 align="center">
  A package for detecting MIME types and extensions based on magic numbers
</h4>
<h6 align="center">
  No C bindings, zero dependencies and thread safe
</h6>

<p align="center">
  <a href="https://travis-ci.org/gabriel-vasile/mimetype">
    <img alt="Build Status" src="https://travis-ci.org/gabriel-vasile/mimetype.svg?branch=master">
  </a>
  <a href="https://godoc.org/github.com/gabriel-vasile/mimetype">
    <img alt="Documentation" src="https://godoc.org/github.com/gabriel-vasile/mimetype?status.svg">
  </a>
  <a href="https://goreportcard.com/report/github.com/gabriel-vasile/mimetype">
    <img alt="Go report card" src="https://goreportcard.com/badge/github.com/gabriel-vasile/mimetype">
  </a>
  <a href="https://coveralls.io/github/gabriel-vasile/mimetype?branch=master">
    <img alt="Coverage" src="https://coveralls.io/repos/github/gabriel-vasile/mimetype/badge.svg?branch=master">
  </a>
  <a href="LICENSE">
    <img alt="License" src="https://img.shields.io/badge/License-MIT-green.svg">
  </a>
</p>

## Install
```bash
go get github.com/gabriel-vasile/mimetype
```

## Usage
There are quick [examples](EXAMPLES.md) and
[GoDoc](https://godoc.org/github.com/gabriel-vasile/mimetype) for full reference.

## Upgrade from v0.3.x to v1.x
In v1.x the detect functions no longer return the MIME type and extension as
strings. Instead they return a [MIME](https://godoc.org/github.com/gabriel-vasile/mimetype#MIME)
struct. To get the string value of the MIME and the extension, call the
`String()` and the `Extension()` methods.

In order to play better with the stdlib `mime` package, v1.x file extensions
include the leading dot, as in ".html".

In v1.x the `text/plain` MIME type is `text/plain; charset=utf-8`.

## Supported MIME types
See [supported mimes](supported_mimes.md) for the list of detected MIME types.
If support is needed for a specific file format, please open an [issue](https://github.com/gabriel-vasile/mimetype/issues/new/choose).

## Structure
**mimetype** uses a hierarchical structure to keep the MIME type detection logic.
This reduces the number of calls needed for detecting the file type. The reason
behind this choice is that there are file formats used as containers for other
file formats. For example, Microsoft Office files are just zip archives,
containing specific metadata files. Once a file has been identified as a zip,
there is no need to check if it is a text file, but it is worth checking if
it is a Microsoft Office file.

To prevent loading entire files into memory, when detecting from a
[reader](https://godoc.org/github.com/gabriel-vasile/mimetype#DetectReader)
or from a [file](https://godoc.org/github.com/gabriel-vasile/mimetype#DetectFile)
**mimetype** limits itself to reading only the first
[3072](https://github.com/gabriel-vasile/mimetype/blob/master/internal/matchers/matchers.go#L6)
bytes from the input.
<div align="center">
  <img alt="structure" src="mimetype.gif" width="88%">
</div>

## Contributing
See [CONTRIBUTING.md](CONTRIBUTING.md).
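Rounding out the removed README's upgrade note, a brief sketch of the v1.x API, where detection returns a *MIME value whose string form and dotted extension come from methods (the file name here is hypothetical):

```go
package main

import (
    "fmt"

    "github.com/gabriel-vasile/mimetype"
)

func main() {
    // DetectFile returns a *MIME in v1.x, not plain strings.
    mime, err := mimetype.DetectFile("report.pdf") // hypothetical file
    if err != nil {
        panic(err)
    }
    // e.g. "application/pdf" and ".pdf" (the extension includes the dot).
    fmt.Println(mime.String(), mime.Extension())
}
```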
3 vendor/github.com/gabriel-vasile/mimetype/go.mod (generated, vendored)
@@ -1,3 +0,0 @@
module github.com/gabriel-vasile/mimetype

go 1.12
Some files were not shown because too many files have changed in this diff.