mirror of https://github.com/rclone/rclone.git
synced 2026-01-21 11:53:17 +00:00

Compare commits: pr-4698-jo… → fix-4704-s… (21 commits)
| SHA1 |
|---|
| b26d5da84e |
| c1bf3f3999 |
| fd2c373af1 |
| 66c8d3bf2b |
| e00bf3d723 |
| 605f2b819a |
| bf2b975359 |
| 00a5086ff2 |
| be6a888e50 |
| dad8447423 |
| 65ff109065 |
| b7253fc1c1 |
| d143f576c6 |
| a152351a71 |
| a2fa1370c5 |
| bed83b0b64 |
| cf0bdad5de |
| 85d35ef03c |
| 514d10b314 |
| 5164c3d2d0 |
| ffdd0719e7 |
**.github/workflows/build.yml** (vendored; 18 lines changed) — stop using the deprecated `::set-env`/`::add-path` workflow commands and work around the GitHub Actions brew problem:

```diff
@@ -107,10 +107,10 @@ jobs:
       - name: Set environment variables
         shell: bash
         run: |
-          echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
-          echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
-          if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
-          if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
+          echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
+          echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
+          if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
+          if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi

       - name: Install Libraries on Linux
         shell: bash
@@ -124,6 +124,8 @@ jobs:
       - name: Install Libraries on macOS
         shell: bash
         run: |
+          brew untap local/homebrew-openssl # workaround for https://github.com/actions/virtual-environments/issues/1811
+          brew untap local/homebrew-python2 # workaround for https://github.com/actions/virtual-environments/issues/1811
           brew update
           brew cask install osxfuse
         if: matrix.os == 'macOS-latest'
@@ -133,10 +135,10 @@ jobs:
         run: |
           $ProgressPreference = 'SilentlyContinue'
           choco install -y winfsp zip
-          Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
+          echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
           if ($env:GOARCH -eq "386") {
             choco install -y mingw --forcex86 --force
-            Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
+            echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
           }
           # Copy mingw32-make.exe to make.exe so the same command line
           # can be used on Windows as on macOS and Linux
@@ -223,8 +225,8 @@ jobs:
       - name: Set environment variables
         shell: bash
         run: |
-          echo '::set-env name=GOPATH::${{ runner.workspace }}'
-          echo '::add-path::${{ runner.workspace }}/bin'
+          echo 'GOPATH=${{ runner.workspace }}' >> $GITHUB_ENV
+          echo '${{ runner.workspace }}/bin' >> $GITHUB_PATH

       - name: Cross-compile rclone
         run: |
```
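The change tracks GitHub's deprecation of the `::set-env`/`::add-path` workflow commands: variables are now exported by appending `KEY=VALUE` lines to the file named by `$GITHUB_ENV` (and extra directories to `$GITHUB_PATH`). A minimal Go sketch of the same mechanism, for illustration only — `exportVar` is a hypothetical helper, not part of rclone:

```go
package main

import (
	"fmt"
	"os"
)

// exportVar appends KEY=VALUE to the file named by $GITHUB_ENV so that
// later workflow steps see the variable in their environment.
func exportVar(key, value string) error {
	envFile := os.Getenv("GITHUB_ENV")
	if envFile == "" {
		return fmt.Errorf("GITHUB_ENV is not set; not running under GitHub Actions?")
	}
	f, err := os.OpenFile(envFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = fmt.Fprintf(f, "%s=%s\n", key, value)
	return err
}

func main() {
	if err := exportVar("GOTAGS", "cmount"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```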
**Makefile** (4 lines changed) — fetch the Linux release dependencies with `go get` instead of downloading prebuilt release tarballs:

```diff
@@ -93,8 +93,8 @@ build_dep:

 # Get the release dependencies we only install on linux
 release_dep_linux:
-	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
-	go run bin/get-github-release.go -extract github-release aktau/github-release 'linux-amd64-github-release.tar.bz2'
+	cd /tmp && go get github.com/goreleaser/nfpm/...
+	cd /tmp && go get github.com/github-release/github-release

 # Get the release dependencies we only install on Windows
 release_dep_windows:
```
**Chunker backend** — when the source reports a slow hash, compute MD5/SHA1 in transit instead of querying the source:

```diff
@@ -1130,6 +1130,12 @@ func (c *chunkingReader) wrapStream(ctx context.Context, in io.Reader, src fs.Ob

 	switch {
 	case c.fs.useMD5:
+		srcObj := fs.UnWrapObjectInfo(src)
+		if srcObj != nil && srcObj.Fs().Features().SlowHash {
+			fs.Debugf(src, "skip slow MD5 on source file, hashing in-transit")
+			c.hasher = md5.New()
+			break
+		}
 		if c.md5, _ = src.Hash(ctx, hash.MD5); c.md5 == "" {
 			if c.fs.hashFallback {
 				c.sha1, _ = src.Hash(ctx, hash.SHA1)
@@ -1138,6 +1144,12 @@ func (c *chunkingReader) wrapStream(ctx context.Context, in io.Reader, src fs.Ob
 			}
 		}
 	case c.fs.useSHA1:
+		srcObj := fs.UnWrapObjectInfo(src)
+		if srcObj != nil && srcObj.Fs().Features().SlowHash {
+			fs.Debugf(src, "skip slow SHA1 on source file, hashing in-transit")
+			c.hasher = sha1.New()
+			break
+		}
 		if c.sha1, _ = src.Hash(ctx, hash.SHA1); c.sha1 == "" {
 			if c.fs.hashFallback {
 				c.md5, _ = src.Hash(ctx, hash.MD5)
```
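"Hashing in-transit" means the checksum is computed while the data streams through, so no second pass over a slow-hashing source is needed. A standalone sketch of the idea (not rclone's actual wiring):

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("file contents streaming through")

	// Every byte read from src also updates the hasher, so the checksum
	// is ready as soon as the (single) upload pass finishes.
	hasher := md5.New()
	tee := io.TeeReader(src, hasher)

	n, err := io.Copy(io.Discard, tee) // stand-in for the upload
	if err != nil {
		panic(err)
	}
	fmt.Printf("uploaded %d bytes, md5 = %s\n", n, hex.EncodeToString(hasher.Sum(nil)))
}
```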
**Jottacloud backend** — avoid double URL escaping of device/mountpoint (per the changelog):

```diff
@@ -553,7 +553,7 @@ func (f *Fs) setEndpointURL() {
 	if f.opt.Mountpoint == "" {
 		f.opt.Mountpoint = defaultMountpoint
 	}
-	f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
+	f.endpointURL = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
 }

 // readMetaDataForPath reads the metadata from the path
```
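Escaping the endpoint path here and again when the request URL is built is what produced the double escaping. A minimal sketch of the failure mode, assuming the stored path is escaped once more at request time:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	mountpoint := "My Files" // contains a space

	// Escaped once when stored...
	stored := url.PathEscape(mountpoint) // "My%20Files"
	// ...and once more when the request URL is built.
	sent := url.PathEscape(stored) // "My%2520Files" - the '%' got re-escaped

	fmt.Println(stored, sent)
	// Escaping exactly once, at the point the URL is built, avoids this.
}
```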
**Mailru backend** — document the speedup feature, add the `unknowndirs` quirk, tolerate non-`"folder"` directory kinds, and rework the upload speedup path:

```diff
@@ -102,6 +102,7 @@ func init() {
 This feature is called "speedup" or "put by hash". It is especially efficient
 in case of generally available files like popular books, video or audio clips,
 because files are searched by hash in all accounts of all mailru users.
+It is meaningless and ineffective if source file is unique or encrypted.
 Please note that rclone may need local memory and disk space to calculate
 content hash in advance and decide whether full upload is required.
 Also, if rclone does not know file size in advance (e.g. in case of
@@ -192,7 +193,7 @@ This option must not be used by an ordinary user. It is intended only to
 facilitate remote troubleshooting of backend issues. Strict meaning of
 flags is not documented and not guaranteed to persist between releases.
 Quirks will be removed when the backend grows stable.
-Supported quirks: atomicmkdir binlist`,
+Supported quirks: atomicmkdir binlist unknowndirs`,
 		}, {
 			Name: config.ConfigEncoding,
 			Help: config.ConfigEncodingHelp,
@@ -381,6 +382,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 type quirks struct {
 	binlist     bool
 	atomicmkdir bool
+	unknowndirs bool
 }

 func (q *quirks) parseQuirks(option string) {
@@ -401,6 +403,9 @@ func (q *quirks) parseQuirks(option string) {
 		// use mkdir as a locking primitive and depend on its atomicity.
 		// Remove this quirk when the above issue is investigated.
 		q.atomicmkdir = true
+	case "unknowndirs":
+		// Accepts unknown resource types as folders.
+		q.unknowndirs = true
 	default:
 		// Ignore unknown flags
 	}
@@ -518,7 +523,7 @@ func (f *Fs) relPath(absPath string) (string, error) {
 	return "", fmt.Errorf("path %q should be under %q", absPath, f.root)
 }

-// metaServer ...
+// metaServer returns URL of current meta server
 func (f *Fs) metaServer(ctx context.Context) (string, error) {
 	f.metaMu.Lock()
 	defer f.metaMu.Unlock()
@@ -623,33 +628,56 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
 	if err != nil {
 		return nil, -1, err
 	}

 	mTime := int64(item.Mtime)
 	if mTime < 0 {
 		fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
 		mTime = 0
 	}
-	switch item.Kind {
-	case "folder":
-		dir := fs.NewDir(remote, time.Unix(mTime, 0)).SetSize(item.Size)
-		dirSize := item.Count.Files + item.Count.Folders
-		return dir, dirSize, nil
-	case "file":
-		binHash, err := mrhash.DecodeString(item.Hash)
-		if err != nil {
-			return nil, -1, err
-		}
-		file := &Object{
-			fs:          f,
-			remote:      remote,
-			hasMetaData: true,
-			size:        item.Size,
-			mrHash:      binHash,
-			modTime:     time.Unix(mTime, 0),
-		}
-		return file, -1, nil
-	default:
-		return nil, -1, fmt.Errorf("Unknown resource type %q", item.Kind)
-	}
+	modTime := time.Unix(mTime, 0)
+
+	isDir, err := f.isDir(item.Kind, remote)
+	if err != nil {
+		return nil, -1, err
+	}
+	if isDir {
+		dir := fs.NewDir(remote, modTime).SetSize(item.Size)
+		return dir, item.Count.Files + item.Count.Folders, nil
+	}
+
+	binHash, err := mrhash.DecodeString(item.Hash)
+	if err != nil {
+		return nil, -1, err
+	}
+	file := &Object{
+		fs:          f,
+		remote:      remote,
+		hasMetaData: true,
+		size:        item.Size,
+		mrHash:      binHash,
+		modTime:     modTime,
+	}
+	return file, -1, nil
 }

+// isDir returns true for directories, false for files
+func (f *Fs) isDir(kind, path string) (bool, error) {
+	switch kind {
+	case "":
+		return false, errors.New("empty resource type")
+	case "file":
+		return false, nil
+	case "folder":
+		// fall thru
+	case "camera-upload", "mounted", "shared":
+		fs.Debugf(f, "[%s]: folder has type %q", path, kind)
+	default:
+		if !f.quirks.unknowndirs {
+			return false, fmt.Errorf("unknown resource type %q", kind)
+		}
+		fs.Errorf(f, "[%s]: folder has unknown type %q", path, kind)
+	}
+	return true, nil
+}
+
 // List the objects and directories in dir into entries.
@@ -717,7 +745,11 @@ func (f *Fs) listM1(ctx context.Context, dirPath string, offset int, limit int)
 		return nil, err
 	}

-	if info.Body.Kind != "folder" {
+	isDir, err := f.isDir(info.Body.Kind, dirPath)
+	if err != nil {
+		return nil, err
+	}
+	if !isDir {
 		return nil, fs.ErrorIsFile
 	}
@@ -1570,23 +1602,28 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}

 	var (
-		fileBuf    []byte
-		fileHash   []byte
-		newHash    []byte
-		trySpeedup bool
+		fileBuf  []byte
+		fileHash []byte
+		newHash  []byte
+		slowHash bool
+		localSrc bool
 	)
-
-	// Don't disturb the source if file fits in hash.
-	// Skip an extra speedup request if file fits in hash.
-	if size > mrhash.Size {
-		// Request hash from source.
+	if srcObj := fs.UnWrapObjectInfo(src); srcObj != nil {
+		srcFeatures := srcObj.Fs().Features()
+		slowHash = srcFeatures.SlowHash
+		localSrc = srcFeatures.IsLocal
+	}
+
+	// Try speedup if it's globally enabled but skip extra post
+	// request if file is small and fits in the metadata request
+	trySpeedup := o.fs.opt.SpeedupEnable && size > mrhash.Size
+
+	// Try to get the hash if it's instant
+	if trySpeedup && !slowHash {
 		if srcHash, err := src.Hash(ctx, MrHashType); err == nil && srcHash != "" {
 			fileHash, _ = mrhash.DecodeString(srcHash)
 		}
-
-		// Try speedup if it's globally enabled and source hash is available.
-		trySpeedup = o.fs.opt.SpeedupEnable
-		if trySpeedup && fileHash != nil {
+		if fileHash != nil {
 			if o.putByHash(ctx, fileHash, src, "source") {
 				return nil
 			}
@@ -1595,13 +1632,22 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}

 	// Need to calculate hash, check whether file is still eligible for speedup
-	if trySpeedup {
-		trySpeedup = o.fs.eligibleForSpeedup(o.Remote(), size, options...)
-	}
+	trySpeedup = trySpeedup && o.fs.eligibleForSpeedup(o.Remote(), size, options...)
+
+	// Attempt to put by hash if file is local and eligible
+	if trySpeedup && localSrc {
+		if srcHash, err := src.Hash(ctx, MrHashType); err == nil && srcHash != "" {
+			fileHash, _ = mrhash.DecodeString(srcHash)
+		}
+		if fileHash != nil && o.putByHash(ctx, fileHash, src, "localfs") {
+			return nil
+		}
+		// If local file hashing has failed, it's pointless to try anymore
+		trySpeedup = false
+	}

 	// Attempt to put by calculating hash in memory
 	if trySpeedup && size <= int64(o.fs.opt.SpeedupMaxMem) {
 		//fs.Debugf(o, "attempt to put by hash from memory")
 		fileBuf, err = ioutil.ReadAll(in)
 		if err != nil {
 			return err
@@ -1731,6 +1777,7 @@ func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
 	return nil
 }

+// putByHash is a thin wrapper around addFileMetaData
 func (o *Object) putByHash(ctx context.Context, mrHash []byte, info fs.ObjectInfo, method string) bool {
 	oNew := new(Object)
 	*oNew = *o
@@ -2157,6 +2204,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		// Discard the beginning of the data
 		_, err = io.CopyN(ioutil.Discard, wrapStream, start)
 		if err != nil {
+			closeBody(res)
 			return nil, err
 		}
 	}
```
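"Put by hash" uploads nothing but the content hash when the server already stores identical content. A toy sketch of the decision, using SHA-1 as a stand-in for Mail.ru's proprietary mrhash; `serverKnows` is a made-up placeholder for the backend query, not a real API:

```go
package main

import (
	"crypto/sha1"
	"fmt"
	"io"
	"strings"
)

// serverKnows stands in for "is content with this hash already stored
// server-side?" - hypothetical, for illustration only.
func serverKnows(hash []byte) bool { return false }

func upload(src io.Reader, size int64) error {
	// Files no larger than a hash are cheaper to upload directly, so the
	// speedup probe is skipped for them (mirrors size > mrhash.Size).
	if size <= sha1.Size {
		_, err := io.Copy(io.Discard, src) // plain upload
		return err
	}
	h := sha1.New()
	if _, err := io.Copy(h, src); err != nil {
		return err
	}
	if serverKnows(h.Sum(nil)) {
		fmt.Println("put by hash: no data transferred")
		return nil
	}
	// Otherwise fall back to a full upload, re-reading the source.
	return nil
}

func main() {
	data := "some file body"
	_ = upload(strings.NewReader(data), int64(len(data)))
}
```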
**S3 backend** — preserve `.` and `..` as valid key names (switch from `path.Join` to `bucket.Join`, drop dot encoding, disable the SDK's URI cleaning) and rename the `bucket` parameter so it no longer shadows the `bucket` package:

```diff
@@ -1190,8 +1190,7 @@ rclone does if you know the bucket exists already.
 			// - trailing / encoding
 			// so that AWS keys are always valid file names
 			Default: encoder.EncodeInvalidUtf8 |
-				encoder.EncodeSlash |
-				encoder.EncodeDot,
+				encoder.EncodeSlash,
 		}, {
 			Name:    "memory_pool_flush_time",
 			Default: memoryPoolFlushTime,
@@ -1387,7 +1386,8 @@ func parsePath(path string) (root string) {
 // split returns bucket and bucketPath from the rootRelativePath
 // relative to f.root
 func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
-	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
+	bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
+	fs.Debugf(nil, "SPLIT %q %q", f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath))
 	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
 }
@@ -1500,6 +1500,9 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
 		awsConfig.WithEndpoint(opt.Endpoint)
 	}

+	// Allow URI with "." etc
+	awsConfig.DisableRestProtocolURICleaning = aws.Bool(true)
+
 	// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
 	awsSessionOpts := session.Options{
 		Config: *awsConfig,
@@ -1734,7 +1737,7 @@ type listFn func(remote string, object *s3.Object, isDirectory bool) error
 // bucket to the start.
 //
 // Set recurse to read sub directories
-func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) error {
+func (f *Fs) list(ctx context.Context, bucketName, directory, prefix string, addBucket bool, recurse bool, fn listFn) error {
 	if prefix != "" {
 		prefix += "/"
 	}
@@ -1765,7 +1768,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 	for {
 		// FIXME need to implement ALL loop
 		req := s3.ListObjectsInput{
-			Bucket:    &bucket,
+			Bucket:    &bucketName,
 			Delimiter: &delimiter,
 			Prefix:    &directory,
 			MaxKeys:   &f.opt.ListChunk,
@@ -1805,7 +1808,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 		if reqErr, ok := err.(awserr.RequestFailure); ok {
 			// 301 if wrong region for bucket
 			if reqErr.StatusCode() == http.StatusMovedPermanently {
-				fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucket)
+				fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucketName)
 				return nil
 			}
 		}
@@ -1833,7 +1836,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 			}
 			remote = remote[len(prefix):]
 			if addBucket {
-				remote = path.Join(bucket, remote)
+				remote = bucket.Join(bucketName, remote)
 			}
 			if strings.HasSuffix(remote, "/") {
 				remote = remote[:len(remote)-1]
@@ -1861,7 +1864,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 		remote = remote[len(prefix):]
 		isDirectory := remote == "" || strings.HasSuffix(remote, "/")
 		if addBucket {
-			remote = path.Join(bucket, remote)
+			remote = bucket.Join(bucketName, remote)
 		}
 		// is this a directory marker?
 		if isDirectory && object.Size != nil && *object.Size == 0 {
@@ -2147,7 +2150,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
 	req.Bucket = &dstBucket
 	req.ACL = &f.opt.ACL
 	req.Key = &dstPath
-	source := pathEscape(path.Join(srcBucket, srcPath))
+	source := pathEscape(bucket.Join(srcBucket, srcPath))
 	req.CopySource = &source
 	if f.opt.ServerSideEncryption != "" {
 		req.ServerSideEncryption = &f.opt.ServerSideEncryption
```
**Union backend** — create root directories if none exist, and log the chosen policies (see the recursion sketch after this block):

```diff
@@ -145,11 +145,16 @@ func (f *Fs) Hashes() hash.Set {
 // Mkdir makes the root directory of the Fs object
 func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	upstreams, err := f.create(ctx, dir)
-	if err == fs.ErrorObjectNotFound && dir != parentDir(dir) {
-		if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
-			return err
+	if err == fs.ErrorObjectNotFound {
+		if dir != parentDir(dir) {
+			if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
+				return err
+			}
+			upstreams, err = f.create(ctx, dir)
+		} else if dir == "" {
+			// If root dirs not created then create them
+			upstreams, err = f.upstreams, nil
 		}
-		upstreams, err = f.create(ctx, dir)
 	}
 	if err != nil {
 		return err
@@ -818,6 +823,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if err != nil {
 		return nil, err
 	}
+	fs.Debugf(f, "actionPolicy = %T, createPolicy = %T, searchPolicy = %T", f.actionPolicy, f.createPolicy, f.searchPolicy)
 	var features = (&fs.Features{
 		CaseInsensitive: true,
 		DuplicateFiles:  false,
```
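The reworked `Mkdir` recurses to the parent until something exists, then recreates downwards. The recursion shape in isolation — a simplified sketch with a map standing in for the remote, not the union backend's actual types:

```go
package main

import (
	"fmt"
	"path"
)

// mkdirAll creates dir and any missing parents in a fake store,
// mirroring the recurse-to-parent shape of the union Mkdir change.
func mkdirAll(store map[string]bool, dir string) {
	parent := path.Dir(dir)
	// path.Dir(".") == "." terminates the recursion at the root.
	if parent != dir && !store[parent] {
		mkdirAll(store, parent)
	}
	store[dir] = true
	fmt.Println("created", dir)
}

func main() {
	store := map[string]bool{".": true} // root already exists
	mkdirAll(store, "a/b/c")            // created a, a/b, a/b/c
}
```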
**genautocomplete bash command** — write the completion script to stdout when the output file is `-` (the script can then be piped, e.g. `rclone genautocomplete bash - | sudo tee /etc/bash_completion.d/rclone`):

```diff
@@ -2,6 +2,7 @@ package genautocomplete

 import (
 	"log"
+	"os"

 	"github.com/rclone/rclone/cmd"
 	"github.com/spf13/cobra"
@@ -29,11 +30,20 @@ them directly

 If you supply a command line argument the script will be written
 there.
+
+If output_file is "-", then the output will be written to stdout.
 `,
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(0, 1, command, args)
 		out := "/etc/bash_completion.d/rclone"
 		if len(args) > 0 {
+			if args[0] == "-" {
+				err := cmd.Root.GenBashCompletion(os.Stdout)
+				if err != nil {
+					log.Fatal(err)
+				}
+				return
+			}
 			out = args[0]
 		}
 		err := cmd.Root.GenBashCompletionFile(out)
```
**genautocomplete fish command** — the same stdout support:

```diff
@@ -2,6 +2,7 @@ package genautocomplete

 import (
 	"log"
+	"os"

 	"github.com/rclone/rclone/cmd"
 	"github.com/spf13/cobra"
@@ -29,11 +30,20 @@ them directly

 If you supply a command line argument the script will be written
 there.
+
+If output_file is "-", then the output will be written to stdout.
 `,
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(0, 1, command, args)
 		out := "/etc/fish/completions/rclone.fish"
 		if len(args) > 0 {
+			if args[0] == "-" {
+				err := cmd.Root.GenFishCompletion(os.Stdout, true)
+				if err != nil {
+					log.Fatal(err)
+				}
+				return
+			}
 			out = args[0]
 		}
 		err := cmd.Root.GenFishCompletionFile(out, true)
```
**genautocomplete tests** — merge the cleanup defers and add stdout tests that temporarily redirect `os.Stdout` to a temp file:

```diff
@@ -11,8 +11,10 @@ import (
 func TestCompletionBash(t *testing.T) {
 	tempFile, err := ioutil.TempFile("", "completion_bash")
 	assert.NoError(t, err)
-	defer func() { _ = tempFile.Close() }()
-	defer func() { _ = os.Remove(tempFile.Name()) }()
+	defer func() {
+		_ = tempFile.Close()
+		_ = os.Remove(tempFile.Name())
+	}()

 	bashCommandDefinition.Run(bashCommandDefinition, []string{tempFile.Name()})

@@ -21,11 +23,32 @@ func TestCompletionBash(t *testing.T) {
 	assert.NotEmpty(t, string(bs))
 }

+func TestCompletionBashStdout(t *testing.T) {
+	originalStdout := os.Stdout
+	tempFile, err := ioutil.TempFile("", "completion_zsh")
+	assert.NoError(t, err)
+	defer func() {
+		_ = tempFile.Close()
+		_ = os.Remove(tempFile.Name())
+	}()
+
+	os.Stdout = tempFile
+	defer func() { os.Stdout = originalStdout }()
+
+	bashCommandDefinition.Run(bashCommandDefinition, []string{"-"})
+
+	output, err := ioutil.ReadFile(tempFile.Name())
+	assert.NoError(t, err)
+	assert.NotEmpty(t, string(output))
+}
+
 func TestCompletionZsh(t *testing.T) {
 	tempFile, err := ioutil.TempFile("", "completion_zsh")
 	assert.NoError(t, err)
-	defer func() { _ = tempFile.Close() }()
-	defer func() { _ = os.Remove(tempFile.Name()) }()
+	defer func() {
+		_ = tempFile.Close()
+		_ = os.Remove(tempFile.Name())
+	}()

 	zshCommandDefinition.Run(zshCommandDefinition, []string{tempFile.Name()})

@@ -34,11 +57,31 @@ func TestCompletionZsh(t *testing.T) {
 	assert.NotEmpty(t, string(bs))
 }

+func TestCompletionZshStdout(t *testing.T) {
+	originalStdout := os.Stdout
+	tempFile, err := ioutil.TempFile("", "completion_zsh")
+	assert.NoError(t, err)
+	defer func() {
+		_ = tempFile.Close()
+		_ = os.Remove(tempFile.Name())
+	}()
+
+	os.Stdout = tempFile
+	defer func() { os.Stdout = originalStdout }()
+
+	zshCommandDefinition.Run(zshCommandDefinition, []string{"-"})
+	output, err := ioutil.ReadFile(tempFile.Name())
+	assert.NoError(t, err)
+	assert.NotEmpty(t, string(output))
+}
+
 func TestCompletionFish(t *testing.T) {
 	tempFile, err := ioutil.TempFile("", "completion_fish")
 	assert.NoError(t, err)
-	defer func() { _ = tempFile.Close() }()
-	defer func() { _ = os.Remove(tempFile.Name()) }()
+	defer func() {
+		_ = tempFile.Close()
+		_ = os.Remove(tempFile.Name())
+	}()

 	fishCommandDefinition.Run(fishCommandDefinition, []string{tempFile.Name()})

@@ -46,3 +89,22 @@ func TestCompletionFish(t *testing.T) {
 	assert.NoError(t, err)
 	assert.NotEmpty(t, string(bs))
 }
+
+func TestCompletionFishStdout(t *testing.T) {
+	originalStdout := os.Stdout
+	tempFile, err := ioutil.TempFile("", "completion_zsh")
+	assert.NoError(t, err)
+	defer func() {
+		_ = tempFile.Close()
+		_ = os.Remove(tempFile.Name())
+	}()
+
+	os.Stdout = tempFile
+	defer func() { os.Stdout = originalStdout }()
+
+	fishCommandDefinition.Run(fishCommandDefinition, []string{"-"})
+
+	output, err := ioutil.ReadFile(tempFile.Name())
+	assert.NoError(t, err)
+	assert.NotEmpty(t, string(output))
+}
```
**genautocomplete zsh command** — the same stdout support:

```diff
@@ -30,11 +30,20 @@ them directly

 If you supply a command line argument the script will be written
 there.
+
+If output_file is "-", then the output will be written to stdout.
 `,
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(0, 1, command, args)
 		out := "/usr/share/zsh/vendor-completions/_rclone"
 		if len(args) > 0 {
+			if args[0] == "-" {
+				err := cmd.Root.GenZshCompletion(os.Stdout)
+				if err != nil {
+					log.Fatal(err)
+				}
+				return
+			}
 			out = args[0]
 		}
 		outFile, err := os.Create(out)
```
**Mount flags (mountlib)** — mark `--allow-root` and `--allow-other` as no-ops on Windows and fix a comment typo:

```diff
@@ -83,8 +83,8 @@ func AddFlags(flagSet *pflag.FlagSet) {
 	rc.AddOption("mount", &Opt)
 	flags.BoolVarP(flagSet, &Opt.DebugFUSE, "debug-fuse", "", Opt.DebugFUSE, "Debug the FUSE internals - needs -v.")
 	flags.BoolVarP(flagSet, &Opt.AllowNonEmpty, "allow-non-empty", "", Opt.AllowNonEmpty, "Allow mounting over a non-empty directory (not Windows).")
-	flags.BoolVarP(flagSet, &Opt.AllowRoot, "allow-root", "", Opt.AllowRoot, "Allow access to root user.")
-	flags.BoolVarP(flagSet, &Opt.AllowOther, "allow-other", "", Opt.AllowOther, "Allow access to other users.")
+	flags.BoolVarP(flagSet, &Opt.AllowRoot, "allow-root", "", Opt.AllowRoot, "Allow access to root user (not Windows).")
+	flags.BoolVarP(flagSet, &Opt.AllowOther, "allow-other", "", Opt.AllowOther, "Allow access to other users (not Windows).")
 	flags.BoolVarP(flagSet, &Opt.DefaultPermissions, "default-permissions", "", Opt.DefaultPermissions, "Makes kernel enforce access control based on the file mode.")
 	flags.BoolVarP(flagSet, &Opt.WritebackCache, "write-back-cache", "", Opt.WritebackCache, "Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used.")
 	flags.FVarP(flagSet, &Opt.MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads.")
@@ -101,7 +101,7 @@ func AddFlags(flagSet *pflag.FlagSet) {
 	}
 }

-// Check is folder is empty
+// Check if folder is empty
 func checkMountEmpty(mountpoint string) error {
 	fp, fpErr := os.Open(mountpoint)
```
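`checkMountEmpty` opens the mountpoint and verifies it contains no entries. The body is truncated above; the standard Go idiom is to request a single directory entry and treat `io.EOF` as "empty". A sketch of that idiom, not necessarily rclone's exact implementation:

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// isDirEmpty reports whether the directory at path contains no entries.
func isDirEmpty(path string) (bool, error) {
	f, err := os.Open(path)
	if err != nil {
		return false, err
	}
	defer f.Close()
	// Ask for at most one entry: io.EOF means the directory is empty.
	_, err = f.Readdirnames(1)
	if err == io.EOF {
		return true, nil
	}
	return false, err
}

func main() {
	empty, err := isDirEmpty(os.TempDir())
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println("empty:", empty)
}
```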
**Mount command** — on Windows, warn about flags that do nothing there; elsewhere keep the empty-mountpoint check unless `--allow-non-empty` is set:

```diff
@@ -359,15 +359,24 @@ When --vfs-read-chunk-size-limit 500M is specified, the result would be
 		defer cmd.StartStats()()
 	}

-	// Skip checkMountEmpty if --allow-non-empty flag is used or if
-	// the Operating System is Windows
-	if !opt.AllowNonEmpty && runtime.GOOS != "windows" {
+	// Inform about ignored flags on Windows,
+	// and if not on Windows and not --allow-non-empty flag is used
+	// verify that mountpoint is empty.
+	if runtime.GOOS == "windows" {
+		if opt.AllowNonEmpty {
+			fs.Logf(nil, "--allow-non-empty flag does nothing on Windows")
+		}
+		if opt.AllowRoot {
+			fs.Logf(nil, "--allow-root flag does nothing on Windows")
+		}
+		if opt.AllowOther {
+			fs.Logf(nil, "--allow-other flag does nothing on Windows")
+		}
+	} else if !opt.AllowNonEmpty {
 		err := checkMountEmpty(mountpoint)
 		if err != nil {
 			log.Fatalf("Fatal error: %v", err)
 		}
-	} else if opt.AllowNonEmpty && runtime.GOOS == "windows" {
-		fs.Logf(nil, "--allow-non-empty flag does nothing on Windows")
 	}

 	// Work out the volume name, removing special
```
**ncdu command** — add an average-size sort, bound to the `A` key:

```diff
@@ -71,7 +71,7 @@ func helpText() (tr []string) {
 		" ←,h to return",
 		" c toggle counts",
 		" g toggle graph",
-		" n,s,C sort by name,size,count",
+		" n,s,C,A sort by name,size,count,average size",
 		" d delete file/directory",
 	}
 	if !clipboard.Unsupported {
@@ -88,27 +88,28 @@ func helpText() (tr []string) {

 // UI contains the state of the user interface
 type UI struct {
-	f              fs.Fs     // fs being displayed
-	fsName         string    // human name of Fs
-	root           *scan.Dir // root directory
-	d              *scan.Dir // current directory being displayed
-	path           string    // path of current directory
-	showBox        bool      // whether to show a box
-	boxText        []string  // text to show in box
-	boxMenu        []string  // box menu options
-	boxMenuButton  int
-	boxMenuHandler func(fs fs.Fs, path string, option int) (string, error)
-	entries        fs.DirEntries // entries of current directory
-	sortPerm       []int         // order to display entries in after sorting
-	invSortPerm    []int         // inverse order
-	dirListHeight  int           // height of listing
-	listing        bool          // whether listing is in progress
-	showGraph      bool          // toggle showing graph
-	showCounts     bool          // toggle showing counts
-	sortByName     int8          // +1 for normal, 0 for off, -1 for reverse
-	sortBySize     int8
-	sortByCount    int8
-	dirPosMap      map[string]dirPos // store for directory positions
+	f                 fs.Fs     // fs being displayed
+	fsName            string    // human name of Fs
+	root              *scan.Dir // root directory
+	d                 *scan.Dir // current directory being displayed
+	path              string    // path of current directory
+	showBox           bool      // whether to show a box
+	boxText           []string  // text to show in box
+	boxMenu           []string  // box menu options
+	boxMenuButton     int
+	boxMenuHandler    func(fs fs.Fs, path string, option int) (string, error)
+	entries           fs.DirEntries // entries of current directory
+	sortPerm          []int         // order to display entries in after sorting
+	invSortPerm       []int         // inverse order
+	dirListHeight     int           // height of listing
+	listing           bool          // whether listing is in progress
+	showGraph         bool          // toggle showing graph
+	showCounts        bool          // toggle showing counts
+	sortByName        int8          // +1 for normal, 0 for off, -1 for reverse
+	sortBySize        int8
+	sortByCount       int8
+	sortByAverageSize int8
+	dirPosMap         map[string]dirPos // store for directory positions
 }

 // Where we have got to in the directory listing
@@ -496,9 +497,17 @@ type ncduSort struct {

 // Less is part of sort.Interface.
 func (ds *ncduSort) Less(i, j int) bool {
+	var iAvgSize, jAvgSize float64
 	isize, icount, _, _ := ds.d.AttrI(ds.sortPerm[i])
 	jsize, jcount, _, _ := ds.d.AttrI(ds.sortPerm[j])
 	iname, jname := ds.entries[ds.sortPerm[i]].Remote(), ds.entries[ds.sortPerm[j]].Remote()
+	if icount > 0 {
+		iAvgSize = float64(isize / icount)
+	}
+	if jcount > 0 {
+		jAvgSize = float64(jsize / jcount)
+	}
+
 	switch {
 	case ds.u.sortByName < 0:
 		return iname > jname
@@ -520,6 +529,18 @@ func (ds *ncduSort) Less(i, j int) bool {
 		if icount != jcount {
 			return icount > jcount
 		}
+	case ds.u.sortByAverageSize < 0:
+		if iAvgSize != jAvgSize {
+			return iAvgSize < jAvgSize
+		}
+		// if avgSize is equal, sort by size
+		return isize < jsize
+	case ds.u.sortByAverageSize > 0:
+		if iAvgSize != jAvgSize {
+			return iAvgSize > jAvgSize
+		}
+		// if avgSize is equal, sort by size
+		return isize > jsize
 	}
 	// if everything equal, sort by name
 	return iname < jname
@@ -628,6 +649,7 @@ func (u *UI) toggleSort(sortType *int8) {
 	u.sortBySize = 0
 	u.sortByCount = 0
 	u.sortByName = 0
+	u.sortByAverageSize = 0
 	if old == 0 {
 		*sortType = 1
 	} else {
@@ -742,6 +764,8 @@ outer:
 			u.toggleSort(&u.sortBySize)
 		case 'C':
 			u.toggleSort(&u.sortByCount)
+		case 'A':
+			u.toggleSort(&u.sortByAverageSize)
 		case 'y':
 			u.copyPath()
 		case 'Y':
```
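For illustration, the same ordering can be expressed with `sort.Slice`. Note that `float64(isize / icount)` in the patch does integer division before the conversion, so fractions are truncated; the sketch below divides in floating point instead (the types and data here are made up):

```go
package main

import (
	"fmt"
	"sort"
)

type dirStat struct {
	name  string
	size  int64 // total bytes in the directory
	count int64 // number of files
}

func avg(d dirStat) float64 {
	if d.count == 0 {
		return 0
	}
	return float64(d.size) / float64(d.count) // exact average
}

func main() {
	dirs := []dirStat{
		{"docs", 1200, 40},
		{"media", 90000, 3},
		{"src", 5000, 500},
	}
	// Descending by average file size, ties broken by name,
	// mirroring the ncdu 'A' sort added in the patch.
	sort.Slice(dirs, func(i, j int) bool {
		ai, aj := avg(dirs[i]), avg(dirs[j])
		if ai != aj {
			return ai > aj
		}
		return dirs[i].name < dirs[j].name
	})
	fmt.Println(dirs)
}
```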
**Authors list (docs)**:

```diff
@@ -421,3 +421,4 @@ put them back in again.` >}}
 * Dan Hipschman <dan.hipschman@opendoor.com>
 * Josh Soref <jsoref@users.noreply.github.com>
 * David <david@staron.nl>
+* Ingo <ingo@hoffmann.cx>
```
**Changelog (docs)** — add the v1.53.2 release notes:

```diff
@@ -5,6 +5,56 @@ description: "Rclone Changelog"

 # Changelog

+## v1.53.2 - 2020-10-26
+
+[See commits](https://github.com/rclone/rclone/compare/v1.53.1...v1.53.2)
+
+* Bug Fixes
+    * accounting
+        * Fix incorrect speed and transferTime in core/stats (Nick Craig-Wood)
+        * Stabilize display order of transfers on Windows (Nick Craig-Wood)
+    * operations
+        * Fix use of --suffix without --backup-dir (Nick Craig-Wood)
+        * Fix spurious "--checksum is in use but the source and destination have no hashes in common" (Nick Craig-Wood)
+    * build
+        * Work around GitHub actions brew problem (Nick Craig-Wood)
+        * Stop using set-env and set-path in the GitHub actions (Nick Craig-Wood)
+* Mount
+    * mount2: Fix the swapped UID / GID values (Russell Cattelan)
+* VFS
+    * Detect and recover from a file being removed externally from the cache (Nick Craig-Wood)
+    * Fix a deadlock vulnerability in downloaders.Close (Leo Luan)
+    * Fix a race condition in retryFailedResets (Leo Luan)
+    * Fix missed concurrency control between some item operations and reset (Leo Luan)
+    * Add exponential backoff during ENOSPC retries (Leo Luan)
+    * Add a missed update of used cache space (Leo Luan)
+    * Fix --no-modtime to not attempt to set modtimes (as documented) (Nick Craig-Wood)
+* Local
+    * Fix sizes and syncing with --links option on Windows (Nick Craig-Wood)
+* Chunker
+    * Disable ListR to fix missing files on GDrive (workaround) (Ivan Andreev)
+    * Fix upload over crypt (Ivan Andreev)
+* Fichier
+    * Increase maximum file size from 100GB to 300GB (gyutw)
+* Jottacloud
+    * Remove clientSecret from config when upgrading to token based authentication (buengese)
+    * Avoid double url escaping of device/mountpoint (albertony)
+    * Remove DirMove workaround as it's not required anymore - also (buengese)
+* Mailru
+    * Fix uploads after recent changes on server (Ivan Andreev)
+    * Fix range requests after june changes on server (Ivan Andreev)
+    * Fix invalid timestamp on corrupted files (fixes) (Ivan Andreev)
+* Onedrive
+    * Fix disk usage for sharepoint (Nick Craig-Wood)
+* S3
+    * Add missing regions for AWS (Anagh Kumar Baranwal)
+* Seafile
+    * Fix accessing libraries > 2GB on 32 bit systems (Muffin King)
+* SFTP
+    * Always convert the checksum to lower case (buengese)
+* Union
+    * Create root directories if none exist (Nick Craig-Wood)
+
 ## v1.53.1 - 2020-09-13

 [See commits](https://github.com/rclone/rclone/compare/v1.53.0...v1.53.1)
```
**Generated command docs** — the same stdout note for the bash, fish and zsh pages:

````diff
@@ -27,6 +27,7 @@ them directly
 If you supply a command line argument the script will be written
 there.

+If output_file is `-`, then the output will be written to stdout.

 ```
 rclone genautocomplete bash [output_file] [flags]
@@ -27,6 +27,7 @@ them directly
 If you supply a command line argument the script will be written
 there.

+If output_file is `-`, then the output will be written to stdout.

 ```
 rclone genautocomplete fish [output_file] [flags]
@@ -27,6 +27,7 @@ them directly
 If you supply a command line argument the script will be written
 there.

+If output_file is `-`, then the output will be written to stdout.

 ```
 rclone genautocomplete zsh [output_file] [flags]
````
**Integration test runner (test_all) — config** — allow a per-backend `ListRetries` override:

```diff
@@ -27,15 +27,16 @@ type Test struct {
 //
 // FIXME make bucket based remotes set sub-dir automatically???
 type Backend struct {
-	Backend  string   // name of the backend directory
-	Remote   string   // name of the test remote
-	FastList bool     // set to test with -fast-list
-	Short    bool     // set to test with -short
-	OneOnly  bool     // set to run only one backend test at once
-	MaxFile  string   // file size limit
-	CleanUp  bool     // when running clean, run cleanup first
-	Ignore   []string // test names to ignore the failure of
-	Tests    []string // paths of tests to run, blank for all
+	Backend     string   // name of the backend directory
+	Remote      string   // name of the test remote
+	FastList    bool     // set to test with -fast-list
+	Short       bool     // set to test with -short
+	OneOnly     bool     // set to run only one backend test at once
+	MaxFile     string   // file size limit
+	CleanUp     bool     // when running clean, run cleanup first
+	Ignore      []string // test names to ignore the failure of
+	Tests       []string // paths of tests to run, blank for all
+	ListRetries int      // -list-retries if > 0
 }

 // includeTest returns true if this backend should be included in this
@@ -79,16 +80,17 @@ func (b *Backend) MakeRuns(t *Test) (runs []*Run) {
 			continue
 		}
 		run := &Run{
-			Remote:    b.Remote,
-			Backend:   b.Backend,
-			Path:      t.Path,
-			FastList:  fastlist,
-			Short:     (b.Short && t.Short),
-			NoRetries: t.NoRetries,
-			OneOnly:   b.OneOnly,
-			NoBinary:  t.NoBinary,
-			SizeLimit: int64(maxSize),
-			Ignore:    ignore,
+			Remote:      b.Remote,
+			Backend:     b.Backend,
+			Path:        t.Path,
+			FastList:    fastlist,
+			Short:       (b.Short && t.Short),
+			NoRetries:   t.NoRetries,
+			OneOnly:     b.OneOnly,
+			NoBinary:    t.NoBinary,
+			SizeLimit:   int64(maxSize),
+			Ignore:      ignore,
+			ListRetries: b.ListRetries,
 		}
 		if t.AddBackend {
 			run.Path = path.Join(run.Path, b.Backend)
```
**test_all config.yaml** — give B2 five list retries and comment out the Ceph S3/Swift remotes:

```diff
@@ -20,6 +20,7 @@ backends:
 - backend: "b2"
   remote: "TestB2:"
   fastlist: true
+  listretries: 5
 - backend: "crypt"
   remote: "TestCryptDrive:"
   fastlist: true
@@ -146,12 +147,12 @@ backends:
 #   ignore:
 #     - TestIntegration/FsMkdir/FsPutFiles/FsCopy
 #     - TestIntegration/FsMkdir/FsPutFiles/SetTier
-- backend: "s3"
-  remote: "TestS3Ceph:"
-  fastlist: true
-  ignore:
-    - TestIntegration/FsMkdir/FsPutFiles/FsCopy
-    - TestIntegration/FsMkdir/FsPutFiles/SetTier
+# - backend: "s3"
+#   remote: "TestS3Ceph:"
+#   fastlist: true
+#   ignore:
+#     - TestIntegration/FsMkdir/FsPutFiles/FsCopy
+#     - TestIntegration/FsMkdir/FsPutFiles/SetTier
 - backend: "s3"
   remote: "TestS3Alibaba:"
   fastlist: true
@@ -172,11 +173,11 @@ backends:
 - backend: "swift"
   remote: "TestSwift:"
   fastlist: true
-- backend: "swift"
-  remote: "TestSwiftCeph:"
-  fastlist: true
-  ignore:
-    - TestIntegration/FsMkdir/FsPutFiles/FsCopy
+# - backend: "swift"
+#   remote: "TestSwiftCeph:"
+#   fastlist: true
+#   ignore:
+#     - TestIntegration/FsMkdir/FsPutFiles/FsCopy
 - backend: "yandex"
   remote: "TestYandex:"
   fastlist: false
```
**test_all run.go** — a per-run `ListRetries` takes precedence over the global `-list-retries` flag:

```diff
@@ -35,16 +35,17 @@ var (
 // if retries are needed.
 type Run struct {
 	// Config
-	Remote    string // name of the test remote
-	Backend   string // name of the backend
-	Path      string // path to the source directory
-	FastList  bool   // add -fast-list to tests
-	Short     bool   // add -short
-	NoRetries bool   // don't retry if set
-	OneOnly   bool   // only run test for this backend at once
-	NoBinary  bool   // set to not build a binary
-	SizeLimit int64  // maximum test file size
-	Ignore    map[string]struct{}
+	Remote      string // name of the test remote
+	Backend     string // name of the backend
+	Path        string // path to the source directory
+	FastList    bool   // add -fast-list to tests
+	Short       bool   // add -short
+	NoRetries   bool   // don't retry if set
+	OneOnly     bool   // only run test for this backend at once
+	NoBinary    bool   // set to not build a binary
+	SizeLimit   int64  // maximum test file size
+	Ignore      map[string]struct{}
+	ListRetries int // -list-retries if > 0
 	// Internals
 	CmdLine   []string
 	CmdString string
@@ -336,8 +337,12 @@ func (r *Run) Init() {
 		r.CmdLine = []string{"./" + r.BinaryName()}
 	}
 	r.CmdLine = append(r.CmdLine, prefix+"v", prefix+"timeout", timeout.String(), "-remote", r.Remote)
-	if *listRetries > 0 {
-		r.CmdLine = append(r.CmdLine, "-list-retries", fmt.Sprint(*listRetries))
+	listRetries := *listRetries
+	if r.ListRetries > 0 {
+		listRetries = r.ListRetries
+	}
+	if listRetries > 0 {
+		r.CmdLine = append(r.CmdLine, "-list-retries", fmt.Sprint(listRetries))
 	}
 	r.Try = 1
 	if *verbose {
```
**go.sum** (1 line changed):

```diff
@@ -400,6 +400,7 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
```
**lib/bucket** — new `Join` helper that, unlike `path.Join`, never runs `path.Clean` (see the comparison sketch after this block):

```diff
@@ -29,6 +29,23 @@ func Split(absPath string) (bucket, bucketPath string) {
 	return absPath[:slash], absPath[slash+1:]
 }

+// Join joins any number of path elements into a single path, adding a
+// separating slash if necessary. Empty elements are ignored.
+//
+// Unlike path.Join this does not run path.Clean on the elements so a
+// path called "." will be preserved.
+func Join(elem ...string) (out string) {
+	for _, e := range elem {
+		if e != "" {
+			if out != "" {
+				out += "/"
+			}
+			out += e
+		}
+	}
+	return out
+}
+
 // Cache stores whether buckets are available and their IDs
 type Cache struct {
 	mu sync.Mutex // mutex to protect created and deleted
```
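The difference from `path.Join` matters for object-store keys, where a segment named `.` or `..` is a legitimate key component rather than path navigation, yet `path.Clean` would rewrite it. A quick comparison, runnable on its own (the local `join` mirrors the `Join` above):

```go
package main

import (
	"fmt"
	"path"
)

// join mirrors lib/bucket's Join: plain "/" concatenation, skipping
// empty elements and never calling path.Clean.
func join(elem ...string) (out string) {
	for _, e := range elem {
		if e != "" {
			if out != "" {
				out += "/"
			}
			out += e
		}
	}
	return out
}

func main() {
	fmt.Println(path.Join("bucket", "dir", ".")) // "bucket/dir" - "." cleaned away
	fmt.Println(join("bucket", "dir", "."))      // "bucket/dir/." - key preserved
	fmt.Println(path.Join("bucket", "a", ".."))  // "bucket"
	fmt.Println(join("bucket", "a", ".."))       // "bucket/a/.."
}
```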
**lib/bucket tests**:

```diff
@@ -2,6 +2,7 @@ package bucket

 import (
 	"errors"
+	"fmt"
 	"testing"

 	"github.com/stretchr/testify/assert"
@@ -24,6 +25,25 @@ func TestSplit(t *testing.T) {
 	}
 }

+func TestJoin(t *testing.T) {
+	for _, test := range []struct {
+		in   []string
+		want string
+	}{
+		{in: []string{}, want: ""},
+		{in: []string{""}, want: ""},
+		{in: []string{"", ""}, want: ""},
+		{in: []string{"", "b"}, want: "b"},
+		{in: []string{"a", ""}, want: "a"},
+		{in: []string{"a", "b"}, want: "a/b"},
+		{in: []string{"a/b/c", "..", "."}, want: "a/b/c/../."},
+	} {
+		got := Join(test.in...)
+		what := fmt.Sprintf("Join(%q)", test.in)
+		assert.Equal(t, test.want, got, what)
+	}
+}
+
 func TestCache(t *testing.T) {
 	c := NewCache()
 	errBoom := errors.New("boom")
```