mirror of https://github.com/rclone/rclone.git synced 2026-02-06 03:33:54 +00:00

Compare commits


3 Commits

Author SHA1 Message Date
Nick Craig-Wood
2f0ef2e983 s3: fix v2 auth for multipart server side copy
Before this change, the v2 signer sorted the headers for signing as
joined key:value pairs. However, this put these two headers in the
wrong order:

    x-amz-copy-source-range:
    x-amz-copy-source:

This change sorts on the keys before joining the values, producing the
correct sort order:

    x-amz-copy-source:
    x-amz-copy-source-range:

This commit also adds some missing query parameters for signing that I
spotted in the s3cmd source.
2020-10-26 12:43:54 +00:00
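For illustration, a minimal Go sketch (not the rclone signer itself; the header values are invented) showing why sorting the joined key:value strings puts the range header first, while sorting on the keys alone gives the order shown above:

    package main

    import (
        "fmt"
        "sort"
    )

    func main() {
        // Old behaviour: sort the joined "key:value" strings. ':' (0x3A)
        // sorts after '-' (0x2D), so the -range header incorrectly comes first.
        joined := []string{
            "x-amz-copy-source:bucket/object",
            "x-amz-copy-source-range:bytes=0-5242879",
        }
        sort.Strings(joined)
        fmt.Println(joined) // [x-amz-copy-source-range:... x-amz-copy-source:...]

        // New behaviour: sort on the keys only, then join the values.
        keys := []string{"x-amz-copy-source-range", "x-amz-copy-source"}
        sort.Strings(keys)
        fmt.Println(keys) // [x-amz-copy-source x-amz-copy-source-range]
    }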
Nick Craig-Wood
a8db0be891 s3: fix KS3 problem where multipart uploads have valid md5sums as Etags
KS3 appears to return an Etag which is a valid MD5SUM for multipart
uploads. This confuses rclone, which expects an invalid MD5SUM
here.

This patch works around that by clearing the Etag on the object
returned from a multipart upload if it matches a valid MD5SUM.
2020-10-26 12:43:54 +00:00
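A standalone sketch of that workaround; matchMd5 and cleanEtag are illustrative names here, not necessarily the identifiers used in the patch:

    package main

    import (
        "fmt"
        "regexp"
        "strings"
    )

    // A 32-hex-digit string is a well formed MD5 sum (assumed to mirror the
    // check the patch adds).
    var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)

    // cleanEtag clears the Etag returned from a multipart upload when it
    // looks like a valid MD5 sum, since rclone relies on multipart Etags
    // being invalid MD5 sums.
    func cleanEtag(etag string, multipart bool) string {
        if multipart && matchMd5.MatchString(strings.Trim(strings.ToLower(etag), `"`)) {
            return ""
        }
        return etag
    }

    func main() {
        fmt.Println(cleanEtag(`"9e107d9d372bb6826bd81d3542a419d6"`, true))   // cleared (prints empty line)
        fmt.Println(cleanEtag(`"9e107d9d372bb6826bd81d3542a419d6-3"`, true)) // kept
    }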
Nick Craig-Wood
c85438d34b s3: fix v2 auth when using force_path_style = false
The V2 auth was failing with AWS and KS3 when used with
force_path_style = false (which is the default for both providers
now).

The V2 Auth needed the bucket prepended onto the string that gets
signed.

Note that endpoint must be set when using v2 auth with AWS. The code
warns about this.

This was worked out by observing the behaviour of s3cmd.
2020-10-26 12:43:54 +00:00
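A rough sketch of that idea (the host, endpoint and path values are made up; the real signer takes them from the backend Options and the request):

    package main

    import (
        "fmt"
        "strings"
    )

    // canonicalURI prepends the bucket, read from the front of a
    // virtual-hosted style hostname, onto the path that gets signed.
    func canonicalURI(host, endpoint, uri string) string {
        if uri == "" {
            uri = "/"
        }
        // With force_path_style = false the bucket is part of the hostname,
        // so it must be recovered and put back on the canonical resource.
        if endpoint != "" && host != endpoint {
            if i := strings.IndexRune(host, '.'); i >= 0 {
                uri = "/" + host[:i] + uri
            }
        }
        return uri
    }

    func main() {
        fmt.Println(canonicalURI("mybucket.s3.amazonaws.com", "s3.amazonaws.com", "/object.txt"))
        // /mybucket/object.txt
    }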
11 changed files with 137 additions and 277 deletions

View File

@@ -107,10 +107,10 @@ jobs:
- name: Set environment variables
shell: bash
run: |
echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi
- name: Install Libraries on Linux
shell: bash
@@ -135,10 +135,10 @@ jobs:
run: |
$ProgressPreference = 'SilentlyContinue'
choco install -y winfsp zip
echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
if ($env:GOARCH -eq "386") {
choco install -y mingw --forcex86 --force
echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
}
# Copy mingw32-make.exe to make.exe so the same command line
# can be used on Windows as on macOS and Linux
@@ -225,8 +225,8 @@ jobs:
- name: Set environment variables
shell: bash
run: |
echo 'GOPATH=${{ runner.workspace }}' >> $GITHUB_ENV
echo '${{ runner.workspace }}/bin' >> $GITHUB_PATH
echo '::set-env name=GOPATH::${{ runner.workspace }}'
echo '::add-path::${{ runner.workspace }}/bin'
- name: Cross-compile rclone
run: |

View File

@@ -93,8 +93,8 @@ build_dep:
# Get the release dependencies we only install on linux
release_dep_linux:
cd /tmp && go get github.com/goreleaser/nfpm/...
cd /tmp && go get github.com/github-release/github-release
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
go run bin/get-github-release.go -extract github-release aktau/github-release 'linux-amd64-github-release.tar.bz2'
# Get the release dependencies we only install on Windows
release_dep_windows:

View File

@@ -1130,12 +1130,6 @@ func (c *chunkingReader) wrapStream(ctx context.Context, in io.Reader, src fs.Ob
switch {
case c.fs.useMD5:
srcObj := fs.UnWrapObjectInfo(src)
if srcObj != nil && srcObj.Fs().Features().SlowHash {
fs.Debugf(src, "skip slow MD5 on source file, hashing in-transit")
c.hasher = md5.New()
break
}
if c.md5, _ = src.Hash(ctx, hash.MD5); c.md5 == "" {
if c.fs.hashFallback {
c.sha1, _ = src.Hash(ctx, hash.SHA1)
@@ -1144,12 +1138,6 @@ func (c *chunkingReader) wrapStream(ctx context.Context, in io.Reader, src fs.Ob
}
}
case c.fs.useSHA1:
srcObj := fs.UnWrapObjectInfo(src)
if srcObj != nil && srcObj.Fs().Features().SlowHash {
fs.Debugf(src, "skip slow SHA1 on source file, hashing in-transit")
c.hasher = sha1.New()
break
}
if c.sha1, _ = src.Hash(ctx, hash.SHA1); c.sha1 == "" {
if c.fs.hashFallback {
c.md5, _ = src.Hash(ctx, hash.MD5)

View File

@@ -102,7 +102,6 @@ func init() {
This feature is called "speedup" or "put by hash". It is especially efficient
in case of generally available files like popular books, video or audio clips,
because files are searched by hash in all accounts of all mailru users.
It is meaningless and ineffective if source file is unique or encrypted.
Please note that rclone may need local memory and disk space to calculate
content hash in advance and decide whether full upload is required.
Also, if rclone does not know file size in advance (e.g. in case of
@@ -193,7 +192,7 @@ This option must not be used by an ordinary user. It is intended only to
facilitate remote troubleshooting of backend issues. Strict meaning of
flags is not documented and not guaranteed to persist between releases.
Quirks will be removed when the backend grows stable.
Supported quirks: atomicmkdir binlist unknowndirs`,
Supported quirks: atomicmkdir binlist`,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -382,7 +381,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
type quirks struct {
binlist bool
atomicmkdir bool
unknowndirs bool
}
func (q *quirks) parseQuirks(option string) {
@@ -403,9 +401,6 @@ func (q *quirks) parseQuirks(option string) {
// use mkdir as a locking primitive and depend on its atomicity.
// Remove this quirk when the above issue is investigated.
q.atomicmkdir = true
case "unknowndirs":
// Accepts unknown resource types as folders.
q.unknowndirs = true
default:
// Ignore unknown flags
}
@@ -523,7 +518,7 @@ func (f *Fs) relPath(absPath string) (string, error) {
return "", fmt.Errorf("path %q should be under %q", absPath, f.root)
}
// metaServer returns URL of current meta server
// metaServer ...
func (f *Fs) metaServer(ctx context.Context) (string, error) {
f.metaMu.Lock()
defer f.metaMu.Unlock()
@@ -628,56 +623,33 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
if err != nil {
return nil, -1, err
}
mTime := int64(item.Mtime)
if mTime < 0 {
fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
mTime = 0
}
modTime := time.Unix(mTime, 0)
isDir, err := f.isDir(item.Kind, remote)
if err != nil {
return nil, -1, err
}
if isDir {
dir := fs.NewDir(remote, modTime).SetSize(item.Size)
return dir, item.Count.Files + item.Count.Folders, nil
}
binHash, err := mrhash.DecodeString(item.Hash)
if err != nil {
return nil, -1, err
}
file := &Object{
fs: f,
remote: remote,
hasMetaData: true,
size: item.Size,
mrHash: binHash,
modTime: modTime,
}
return file, -1, nil
}
// isDir returns true for directories, false for files
func (f *Fs) isDir(kind, path string) (bool, error) {
switch kind {
case "":
return false, errors.New("empty resource type")
case "file":
return false, nil
switch item.Kind {
case "folder":
// fall thru
case "camera-upload", "mounted", "shared":
fs.Debugf(f, "[%s]: folder has type %q", path, kind)
default:
if !f.quirks.unknowndirs {
return false, fmt.Errorf("unknown resource type %q", kind)
dir := fs.NewDir(remote, time.Unix(mTime, 0)).SetSize(item.Size)
dirSize := item.Count.Files + item.Count.Folders
return dir, dirSize, nil
case "file":
binHash, err := mrhash.DecodeString(item.Hash)
if err != nil {
return nil, -1, err
}
fs.Errorf(f, "[%s]: folder has unknown type %q", path, kind)
file := &Object{
fs: f,
remote: remote,
hasMetaData: true,
size: item.Size,
mrHash: binHash,
modTime: time.Unix(mTime, 0),
}
return file, -1, nil
default:
return nil, -1, fmt.Errorf("Unknown resource type %q", item.Kind)
}
return true, nil
}
// List the objects and directories in dir into entries.
@@ -745,11 +717,7 @@ func (f *Fs) listM1(ctx context.Context, dirPath string, offset int, limit int)
return nil, err
}
isDir, err := f.isDir(info.Body.Kind, dirPath)
if err != nil {
return nil, err
}
if !isDir {
if info.Body.Kind != "folder" {
return nil, fs.ErrorIsFile
}
@@ -1602,28 +1570,23 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
var (
fileBuf []byte
fileHash []byte
newHash []byte
slowHash bool
localSrc bool
fileBuf []byte
fileHash []byte
newHash []byte
trySpeedup bool
)
if srcObj := fs.UnWrapObjectInfo(src); srcObj != nil {
srcFeatures := srcObj.Fs().Features()
slowHash = srcFeatures.SlowHash
localSrc = srcFeatures.IsLocal
}
// Try speedup if it's globally enabled but skip extra post
// request if file is small and fits in the metadata request
trySpeedup := o.fs.opt.SpeedupEnable && size > mrhash.Size
// Try to get the hash if it's instant
if trySpeedup && !slowHash {
// Don't disturb the source if file fits in hash.
// Skip an extra speedup request if file fits in hash.
if size > mrhash.Size {
// Request hash from source.
if srcHash, err := src.Hash(ctx, MrHashType); err == nil && srcHash != "" {
fileHash, _ = mrhash.DecodeString(srcHash)
}
if fileHash != nil {
// Try speedup if it's globally enabled and source hash is available.
trySpeedup = o.fs.opt.SpeedupEnable
if trySpeedup && fileHash != nil {
if o.putByHash(ctx, fileHash, src, "source") {
return nil
}
@@ -1632,22 +1595,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Need to calculate hash, check whether file is still eligible for speedup
trySpeedup = trySpeedup && o.fs.eligibleForSpeedup(o.Remote(), size, options...)
// Attempt to put by hash if file is local and eligible
if trySpeedup && localSrc {
if srcHash, err := src.Hash(ctx, MrHashType); err == nil && srcHash != "" {
fileHash, _ = mrhash.DecodeString(srcHash)
}
if fileHash != nil && o.putByHash(ctx, fileHash, src, "localfs") {
return nil
}
// If local file hashing has failed, it's pointless to try anymore
trySpeedup = false
if trySpeedup {
trySpeedup = o.fs.eligibleForSpeedup(o.Remote(), size, options...)
}
// Attempt to put by calculating hash in memory
if trySpeedup && size <= int64(o.fs.opt.SpeedupMaxMem) {
//fs.Debugf(o, "attempt to put by hash from memory")
fileBuf, err = ioutil.ReadAll(in)
if err != nil {
return err
@@ -1777,7 +1731,6 @@ func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
return nil
}
// putByHash is a thin wrapper around addFileMetaData
func (o *Object) putByHash(ctx context.Context, mrHash []byte, info fs.ObjectInfo, method string) bool {
oNew := new(Object)
*oNew = *o
@@ -2204,7 +2157,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Discard the beginning of the data
_, err = io.CopyN(ioutil.Discard, wrapStream, start)
if err != nil {
closeBody(res)
return nil, err
}
}

View File

@@ -1190,7 +1190,8 @@ rclone does if you know the bucket exists already.
// - trailing / encoding
// so that AWS keys are always valid file names
Default: encoder.EncodeInvalidUtf8 |
encoder.EncodeSlash,
encoder.EncodeSlash |
encoder.EncodeDot,
}, {
Name: "memory_pool_flush_time",
Default: memoryPoolFlushTime,
@@ -1386,8 +1387,7 @@ func parsePath(path string) (root string) {
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
fs.Debugf(nil, "SPLIT %q %q", f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath))
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}
@@ -1500,9 +1500,6 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
awsConfig.WithEndpoint(opt.Endpoint)
}
// Allow URI with "." etc
awsConfig.DisableRestProtocolURICleaning = aws.Bool(true)
// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
awsSessionOpts := session.Options{
Config: *awsConfig,
@@ -1526,7 +1523,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
if req.Config.Credentials == credentials.AnonymousCredentials {
return
}
sign(v.AccessKeyID, v.SecretAccessKey, req.HTTPRequest)
v2sign(opt, req.HTTPRequest)
}
c.Handlers.Sign.Clear()
c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
@@ -1737,7 +1734,7 @@ type listFn func(remote string, object *s3.Object, isDirectory bool) error
// bucket to the start.
//
// Set recurse to read sub directories
func (f *Fs) list(ctx context.Context, bucketName, directory, prefix string, addBucket bool, recurse bool, fn listFn) error {
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) error {
if prefix != "" {
prefix += "/"
}
@@ -1768,7 +1765,7 @@ func (f *Fs) list(ctx context.Context, bucketName, directory, prefix string, add
for {
// FIXME need to implement ALL loop
req := s3.ListObjectsInput{
Bucket: &bucketName,
Bucket: &bucket,
Delimiter: &delimiter,
Prefix: &directory,
MaxKeys: &f.opt.ListChunk,
@@ -1808,7 +1805,7 @@ func (f *Fs) list(ctx context.Context, bucketName, directory, prefix string, add
if reqErr, ok := err.(awserr.RequestFailure); ok {
// 301 if wrong region for bucket
if reqErr.StatusCode() == http.StatusMovedPermanently {
fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucketName)
fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucket)
return nil
}
}
@@ -1836,7 +1833,7 @@ func (f *Fs) list(ctx context.Context, bucketName, directory, prefix string, add
}
remote = remote[len(prefix):]
if addBucket {
remote = bucket.Join(bucketName, remote)
remote = path.Join(bucket, remote)
}
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
@@ -1864,7 +1861,7 @@ func (f *Fs) list(ctx context.Context, bucketName, directory, prefix string, add
remote = remote[len(prefix):]
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if addBucket {
remote = bucket.Join(bucketName, remote)
remote = path.Join(bucket, remote)
}
// is this a directory marker?
if isDirectory && object.Size != nil && *object.Size == 0 {
@@ -2150,7 +2147,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
req.Bucket = &dstBucket
req.ACL = &f.opt.ACL
req.Key = &dstPath
source := pathEscape(bucket.Join(srcBucket, srcPath))
source := pathEscape(path.Join(srcBucket, srcPath))
req.CopySource = &source
if f.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &f.opt.ServerSideEncryption
@@ -3205,6 +3202,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Read the metadata from the newly created object
o.meta = nil // wipe old metadata
err = o.readMetaData(ctx)
// Empty an Etag which is a valid md5sum for multipart
// uploads. This works around a bug in KS3 where the ETag is a
// correctly formed md5sum for multipart uploads
if multipart && matchMd5.MatchString(strings.Trim(strings.ToLower(o.etag), `"`)) {
o.etag = ""
}
return err
}

View File

@@ -9,7 +9,10 @@ import (
"net/http"
"sort"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
)
// URL parameters that need to be added to the signature
@@ -33,12 +36,20 @@ var s3ParamsToSign = map[string]struct{}{
"response-cache-control": {},
"response-content-disposition": {},
"response-content-encoding": {},
"lifecycle": {},
"website": {},
"delete": {},
"cors": {},
"restore": {},
}
// Warn once about empty endpoint
var warnEmptyEndpointOnce sync.Once
// sign signs requests using v2 auth
//
// Cobbled together from goamz and aws-sdk-go
func sign(AccessKey, SecretKey string, req *http.Request) {
func v2sign(opt *Options, req *http.Request) {
// Set date
date := time.Now().UTC().Format(time.RFC1123)
req.Header.Set("Date", date)
@@ -48,11 +59,26 @@ func sign(AccessKey, SecretKey string, req *http.Request) {
if uri == "" {
uri = "/"
}
// If not using path style then need to stick the bucket on
// the start of the requests if doing a bucket based query
if !opt.ForcePathStyle {
if opt.Endpoint == "" {
warnEmptyEndpointOnce.Do(func() {
fs.Logf(nil, "If using v2 auth with AWS and force_path_style=false, endpoint must be set in the config")
})
} else if req.URL.Host != opt.Endpoint {
// read the bucket off the start of the hostname
i := strings.IndexRune(req.URL.Host, '.')
if i >= 0 {
uri = "/" + req.URL.Host[:i] + uri
}
}
}
// Look through headers of interest
var md5 string
var contentType string
var headersToSign []string
var headersToSign [][2]string // slice of key, value pairs
for k, v := range req.Header {
k = strings.ToLower(k)
switch k {
@@ -63,15 +89,26 @@ func sign(AccessKey, SecretKey string, req *http.Request) {
default:
if strings.HasPrefix(k, "x-amz-") {
vall := strings.Join(v, ",")
headersToSign = append(headersToSign, k+":"+vall)
headersToSign = append(headersToSign, [2]string{k, vall})
}
}
}
// Make headers of interest into canonical string
var joinedHeadersToSign string
if len(headersToSign) > 0 {
sort.StringSlice(headersToSign).Sort()
joinedHeadersToSign = strings.Join(headersToSign, "\n") + "\n"
// sort by keys
sort.Slice(headersToSign, func(i, j int) bool {
return headersToSign[i][0] < headersToSign[j][0]
})
// join into key:value\n
var out strings.Builder
for _, kv := range headersToSign {
out.WriteString(kv[0])
out.WriteRune(':')
out.WriteString(kv[1])
out.WriteRune('\n')
}
joinedHeadersToSign = out.String()
}
// Look for query parameters which need to be added to the signature
@@ -96,11 +133,11 @@ func sign(AccessKey, SecretKey string, req *http.Request) {
// Make signature
payload := req.Method + "\n" + md5 + "\n" + contentType + "\n" + date + "\n" + joinedHeadersToSign + uri
hash := hmac.New(sha1.New, []byte(SecretKey))
hash := hmac.New(sha1.New, []byte(opt.SecretAccessKey))
_, _ = hash.Write([]byte(payload))
signature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size()))
base64.StdEncoding.Encode(signature, hash.Sum(nil))
// Set signature in request
req.Header.Set("Authorization", "AWS "+AccessKey+":"+string(signature))
req.Header.Set("Authorization", "AWS "+opt.AccessKeyID+":"+string(signature))
}

View File

@@ -83,8 +83,8 @@ func AddFlags(flagSet *pflag.FlagSet) {
rc.AddOption("mount", &Opt)
flags.BoolVarP(flagSet, &Opt.DebugFUSE, "debug-fuse", "", Opt.DebugFUSE, "Debug the FUSE internals - needs -v.")
flags.BoolVarP(flagSet, &Opt.AllowNonEmpty, "allow-non-empty", "", Opt.AllowNonEmpty, "Allow mounting over a non-empty directory (not Windows).")
flags.BoolVarP(flagSet, &Opt.AllowRoot, "allow-root", "", Opt.AllowRoot, "Allow access to root user (not Windows).")
flags.BoolVarP(flagSet, &Opt.AllowOther, "allow-other", "", Opt.AllowOther, "Allow access to other users (not Windows).")
flags.BoolVarP(flagSet, &Opt.AllowRoot, "allow-root", "", Opt.AllowRoot, "Allow access to root user.")
flags.BoolVarP(flagSet, &Opt.AllowOther, "allow-other", "", Opt.AllowOther, "Allow access to other users.")
flags.BoolVarP(flagSet, &Opt.DefaultPermissions, "default-permissions", "", Opt.DefaultPermissions, "Makes kernel enforce access control based on the file mode.")
flags.BoolVarP(flagSet, &Opt.WritebackCache, "write-back-cache", "", Opt.WritebackCache, "Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used.")
flags.FVarP(flagSet, &Opt.MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads.")
@@ -101,7 +101,7 @@ func AddFlags(flagSet *pflag.FlagSet) {
}
}
// Check if folder is empty
// Check is folder is empty
func checkMountEmpty(mountpoint string) error {
fp, fpErr := os.Open(mountpoint)
@@ -359,24 +359,15 @@ When --vfs-read-chunk-size-limit 500M is specified, the result would be
defer cmd.StartStats()()
}
// Inform about ignored flags on Windows,
// and if not on Windows and not --allow-non-empty flag is used
// verify that mountpoint is empty.
if runtime.GOOS == "windows" {
if opt.AllowNonEmpty {
fs.Logf(nil, "--allow-non-empty flag does nothing on Windows")
}
if opt.AllowRoot {
fs.Logf(nil, "--allow-root flag does nothing on Windows")
}
if opt.AllowOther {
fs.Logf(nil, "--allow-other flag does nothing on Windows")
}
} else if !opt.AllowNonEmpty {
// Skip checkMountEmpty if --allow-non-empty flag is used or if
// the Operating System is Windows
if !opt.AllowNonEmpty && runtime.GOOS != "windows" {
err := checkMountEmpty(mountpoint)
if err != nil {
log.Fatalf("Fatal error: %v", err)
}
} else if opt.AllowNonEmpty && runtime.GOOS == "windows" {
fs.Logf(nil, "--allow-non-empty flag does nothing on Windows")
}
// Work out the volume name, removing special

View File

@@ -71,7 +71,7 @@ func helpText() (tr []string) {
" ←,h to return",
" c toggle counts",
" g toggle graph",
" n,s,C,A sort by name,size,count,average size",
" n,s,C sort by name,size,count",
" d delete file/directory",
}
if !clipboard.Unsupported {
@@ -88,28 +88,27 @@ func helpText() (tr []string) {
// UI contains the state of the user interface
type UI struct {
f fs.Fs // fs being displayed
fsName string // human name of Fs
root *scan.Dir // root directory
d *scan.Dir // current directory being displayed
path string // path of current directory
showBox bool // whether to show a box
boxText []string // text to show in box
boxMenu []string // box menu options
boxMenuButton int
boxMenuHandler func(fs fs.Fs, path string, option int) (string, error)
entries fs.DirEntries // entries of current directory
sortPerm []int // order to display entries in after sorting
invSortPerm []int // inverse order
dirListHeight int // height of listing
listing bool // whether listing is in progress
showGraph bool // toggle showing graph
showCounts bool // toggle showing counts
sortByName int8 // +1 for normal, 0 for off, -1 for reverse
sortBySize int8
sortByCount int8
sortByAverageSize int8
dirPosMap map[string]dirPos // store for directory positions
f fs.Fs // fs being displayed
fsName string // human name of Fs
root *scan.Dir // root directory
d *scan.Dir // current directory being displayed
path string // path of current directory
showBox bool // whether to show a box
boxText []string // text to show in box
boxMenu []string // box menu options
boxMenuButton int
boxMenuHandler func(fs fs.Fs, path string, option int) (string, error)
entries fs.DirEntries // entries of current directory
sortPerm []int // order to display entries in after sorting
invSortPerm []int // inverse order
dirListHeight int // height of listing
listing bool // whether listing is in progress
showGraph bool // toggle showing graph
showCounts bool // toggle showing counts
sortByName int8 // +1 for normal, 0 for off, -1 for reverse
sortBySize int8
sortByCount int8
dirPosMap map[string]dirPos // store for directory positions
}
// Where we have got to in the directory listing
@@ -497,17 +496,9 @@ type ncduSort struct {
// Less is part of sort.Interface.
func (ds *ncduSort) Less(i, j int) bool {
var iAvgSize, jAvgSize float64
isize, icount, _, _ := ds.d.AttrI(ds.sortPerm[i])
jsize, jcount, _, _ := ds.d.AttrI(ds.sortPerm[j])
iname, jname := ds.entries[ds.sortPerm[i]].Remote(), ds.entries[ds.sortPerm[j]].Remote()
if icount > 0 {
iAvgSize = float64(isize / icount)
}
if jcount > 0 {
jAvgSize = float64(jsize / jcount)
}
switch {
case ds.u.sortByName < 0:
return iname > jname
@@ -529,18 +520,6 @@ func (ds *ncduSort) Less(i, j int) bool {
if icount != jcount {
return icount > jcount
}
case ds.u.sortByAverageSize < 0:
if iAvgSize != jAvgSize {
return iAvgSize < jAvgSize
}
// if avgSize is equal, sort by size
return isize < jsize
case ds.u.sortByAverageSize > 0:
if iAvgSize != jAvgSize {
return iAvgSize > jAvgSize
}
// if avgSize is equal, sort by size
return isize > jsize
}
// if everything equal, sort by name
return iname < jname
@@ -649,7 +628,6 @@ func (u *UI) toggleSort(sortType *int8) {
u.sortBySize = 0
u.sortByCount = 0
u.sortByName = 0
u.sortByAverageSize = 0
if old == 0 {
*sortType = 1
} else {
@@ -764,8 +742,6 @@ outer:
u.toggleSort(&u.sortBySize)
case 'C':
u.toggleSort(&u.sortByCount)
case 'A':
u.toggleSort(&u.sortByAverageSize)
case 'y':
u.copyPath()
case 'Y':

View File

@@ -5,56 +5,6 @@ description: "Rclone Changelog"
# Changelog
## v1.53.2 - 2020-10-26
[See commits](https://github.com/rclone/rclone/compare/v1.53.1...v1.53.2)
* Bug Fixes
    * accounting
        * Fix incorrect speed and transferTime in core/stats (Nick Craig-Wood)
        * Stabilize display order of transfers on Windows (Nick Craig-Wood)
    * operations
        * Fix use of --suffix without --backup-dir (Nick Craig-Wood)
        * Fix spurious "--checksum is in use but the source and destination have no hashes in common" (Nick Craig-Wood)
    * build
        * Work around GitHub actions brew problem (Nick Craig-Wood)
        * Stop using set-env and set-path in the GitHub actions (Nick Craig-Wood)
* Mount
    * mount2: Fix the swapped UID / GID values (Russell Cattelan)
* VFS
    * Detect and recover from a file being removed externally from the cache (Nick Craig-Wood)
    * Fix a deadlock vulnerability in downloaders.Close (Leo Luan)
    * Fix a race condition in retryFailedResets (Leo Luan)
    * Fix missed concurrency control between some item operations and reset (Leo Luan)
    * Add exponential backoff during ENOSPC retries (Leo Luan)
    * Add a missed update of used cache space (Leo Luan)
    * Fix --no-modtime to not attempt to set modtimes (as documented) (Nick Craig-Wood)
* Local
    * Fix sizes and syncing with --links option on Windows (Nick Craig-Wood)
* Chunker
    * Disable ListR to fix missing files on GDrive (workaround) (Ivan Andreev)
    * Fix upload over crypt (Ivan Andreev)
* Fichier
    * Increase maximum file size from 100GB to 300GB (gyutw)
* Jottacloud
    * Remove clientSecret from config when upgrading to token based authentication (buengese)
    * Avoid double url escaping of device/mountpoint (albertony)
    * Remove DirMove workaround as it's not required anymore - also (buengese)
* Mailru
    * Fix uploads after recent changes on server (Ivan Andreev)
    * Fix range requests after june changes on server (Ivan Andreev)
    * Fix invalid timestamp on corrupted files (fixes) (Ivan Andreev)
* Onedrive
    * Fix disk usage for sharepoint (Nick Craig-Wood)
* S3
    * Add missing regions for AWS (Anagh Kumar Baranwal)
* Seafile
    * Fix accessing libraries > 2GB on 32 bit systems (Muffin King)
* SFTP
    * Always convert the checksum to lower case (buengese)
* Union
    * Create root directories if none exist (Nick Craig-Wood)
## v1.53.1 - 2020-09-13
[See commits](https://github.com/rclone/rclone/compare/v1.53.0...v1.53.1)

View File

@@ -29,23 +29,6 @@ func Split(absPath string) (bucket, bucketPath string) {
return absPath[:slash], absPath[slash+1:]
}
// Join joins any number of path elements into a single path, adding a
// separating slash if necessary. Empty elements are ignored.
//
// Unlike path.Join this does not run path.Clean on the elements so a
// path called "." will be preserved.
func Join(elem ...string) (out string) {
for _, e := range elem {
if e != "" {
if out != "" {
out += "/"
}
out += e
}
}
return out
}
// Cache stores whether buckets are available and their IDs
type Cache struct {
mu sync.Mutex // mutex to protect created and deleted

View File

@@ -2,7 +2,6 @@ package bucket
import (
"errors"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
@@ -25,25 +24,6 @@ func TestSplit(t *testing.T) {
}
}
func TestJoin(t *testing.T) {
for _, test := range []struct {
in []string
want string
}{
{in: []string{}, want: ""},
{in: []string{""}, want: ""},
{in: []string{"", ""}, want: ""},
{in: []string{"", "b"}, want: "b"},
{in: []string{"a", ""}, want: "a"},
{in: []string{"a", "b"}, want: "a/b"},
{in: []string{"a/b/c", "..", "."}, want: "a/b/c/../."},
} {
got := Join(test.in...)
what := fmt.Sprintf("Join(%q)", test.in)
assert.Equal(t, test.want, got, what)
}
}
func TestCache(t *testing.T) {
c := NewCache()
errBoom := errors.New("boom")