Mirror of https://github.com/rclone/rclone.git (synced 2026-01-21 11:53:17 +00:00)

Compare commits: fix-992-rc...fix-ssh-ds (12 commits)
| SHA1 |
|---|
| c4700f4bf1 |
| e4f1e19127 |
| 4a1b644bfb |
| 8c9c86c3d6 |
| 8a58e0235d |
| 52b7337d28 |
| 995cd0dc32 |
| 5eb558e058 |
| 33d9310c49 |
| aba89e2737 |
| d685e7b4b5 |
| 9e4b68a364 |
@@ -15,6 +15,7 @@ Current active maintainers of rclone are:
 | Ivan Andreev | @ivandeex | chunker & mailru backends |
 | Max Sum | @Max-Sum | union backend |
 | Fred | @creativeprojects | seafile backend |
+| Caleb Case | @calebcase | tardigrade backend |
**This is a work in progress Draft**
@@ -9,7 +9,6 @@ import (
 	"context"
 	"crypto/md5"
 	"encoding/base64"
-	"encoding/binary"
 	"encoding/hex"
 	"fmt"
 	"io"
@@ -36,6 +35,8 @@ import (
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/pool"
+	"github.com/rclone/rclone/lib/readers"
+	"golang.org/x/sync/errgroup"
 )
 
 const (
@@ -857,6 +858,11 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 	return fs, fs.Update(ctx, in, src, options...)
 }
 
+// PutStream uploads to the remote path with the modTime given of indeterminate size
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	return f.Put(ctx, in, src, options...)
+}
+
 // Mkdir creates the container if it doesn't exist
 func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	container, _ := f.split(dir)
@@ -1279,141 +1285,140 @@ type readSeeker struct {
 	io.Seeker
 }
 
+// increment the slice passed in as LSB binary
+func increment(xs []byte) {
+	for i, digit := range xs {
+		newDigit := digit + 1
+		xs[i] = newDigit
+		if newDigit >= digit {
+			// exit if no carry
+			break
+		}
+	}
+}
+
+var warnStreamUpload sync.Once
+
 // uploadMultipart uploads a file using multipart upload
 //
 // Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
-func (o *Object) uploadMultipart(in io.Reader, size int64, blob *azblob.BlobURL, httpHeaders *azblob.BlobHTTPHeaders) (err error) {
+func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, blob *azblob.BlobURL, httpHeaders *azblob.BlobHTTPHeaders) (err error) {
 	// Calculate correct chunkSize
 	chunkSize := int64(o.fs.opt.ChunkSize)
-	var totalParts int64
-	for {
-		// Calculate number of parts
-		var remainder int64
-		totalParts, remainder = size/chunkSize, size%chunkSize
-		if remainder != 0 {
-			totalParts++
-		}
-		if totalParts < maxTotalParts {
-			break
-		}
-		// Double chunk size if the number of parts is too big
-		chunkSize *= 2
-		if chunkSize > int64(maxChunkSize) {
-			return errors.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), totalParts, fs.SizeSuffix(chunkSize/2))
-		}
-	}
+	totalParts := -1
+
+	// Note that the max size of file is 4.75 TB (100 MB X 50,000
+	// blocks) and this is bigger than the max uncommitted block
+	// size (9.52 TB) so we do not need to part commit block lists
+	// or garbage collect uncommitted blocks.
+	//
+	// See: https://docs.microsoft.com/en-gb/rest/api/storageservices/put-block
+
+	// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
+	// buffers here (default 4MB). With a maximum number of parts (50,000) this will be a file of
+	// 195GB which seems like a not too unreasonable limit.
+	if size == -1 {
+		warnStreamUpload.Do(func() {
+			fs.Logf(o, "Streaming uploads using chunk size %v will have maximum file size of %v",
+				o.fs.opt.ChunkSize, fs.SizeSuffix(chunkSize*maxTotalParts))
+		})
+	} else {
+		// Adjust partSize until the number of parts is small enough.
+		if size/chunkSize >= maxTotalParts {
+			// Calculate partition size rounded up to the nearest MB
+			chunkSize = (((size / maxTotalParts) >> 20) + 1) << 20
+		}
+		totalParts = int(size / chunkSize)
+		if size%chunkSize != 0 {
+			totalParts++
+		}
+	}
 
 	fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, fs.SizeSuffix(chunkSize))
 
-	// https://godoc.org/github.com/Azure/azure-storage-blob-go/2017-07-29/azblob#example-BlockBlobURL
-	// Utilities are cloned from above example
-	// These helper functions convert a binary block ID to a base-64 string and vice versa
-	// NOTE: The blockID must be <= 64 bytes and ALL blockIDs for the block must be the same length
-	blockIDBinaryToBase64 := func(blockID []byte) string { return base64.StdEncoding.EncodeToString(blockID) }
-	// These helper functions convert an int block ID to a base-64 string and vice versa
-	blockIDIntToBase64 := func(blockID uint64) string {
-		binaryBlockID := (&[8]byte{})[:] // All block IDs are 8 bytes long
-		binary.LittleEndian.PutUint64(binaryBlockID, blockID)
-		return blockIDBinaryToBase64(binaryBlockID)
-	}
-
-	// block ID variables
-	var (
-		rawID   uint64
-		blockID = "" // id in base64 encoded form
-		blocks  []string
-	)
-
-	// increment the blockID
-	nextID := func() {
-		rawID++
-		blockID = blockIDIntToBase64(rawID)
-		blocks = append(blocks, blockID)
-	}
-
-	// Get BlockBlobURL, we will use default pipeline here
-	blockBlobURL := blob.ToBlockBlobURL()
-	ctx := context.Background()
-	ac := azblob.LeaseAccessConditions{} // Use default lease access conditions
-
 	// unwrap the accounting from the input, we use wrap to put it
 	// back on after the buffering
 	in, wrap := accounting.UnWrap(in)
 
 	// Upload the chunks
-	remaining := size
-	position := int64(0)
-	errs := make(chan error, 1)
-	var wg sync.WaitGroup
-	memPool := o.fs.getMemoryPool(chunkSize)
-outer:
-	for part := 0; part < int(totalParts); part++ {
-		// Check any errors
-		select {
-		case err = <-errs:
-			break outer
-		default:
-		}
-
-		reqSize := remaining
-		if reqSize >= chunkSize {
-			reqSize = chunkSize
-		}
-
+	var (
+		g, gCtx       = errgroup.WithContext(ctx)
+		remaining     = size                           // remaining size in file for logging only, -1 if size < 0
+		position      = int64(0)                       // position in file
+		memPool       = o.fs.getMemoryPool(chunkSize)  // pool to get memory from
+		finished      = false                          // set when we have read EOF
+		blocks        []string                         // list of blocks for finalize
+		blockBlobURL  = blob.ToBlockBlobURL()          // Get BlockBlobURL, we will use default pipeline here
+		ac            = azblob.LeaseAccessConditions{} // Use default lease access conditions
+		binaryBlockID = make([]byte, 8)                // block counter as LSB first 8 bytes
+	)
+	for part := 0; !finished; part++ {
 		// Get a block of memory from the pool and a token which limits concurrency
 		o.fs.uploadToken.Get()
 		buf := memPool.Get()
-		buf = buf[:reqSize]
 
-		// Read the chunk
-		_, err = io.ReadFull(in, buf)
-		if err != nil {
-			err = errors.Wrap(err, "multipart upload failed to read source")
-			break outer
+		free := func() {
+			memPool.Put(buf)       // return the buf
+			o.fs.uploadToken.Put() // return the token
 		}
 
+		// Fail fast, in case an errgroup managed function returns an error
+		// gCtx is cancelled. There is no point in uploading all the other parts.
+		if gCtx.Err() != nil {
+			free()
+			break
+		}
+
+		// Read the chunk
+		n, err := readers.ReadFill(in, buf) // this can never return 0, nil
+		if err == io.EOF {
+			if n == 0 { // end if no data
+				free()
+				break
+			}
+			finished = true
+		} else if err != nil {
+			free()
+			return errors.Wrap(err, "multipart upload failed to read source")
+		}
+		buf = buf[:n]
+
+		// increment the blockID and save the blocks for finalize
+		increment(binaryBlockID)
+		blockID := base64.StdEncoding.EncodeToString(binaryBlockID)
+		blocks = append(blocks, blockID)
+
 		// Transfer the chunk
-		nextID()
-		wg.Add(1)
-		go func(part int, position int64, blockID string) {
-			defer wg.Done()
-			defer o.fs.uploadToken.Put()
-			defer memPool.Put(buf)
-			fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, totalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
+		fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, totalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
+		g.Go(func() (err error) {
+			defer free()
 
 			// Upload the block, with MD5 for check
 			md5sum := md5.Sum(buf)
 			transactionalMD5 := md5sum[:]
-			err := o.fs.pacer.Call(func() (bool, error) {
+			err = o.fs.pacer.Call(func() (bool, error) {
 				bufferReader := bytes.NewReader(buf)
 				wrappedReader := wrap(bufferReader)
 				rs := readSeeker{wrappedReader, bufferReader}
 				_, err = blockBlobURL.StageBlock(ctx, blockID, &rs, ac, transactionalMD5)
 				return o.fs.shouldRetry(err)
 			})
-
 			if err != nil {
-				err = errors.Wrap(err, "multipart upload failed to upload part")
-				select {
-				case errs <- err:
-				default:
-				}
-				return
+				return errors.Wrap(err, "multipart upload failed to upload part")
 			}
-		}(part, position, blockID)
+			return nil
+		})
 
 		// ready for next block
-		remaining -= chunkSize
+		if size >= 0 {
+			remaining -= chunkSize
+		}
 		position += chunkSize
 	}
-	wg.Wait()
-	if err == nil {
-		select {
-		case err = <-errs:
-		default:
-		}
-	}
+	err = g.Wait()
	if err != nil {
		return err
	}
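The rewrite replaces the uint64 counter and its binary.LittleEndian helpers with a plain byte-slice counter, which is why the encoding/binary import is dropped above. A minimal standalone sketch (plain Go, not rclone code) of the block ID scheme: every ID comes out the same length, which the Azure Put Block API requires, and the 4 MiB × 50,000-block arithmetic gives the ~195 GiB streaming limit the comments mention.

    package main

    import (
    	"encoding/base64"
    	"fmt"
    )

    // increment treats xs as a little-endian counter and adds one,
    // carrying into the next byte on overflow (same logic as the diff).
    func increment(xs []byte) {
    	for i, digit := range xs {
    		newDigit := digit + 1
    		xs[i] = newDigit
    		if newDigit >= digit {
    			break // no carry
    		}
    	}
    }

    func main() {
    	binaryBlockID := make([]byte, 8) // 8-byte counter, as in the diff
    	for part := 1; part <= 3; part++ {
    		increment(binaryBlockID)
    		blockID := base64.StdEncoding.EncodeToString(binaryBlockID)
    		fmt.Printf("part %d -> block ID %q (len %d)\n", part, blockID, len(blockID))
    	}
    	// Streaming limit: ChunkSize * maxTotalParts = 4 MiB * 50,000 ≈ 195 GiB
    	fmt.Println("max streamed size:", int64(4<<20)*50000/(1<<30), "GiB")
    }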
@@ -1473,7 +1478,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	// is merged the SDK can't upload a single blob of exactly the chunk
 	// size, so upload with a multipart upload to work around.
 	// See: https://github.com/rclone/rclone/issues/2653
-	multipartUpload := size >= int64(o.fs.opt.UploadCutoff)
+	multipartUpload := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
 	if size == int64(o.fs.opt.ChunkSize) {
 		multipartUpload = true
 		fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
@@ -1483,7 +1488,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
 		if multipartUpload {
 			// If a large file upload in chunks
-			err = o.uploadMultipart(in, size, &blob, &httpHeaders)
+			err = o.uploadMultipart(ctx, in, size, &blob, &httpHeaders)
 		} else {
 			// Write a small blob in one transaction
 			blockBlobURL := blob.ToBlockBlobURL()
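Net effect of the two Update hunks: streams of unknown size (size < 0) now take the multipart path, alongside the existing cutoff and exact-chunk-size cases. A compact sketch of the decision (the function name and constants below are illustrative, not rclone's):

    package main

    import "fmt"

    // useMultipart mirrors the decision above: unknown-size streams and anything
    // at or above the cutoff use multipart, and a blob of exactly one chunk is
    // forced to multipart to work around the SDK bug in issue #2653.
    func useMultipart(size, uploadCutoff, chunkSize int64) bool {
    	return size < 0 || size >= uploadCutoff || size == chunkSize
    }

    func main() {
    	const mib = int64(1 << 20)
    	fmt.Println(useMultipart(-1, 256*mib, 4*mib))     // true: streaming upload
    	fmt.Println(useMultipart(4*mib, 256*mib, 4*mib))  // true: exactly one chunk
    	fmt.Println(useMultipart(10*mib, 256*mib, 4*mib)) // false: small single blob
    }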
@@ -1568,12 +1573,13 @@ func (o *Object) GetTier() string {
 
 // Check the interfaces are satisfied
 var (
-	_ fs.Fs        = &Fs{}
-	_ fs.Copier    = &Fs{}
-	_ fs.Purger    = &Fs{}
-	_ fs.ListRer   = &Fs{}
-	_ fs.Object    = &Object{}
-	_ fs.MimeTyper = &Object{}
-	_ fs.GetTierer = &Object{}
-	_ fs.SetTierer = &Object{}
+	_ fs.Fs          = &Fs{}
+	_ fs.Copier      = &Fs{}
+	_ fs.PutStreamer = &Fs{}
+	_ fs.Purger      = &Fs{}
+	_ fs.ListRer     = &Fs{}
+	_ fs.Object      = &Object{}
+	_ fs.MimeTyper   = &Object{}
+	_ fs.GetTierer   = &Object{}
+	_ fs.SetTierer   = &Object{}
 )
@@ -16,3 +16,20 @@ func (f *Fs) InternalTest(t *testing.T) {
 	enabled = f.Features().GetTier
 	assert.True(t, enabled)
 }
+
+func TestIncrement(t *testing.T) {
+	for _, test := range []struct {
+		in   []byte
+		want []byte
+	}{
+		{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
+		{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
+		{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
+		{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
+		{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
+		{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
+	} {
+		increment(test.in)
+		assert.Equal(t, test.want, test.in)
+	}
+}
@@ -705,24 +705,27 @@ var commandHelp = []fs.CommandHelp{
 	{
 		Name:  "encode",
 		Short: "Encode the given filename(s)",
-		Long: `Encode the given filename(s)
+		Long: `This encodes the filenames given as arguments returning a list of
+strings of the encoded results.
 
 Usage Example:
 
     rclone backend encode crypt: file1 [file2...]
     rclone rc backend/command command=encode fs=crypt: file1 [file2...]
 `,
 	},
 	{
 		Name:  "decode",
 		Short: "Decode the given filename(s)",
-		Long: `Decode the given filename(s)
+		Long: `This decodes the filenames given as arguments returning a list of
+strings of the decoded results. It will return an error if any of the
+inputs are invalid.
 
 Usage Example:
 
     rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
     rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
 `,
 	},
 }
@@ -738,20 +741,20 @@ var commandHelp = []fs.CommandHelp{
 func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
 	switch name {
 	case "decode":
-		out := make(map[string]string)
+		out := make([]string, 0, len(arg))
 		for _, encryptedFileName := range arg {
 			fileName, err := f.DecryptFileName(encryptedFileName)
 			if err != nil {
 				return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt : %s", encryptedFileName))
 			}
-			out[encryptedFileName] = fileName
+			out = append(out, fileName)
 		}
 		return out, nil
 	case "encode":
-		out := make(map[string]string)
+		out := make([]string, 0, len(arg))
 		for _, fileName := range arg {
 			encryptedFileName := f.EncryptFileName(fileName)
-			out[fileName] = encryptedFileName
+			out = append(out, encryptedFileName)
 		}
 		return out, nil
 	default:
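Returning []string instead of map[string]string means output lines now come back in argument order, which is friendlier for scripting. A hypothetical session (the encrypted names are invented for illustration):

    $ rclone backend encode crypt: file1.txt file2.txt
    0ro2hmqqm9f2hcrfg0oj1u8q6g
    q7vbd963tw4p50m8tjjsjjg9dg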
@@ -2244,7 +2244,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to read description for Google Doc")
 		}
-		createInfo.Description = info.Description
+		// set the description if there is one, or use the default if not
+		if info.Description != "" {
+			createInfo.Description = info.Description
+		}
 	} else {
 		// don't overwrite the description on copy for files
 		// this should work for docs but it doesn't - it is probably a bug in Google Drive
@@ -61,6 +61,12 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }
 
+// parsePath parses a putio 'url'
+func parsePath(path string) (root string) {
+	root = strings.Trim(path, "/")
+	return
+}
+
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
 	// defer log.Trace(name, "root=%v", root)("f=%+v, err=%v", &f, &err)
@@ -70,6 +76,7 @@ func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
 	if err != nil {
 		return nil, err
 	}
+	root = parsePath(root)
 	httpClient := fshttp.NewClient(fs.Config)
 	oAuthClient, _, err := oauthutil.NewClientWithBaseClient(name, m, putioConfig, httpClient)
 	if err != nil {
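parsePath only normalizes the configured root. A minimal sketch of its behaviour, using the same strings.Trim call:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // parsePath as added in the diff: strip leading and trailing
    // slashes from the configured root before use.
    func parsePath(path string) (root string) {
    	root = strings.Trim(path, "/")
    	return
    }

    func main() {
    	fmt.Printf("%q\n", parsePath("/foo/bar/")) // "foo/bar"
    	fmt.Printf("%q\n", parsePath(""))          // "" (the remote root)
    }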
@@ -2253,9 +2253,16 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 		tokens.Get()
 		buf := memPool.Get()
 
+		free := func() {
+			// return the memory and token
+			memPool.Put(buf)
+			tokens.Put()
+		}
+
+		// Fail fast, in case an errgroup managed function returns an error
+		// gCtx is cancelled. There is no point in uploading all the other parts.
+		if gCtx.Err() != nil {
+			free()
+			break
+		}
+
@@ -2264,10 +2271,12 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 		n, err = readers.ReadFill(in, buf) // this can never return 0, nil
 		if err == io.EOF {
 			if n == 0 && partNum != 1 { // end if no data and if not first chunk
+				free()
 				break
 			}
 			finished = true
 		} else if err != nil {
+			free()
 			return errors.Wrap(err, "multipart upload failed to read source")
 		}
 		buf = buf[:n]
@@ -2276,6 +2285,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 		fs.Debugf(o, "multipart upload starting chunk %d size %v offset %v/%v", partNum, fs.SizeSuffix(n), fs.SizeSuffix(off), fs.SizeSuffix(size))
 		off += int64(n)
 		g.Go(func() (err error) {
+			defer free()
 			partLength := int64(len(buf))
 
 			// create checksum of buffer for integrity checking
@@ -2313,11 +2323,6 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
 
 				return false, nil
 			})
 
-			// return the memory and token
-			memPool.Put(buf)
-			tokens.Put()
-
 			if err != nil {
 				return errors.Wrap(err, "multipart upload failed to upload part")
 			}
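The four s3 hunks converge on one pattern: acquire a concurrency token and a pooled buffer per part, then make every exit path — errgroup cancellation, read error, or the uploading goroutine finishing — release them exactly once through free(). A simplified standalone sketch (a buffered channel stands in for rclone's token pacer and memory pool):

    package main

    import (
    	"context"
    	"fmt"

    	"golang.org/x/sync/errgroup"
    )

    func main() {
    	g, gCtx := errgroup.WithContext(context.Background())
    	tokens := make(chan struct{}, 4) // concurrency limiter

    	for part := 0; part < 8; part++ {
    		tokens <- struct{}{} // acquire a token
    		buf := make([]byte, 4)
    		free := func() { <-tokens } // release; a real pool would also take buf back

    		// Fail fast: if any uploader already failed, gCtx is cancelled
    		// and there is no point submitting more parts.
    		if gCtx.Err() != nil {
    			free()
    			break
    		}

    		part := part
    		g.Go(func() error {
    			defer free() // released exactly once, whatever happens
    			fmt.Printf("uploading part %d (%d bytes)\n", part, len(buf))
    			return nil
    		})
    	}
    	if err := g.Wait(); err != nil {
    		fmt.Println("upload failed:", err)
    	}
    }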
@@ -1102,19 +1102,18 @@ func (o *Object) stat() error {
 //
 // it also updates the info field
 func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
-	if !o.fs.opt.SetModTime {
-		return nil
+	if o.fs.opt.SetModTime {
+		c, err := o.fs.getSftpConnection()
+		if err != nil {
+			return errors.Wrap(err, "SetModTime")
+		}
+		err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
+		o.fs.putSftpConnection(&c, err)
+		if err != nil {
+			return errors.Wrap(err, "SetModTime failed")
+		}
 	}
-	c, err := o.fs.getSftpConnection()
-	if err != nil {
-		return errors.Wrap(err, "SetModTime")
-	}
-	err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
-	o.fs.putSftpConnection(&c, err)
-	if err != nil {
-		return errors.Wrap(err, "SetModTime failed")
-	}
-	err = o.stat()
+	err := o.stat()
 	if err != nil {
 		return errors.Wrap(err, "SetModTime stat failed")
 	}
@@ -19,6 +19,7 @@ import (
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/operations"
+	"github.com/rclone/rclone/fs/walk"
 )
 
 // Register with Fs
@@ -597,17 +598,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 // Don't implement this unless you have a more efficient way
 // of listing recursively than doing a directory traversal.
 func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
-	for _, u := range f.upstreams {
-		if u.Features().ListR == nil {
-			return errors.Errorf("ListR Unsupported for branch: %s", u.Name())
-		}
-	}
 	var entriess [][]upstream.Entry
 	errs := Errors(make([]error, len(f.upstreams)))
 	var mutex sync.Mutex
 	multithread(len(f.upstreams), func(i int) {
 		u := f.upstreams[i]
-		err := u.Features().ListR(ctx, dir, func(entries fs.DirEntries) error {
+		var err error
+		callback := func(entries fs.DirEntries) error {
 			uEntries := make([]upstream.Entry, len(entries))
 			for j, e := range entries {
 				uEntries[j], _ = u.WrapEntry(e)
@@ -616,7 +613,13 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 			entriess = append(entriess, uEntries)
 			mutex.Unlock()
 			return nil
-		})
+		}
+		do := u.Features().ListR
+		if do != nil {
+			err = do(ctx, dir, callback)
+		} else {
+			err = walk.ListR(ctx, u, dir, true, -1, walk.ListAll, callback)
+		}
 		if err != nil {
 			errs[i] = errors.Wrap(err, u.Name())
 			return
@@ -813,6 +816,19 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		features = features.Mask(f) // Mask all upstream fs
 	}
 
+	// Enable ListR when upstreams either support ListR or are local
+	// But not when all upstreams are local
+	if features.ListR == nil {
+		for _, u := range upstreams {
+			if u.Features().ListR != nil {
+				features.ListR = f.ListR
+			} else if !u.Features().IsLocal {
+				features.ListR = nil
+				break
+			}
+		}
+	}
+
 	f.features = features
 
 	// Get common intersection of hashes
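The NewFs hunk decides when the union advertises ListR at all. Pulled out as a standalone predicate (the upstreamInfo type below is an illustrative stand-in for rclone's upstream type), the rule is: enable when at least one upstream has native ListR and every upstream without it is local (so the walk.ListR fallback above stays cheap); any remote upstream lacking ListR disables it.

    package main

    import "fmt"

    type upstreamInfo struct {
    	hasListR bool
    	isLocal  bool
    }

    // enableListR mirrors the loop in the diff as a pure predicate.
    func enableListR(upstreams []upstreamInfo) bool {
    	enabled := false
    	for _, u := range upstreams {
    		if u.hasListR {
    			enabled = true
    		} else if !u.isLocal {
    			return false // a remote upstream without ListR disables it
    		}
    	}
    	return enabled
    }

    func main() {
    	fmt.Println(enableListR([]upstreamInfo{{true, false}, {false, true}}))  // true
    	fmt.Println(enableListR([]upstreamInfo{{true, false}, {false, false}})) // false: remote without ListR
    	fmt.Println(enableListR([]upstreamInfo{{false, true}, {false, true}}))  // false: all local, none native
    }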
bin/check-merged.go (new file, 136 lines)
@@ -0,0 +1,136 @@
+// +build ignore
+
+// Attempt to work out if branches have already been merged
+package main
+
+import (
+	"bufio"
+	"errors"
+	"flag"
+	"fmt"
+	"log"
+	"os"
+	"os/exec"
+	"regexp"
+)
+
+var (
+	// Flags
+	master  = flag.String("master", "master", "Branch to work out if merged into")
+	version = "development version" // overridden by goreleaser
+)
+
+func usage() {
+	fmt.Fprintf(os.Stderr, `Usage: %s [options]
+Version: %s
+
+Attempt to work out if in the current git repo branches have been
+merged into master.
+
+Example usage:
+
+%s
+
+Full options:
+`, os.Args[0], version, os.Args[0])
+	flag.PrintDefaults()
+}
+
+var (
+	printedSep = false
+)
+
+const (
+	sep1 = "============================================================"
+	sep2 = "------------------------------------------------------------"
+)
+
+// Show the diff between two git revisions
+func gitDiffDiff(rev1, rev2 string) {
+	fmt.Printf("Diff of diffs of %q and %q\n", rev1, rev2)
+	cmd := exec.Command("bash", "-c", fmt.Sprintf(`diff <(git show "%s") <(git show "%s")`, rev1, rev2))
+	out, err := cmd.Output()
+	if err != nil {
+		var exitErr *exec.ExitError
+		if errors.As(err, &exitErr) && exitErr.ExitCode() == 1 {
+			// OK just different
+		} else {
+			log.Fatalf("git diff diff failed: %#v", err)
+		}
+	}
+	_, _ = os.Stdout.Write(out)
+}
+
+var reCommit = regexp.MustCompile(`commit ([0-9a-f]{32,})`)
+
+// Grep the git log for logLine
+func gitLogGrep(branch, rev, logLine string) {
+	cmd := exec.Command("git", "log", "--grep", regexp.QuoteMeta(logLine), *master)
+	out, err := cmd.Output()
+	if err != nil {
+		log.Fatalf("git log grep failed: %v", err)
+	}
+	if len(out) > 0 {
+		if !printedSep {
+			fmt.Println(sep1)
+			printedSep = true
+		}
+		fmt.Printf("Branch: %s - MAY BE MERGED to %s\nLog: %s\n\n", branch, *master, logLine)
+		_, _ = os.Stdout.Write(out)
+		match := reCommit.FindSubmatch(out)
+		if len(match) != 0 {
+			commit := string(match[1])
+			fmt.Println(sep2)
+			gitDiffDiff(rev, commit)
+		}
+		fmt.Println(sep1)
+	}
+}
+
+// * b2-fix-download-url 4209c768a [gone] b2: fix transfers when using download_url
+var reLine = regexp.MustCompile(`^[ *] (\S+)\s+([0-9a-f]+)\s+(?:\[[^]]+\] )?(.*)$`)
+
+// Run git branch -v, parse the output and check it in the log
+func gitBranch() {
+	cmd := exec.Command("git", "branch", "-v")
+	cmd.Stderr = os.Stderr
+	out, err := cmd.StdoutPipe()
+	if err != nil {
+		log.Fatalf("git branch pipe failed: %v", err)
+	}
+	if err := cmd.Start(); err != nil {
+		log.Fatalf("git branch failed: %v", err)
+	}
+	scanner := bufio.NewScanner(out)
+	for scanner.Scan() {
+		line := scanner.Text()
+		match := reLine.FindStringSubmatch(line)
+		if len(match) != 4 {
+			log.Printf("Invalid line %q", line)
+			continue
+		}
+		branch, rev, logLine := match[1], match[2], match[3]
+		if branch == *master {
+			continue
+		}
+		//fmt.Printf("branch = %q, rev = %q, logLine = %q\n", branch, rev, logLine)
+		gitLogGrep(branch, rev, logLine)
+	}
+	if err := scanner.Err(); err != nil {
+		log.Fatalf("failed reading git branch: %v", err)
+	}
+	if err := cmd.Wait(); err != nil {
+		log.Fatalf("git branch wait failed: %v", err)
+	}
+}
+
+func main() {
+	flag.Usage = usage
+	flag.Parse()
+	args := flag.Args()
+	if len(args) != 0 {
+		usage()
+		log.Fatal("Wrong number of arguments")
+	}
+	gitBranch()
+}
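A usage sketch for the new script (the branch and log line below are taken from the example comment embedded in the script itself; real output depends on your repository):

    $ go run bin/check-merged.go
    ============================================================
    Branch: b2-fix-download-url - MAY BE MERGED to master
    Log: b2: fix transfers when using download_url
    ...
    ============================================================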
@@ -18,12 +18,14 @@ import (
 
 var (
 	options []string
+	useJSON bool
 )
 
 func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
 	flags.StringArrayVarP(cmdFlags, &options, "option", "o", options, "Option in the form name=value or name.")
+	flags.BoolVarP(cmdFlags, &useJSON, "json", "", useJSON, "Always output in JSON format.")
 }
 
 var commandDefinition = &cobra.Command{
@@ -97,15 +99,23 @@ Note to run these commands on a running backend then see
 
 		}
 		// Output the result
-		switch x := out.(type) {
-		case nil:
-		case string:
-			fmt.Println(out)
-		case []string:
-			for line := range x {
-				fmt.Println(line)
-			}
-		default:
-		}
+		writeJSON := false
+		if useJSON {
+			writeJSON = true
+		} else {
+			switch x := out.(type) {
+			case nil:
+			case string:
+				fmt.Println(out)
+			case []string:
+				for _, line := range x {
+					fmt.Println(line)
+				}
+			default:
+				writeJSON = true
+			}
+		}
+		if writeJSON {
 			// Write indented JSON to the output
 			enc := json.NewEncoder(os.Stdout)
 			enc.SetIndent("", "\t")
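Note this hunk also fixes a bug: the old loop ranged over indices (for line := range x), printing integers instead of the strings. With the new --json flag, a []string result such as the crypt encode output is emitted as an indented JSON array rather than one item per line. An illustrative session (the encoded name is invented):

    $ rclone backend --json encode crypt: file1.txt
    [
    	"0ro2hmqqm9f2hcrfg0oj1u8q6g"
    ]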
@@ -336,7 +336,7 @@ operations more efficient.
 | Mail.ru Cloud | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
 | Mega | Yes | No | Yes | Yes | Yes | No | No | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | Yes |
 | Memory | No | Yes | No | No | No | Yes | Yes | No | No | No |
-| Microsoft Azure Blob Storage | Yes | Yes | No | No | No | Yes | No | No [#2178](https://github.com/rclone/rclone/issues/2178) | No | No |
+| Microsoft Azure Blob Storage | Yes | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | No | No |
 | Microsoft OneDrive | Yes | Yes | Yes | Yes | No [#575](https://github.com/rclone/rclone/issues/575) | No | No | Yes | Yes | Yes |
 | OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes |
 | Openstack Swift | Yes † | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | No |
vendor/golang.org/x/crypto/ssh/keys.go (generated, vendored; 2 lines changed)
@@ -412,7 +412,7 @@ func checkDSAParams(param *dsa.Parameters) error {
 	// SSH specifies FIPS 186-2, which only provided a single size
 	// (1024 bits) DSA key. FIPS 186-3 allows for larger key
 	// sizes, which would confuse SSH.
-	if l := param.P.BitLen(); l != 1024 {
+	if l := param.P.BitLen(); l != 1024 && l != 2048 {
 		return fmt.Errorf("ssh: unsupported DSA key size %d", l)
 	}
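This vendored change is what lets the fix-ssh-ds branch accept 2048-bit DSA keys instead of rejecting everything but FIPS 186-2's 1024 bits. A quick sketch of the relaxed check against freshly generated FIPS 186-3 parameters (crypto/dsa is deprecated but still in the standard library; parameter generation can take a while):

    package main

    import (
    	"crypto/dsa"
    	"crypto/rand"
    	"fmt"
    )

    func main() {
    	var params dsa.Parameters
    	// Generate 2048-bit (L2048N256) DSA parameters -- slow but stdlib-only.
    	if err := dsa.GenerateParameters(&params, rand.Reader, dsa.L2048N256); err != nil {
    		panic(err)
    	}
    	l := params.P.BitLen()
    	if l != 1024 && l != 2048 { // the relaxed check from the diff
    		fmt.Printf("ssh: unsupported DSA key size %d\n", l)
    		return
    	}
    	fmt.Printf("DSA key size %d accepted\n", l)
    }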