Compare commits: fix-ssh-ds ... vfs-refact (1 commit)

Commit: 40b9e312c6
@@ -5,7 +5,7 @@ WORKDIR /go/src/github.com/rclone/rclone/
RUN make quicktest
RUN \
	CGO_ENABLED=0 \
	CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
	make
RUN ./rclone version
@@ -2,20 +2,17 @@

Current active maintainers of rclone are:

| Name | GitHub ID | Specific Responsibilities |
| :--------------- | :---------------- | :-------------------------- |
| Nick Craig-Wood | @ncw | overall project health |
| Stefan Breunig | @breunigs | |
| Ishuah Kariuki | @ishuah | |
| Remus Bunduc | @remusb | cache backend |
| Fabian Möller | @B4dM4n | |
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud & yandex backends |
| Ivan Andreev | @ivandeex | chunker & mailru backends |
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | tardigrade backend |

| Name | GitHub ID | Specific Responsibilities |
| :--------------- | :---------- | :-------------------------- |
| Nick Craig-Wood | @ncw | overall project health |
| Stefan Breunig | @breunigs | |
| Ishuah Kariuki | @ishuah | |
| Remus Bunduc | @remusb | cache backend |
| Fabian Möller | @B4dM4n | |
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud & yandex backends |
| Ivan Andreev | @ivandeex | chunker & mailru backends |

**This is a work in progress Draft**
@@ -59,11 +59,9 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
@@ -31,12 +31,10 @@ import (
	_ "github.com/rclone/rclone/backend/putio"
	_ "github.com/rclone/rclone/backend/qingstor"
	_ "github.com/rclone/rclone/backend/s3"
	_ "github.com/rclone/rclone/backend/seafile"
	_ "github.com/rclone/rclone/backend/sftp"
	_ "github.com/rclone/rclone/backend/sharefile"
	_ "github.com/rclone/rclone/backend/sugarsync"
	_ "github.com/rclone/rclone/backend/swift"
	_ "github.com/rclone/rclone/backend/tardigrade"
	_ "github.com/rclone/rclone/backend/union"
	_ "github.com/rclone/rclone/backend/webdav"
	_ "github.com/rclone/rclone/backend/yandex"
@@ -9,6 +9,7 @@ import (
	"context"
	"crypto/md5"
	"encoding/base64"
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"io"

@@ -34,9 +35,6 @@ import (
	"github.com/rclone/rclone/lib/bucket"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/pool"
	"github.com/rclone/rclone/lib/readers"
	"golang.org/x/sync/errgroup"
)

const (

@@ -61,8 +59,6 @@ const (
	emulatorAccount      = "devstoreaccount1"
	emulatorAccountKey   = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
	emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
	memoryPoolFlushTime  = fs.Duration(time.Minute) // flush the cached buffers after this long
	memoryPoolUseMmap    = false
)

// Register with Fs
@@ -129,28 +125,6 @@ If blobs are in "archive tier" at remote, trying to perform data transfer
operations from remote will not be allowed. User should first restore by
tiering blob to "Hot" or "Cool".`,
	Advanced: true,
}, {
	Name: "disable_checksum",
	Help: `Don't store MD5 checksum with object metadata.

Normally rclone will calculate the MD5 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
	Default:  false,
	Advanced: true,
}, {
	Name:     "memory_pool_flush_time",
	Default:  memoryPoolFlushTime,
	Advanced: true,
	Help: `How often internal memory buffer pools will be flushed.
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
}, {
	Name:     "memory_pool_use_mmap",
	Default:  memoryPoolUseMmap,
	Advanced: true,
	Help:     `Whether to use mmap buffers in internal memory pool.`,
}, {
	Name: config.ConfigEncoding,
	Help: config.ConfigEncodingHelp,
@@ -167,19 +141,16 @@ This option controls how often unused buffers will be removed from the pool.`,

// Options defines the configuration for this backend
type Options struct {
	Account string `config:"account"`
	Key string `config:"key"`
	Endpoint string `config:"endpoint"`
	SASURL string `config:"sas_url"`
	UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
	ChunkSize fs.SizeSuffix `config:"chunk_size"`
	ListChunkSize uint `config:"list_chunk"`
	AccessTier string `config:"access_tier"`
	UseEmulator bool `config:"use_emulator"`
	DisableCheckSum bool `config:"disable_checksum"`
	MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
	MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
	Enc encoder.MultiEncoder `config:"encoding"`
	Account string `config:"account"`
	Key string `config:"key"`
	Endpoint string `config:"endpoint"`
	SASURL string `config:"sas_url"`
	UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
	ChunkSize fs.SizeSuffix `config:"chunk_size"`
	ListChunkSize uint `config:"list_chunk"`
	AccessTier string `config:"access_tier"`
	UseEmulator bool `config:"use_emulator"`
	Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote azure server
@@ -198,7 +169,6 @@ type Fs struct {
	cache *bucket.Cache // cache for container creation status
	pacer *fs.Pacer // To pace and retry the API calls
	uploadToken *pacer.TokenDispenser // control concurrency
	pool *pool.Pool // memory pool
}

// Object describes a azure object
@@ -287,12 +257,6 @@ var retryErrorCodes = []int{
func (f *Fs) shouldRetry(err error) (bool, error) {
	// FIXME interpret special errors - more to do here
	if storageErr, ok := err.(azblob.StorageError); ok {
		switch storageErr.ServiceCode() {
		case "InvalidBlobOrBlock":
			// These errors happen sometimes in multipart uploads
			// because of block concurrency issues
			return true, err
		}
		statusCode := storageErr.Response().StatusCode
		for _, e := range retryErrorCodes {
			if statusCode == e {
@@ -418,12 +382,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
		client: fshttp.NewClient(fs.Config),
		cache: bucket.NewCache(),
		cntURLcache: make(map[string]*azblob.ContainerURL, 1),
		pool: pool.New(
			time.Duration(opt.MemoryPoolFlushTime),
			int(opt.ChunkSize),
			fs.Config.Transfers,
			opt.MemoryPoolUseMmap,
		),
	}
	f.setRoot(root)
	f.features = (&fs.Features{
@@ -858,11 +816,6 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
	return fs, fs.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	container, _ := f.split(dir)
@@ -872,10 +825,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// makeContainer creates the container if it doesn't exist
func (f *Fs) makeContainer(ctx context.Context, container string) error {
	return f.cache.Create(container, func() error {
		// If this is a SAS URL limited to a container then assume it is already created
		if f.isLimited {
			return nil
		}
		// now try to create the container
		return f.pacer.Call(func() (bool, error) {
			_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
@@ -1029,19 +978,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
	return f.NewObject(ctx, remote)
}

func (f *Fs) getMemoryPool(size int64) *pool.Pool {
	if size == int64(f.opt.ChunkSize) {
		return f.pool
	}

	return pool.New(
		time.Duration(f.opt.MemoryPoolFlushTime),
		int(size),
		fs.Config.Transfers,
		f.opt.MemoryPoolUseMmap,
	)
}

// ------------------------------------------------------------

// Fs returns the parent Fs
@@ -1285,116 +1221,103 @@ type readSeeker struct {
	io.Seeker
}

// increment the slice passed in as LSB binary
func increment(xs []byte) {
	for i, digit := range xs {
		newDigit := digit + 1
		xs[i] = newDigit
		if newDigit >= digit {
			// exit if no carry
			break
		}
	}
}
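The `increment` helper above treats its argument as a little-endian binary counter; the multipart upload code further down uses it to generate successive block IDs. A minimal, self-contained sketch (not part of the diff) of how the counter maps to base64 block IDs; the 8-byte ID length is taken from the hunk below:

```go
// Sketch only: shows how the little-endian counter becomes Azure block IDs.
package main

import (
	"encoding/base64"
	"fmt"
)

// increment treats xs as a little-endian binary counter, as in the hunk above.
func increment(xs []byte) {
	for i, digit := range xs {
		newDigit := digit + 1
		xs[i] = newDigit
		if newDigit >= digit {
			break // no carry, stop
		}
	}
}

func main() {
	id := make([]byte, 8) // 8-byte counter; all IDs must be the same length
	for part := 0; part < 3; part++ {
		increment(id)
		// Azure block IDs are base64 strings of equal length
		fmt.Printf("part %d -> block ID %q\n", part, base64.StdEncoding.EncodeToString(id))
	}
}
```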
var warnStreamUpload sync.Once

// uploadMultipart uploads a file using multipart upload
//
// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, blob *azblob.BlobURL, httpHeaders *azblob.BlobHTTPHeaders) (err error) {
func (o *Object) uploadMultipart(in io.Reader, size int64, blob *azblob.BlobURL, httpHeaders *azblob.BlobHTTPHeaders) (err error) {
	// Calculate correct chunkSize
	chunkSize := int64(o.fs.opt.ChunkSize)
	totalParts := -1

	// Note that the max size of file is 4.75 TB (100 MB X 50,000
	// blocks) and this is bigger than the max uncommitted block
	// size (9.52 TB) so we do not need to part commit block lists
	// or garbage collect uncommitted blocks.
	//
	// See: https://docs.microsoft.com/en-gb/rest/api/storageservices/put-block

	// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
	// buffers here (default 4MB). With a maximum number of parts (50,000) this will be a file of
	// 195GB which seems like a not too unreasonable limit.
	if size == -1 {
		warnStreamUpload.Do(func() {
			fs.Logf(o, "Streaming uploads using chunk size %v will have maximum file size of %v",
				o.fs.opt.ChunkSize, fs.SizeSuffix(chunkSize*maxTotalParts))
		})
	} else {
		// Adjust partSize until the number of parts is small enough.
		if size/chunkSize >= maxTotalParts {
			// Calculate partition size rounded up to the nearest MB
			chunkSize = (((size / maxTotalParts) >> 20) + 1) << 20
		var totalParts int64
		for {
			// Calculate number of parts
			var remainder int64
			totalParts, remainder = size/chunkSize, size%chunkSize
			if remainder != 0 {
				totalParts++
			}
			if totalParts < maxTotalParts {
				break
			}
			// Double chunk size if the number of parts is too big
			chunkSize *= 2
			if chunkSize > int64(maxChunkSize) {
				return errors.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), totalParts, fs.SizeSuffix(chunkSize/2))
			}
		totalParts = int(size / chunkSize)
		if size%chunkSize != 0 {
			totalParts++
		}
	}
	fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, fs.SizeSuffix(chunkSize))
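The sizing logic above replaces the old round-up-to-the-nearest-MB calculation with a doubling loop. A minimal sketch of that strategy, assuming the Azure limits the comments cite (50,000 blocks per blob, 100 MB per block); the constant names mirror maxTotalParts/maxChunkSize but the values here are assumptions taken from those comments:

```go
// Sketch only: the chunk-size doubling strategy from the hunk above.
package main

import (
	"errors"
	"fmt"
)

const (
	maxTotalParts = 50000             // Azure limit on blocks per blob (assumed)
	maxChunkSize  = 100 * 1024 * 1024 // Azure limit on block size (assumed)
)

// chooseChunkSize doubles the chunk size until the file fits in fewer
// than maxTotalParts blocks, mirroring the loop in the hunk.
func chooseChunkSize(size, chunkSize int64) (int64, error) {
	for {
		totalParts := size / chunkSize
		if size%chunkSize != 0 {
			totalParts++
		}
		if totalParts < maxTotalParts {
			return chunkSize, nil
		}
		chunkSize *= 2
		if chunkSize > maxChunkSize {
			return 0, errors.New("file too big for the block count limit")
		}
	}
}

func main() {
	// A 300 GiB file with the default 4 MiB chunk needs ~76,800 parts,
	// so the chunk size doubles once to 8 MiB (~38,400 parts).
	cs, err := chooseChunkSize(300<<30, 4<<20)
	fmt.Println(cs>>20, "MiB", err) // prints: 8 MiB <nil>
}
```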
	// https://godoc.org/github.com/Azure/azure-storage-blob-go/2017-07-29/azblob#example-BlockBlobURL
	// Utilities are cloned from above example
	// These helper functions convert a binary block ID to a base-64 string and vice versa
	// NOTE: The blockID must be <= 64 bytes and ALL blockIDs for the block must be the same length
	blockIDBinaryToBase64 := func(blockID []byte) string { return base64.StdEncoding.EncodeToString(blockID) }
	// These helper functions convert an int block ID to a base-64 string and vice versa
	blockIDIntToBase64 := func(blockID uint64) string {
		binaryBlockID := (&[8]byte{})[:] // All block IDs are 8 bytes long
		binary.LittleEndian.PutUint64(binaryBlockID, blockID)
		return blockIDBinaryToBase64(binaryBlockID)
	}

	fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, fs.SizeSuffix(chunkSize))
	// block ID variables
	var (
		rawID uint64
		blockID = "" // id in base64 encoded form
		blocks []string
	)

	// increment the blockID
	nextID := func() {
		rawID++
		blockID = blockIDIntToBase64(rawID)
		blocks = append(blocks, blockID)
	}

	// Get BlockBlobURL, we will use default pipeline here
	blockBlobURL := blob.ToBlockBlobURL()
	ctx := context.Background()
	ac := azblob.LeaseAccessConditions{} // Use default lease access conditions

	// unwrap the accounting from the input, we use wrap to put it
	// back on after the buffering
	in, wrap := accounting.UnWrap(in)

	// Upload the chunks
	var (
		g, gCtx = errgroup.WithContext(ctx)
		remaining = size // remaining size in file for logging only, -1 if size < 0
		position = int64(0) // position in file
		memPool = o.fs.getMemoryPool(chunkSize) // pool to get memory from
		finished = false // set when we have read EOF
		blocks []string // list of blocks for finalize
		blockBlobURL = blob.ToBlockBlobURL() // Get BlockBlobURL, we will use default pipeline here
		ac = azblob.LeaseAccessConditions{} // Use default lease access conditions
		binaryBlockID = make([]byte, 8) // block counter as LSB first 8 bytes
	)
	for part := 0; !finished; part++ {
		// Get a block of memory from the pool and a token which limits concurrency
		o.fs.uploadToken.Get()
		buf := memPool.Get()

		free := func() {
			memPool.Put(buf) // return the buf
			o.fs.uploadToken.Put() // return the token
	remaining := size
	position := int64(0)
	errs := make(chan error, 1)
	var wg sync.WaitGroup
outer:
	for part := 0; part < int(totalParts); part++ {
		// Check any errors
		select {
		case err = <-errs:
			break outer
		default:
		}

		// Fail fast, in case an errgroup managed function returns an error
		// gCtx is cancelled. There is no point in uploading all the other parts.
		if gCtx.Err() != nil {
			free()
			break
		reqSize := remaining
		if reqSize >= chunkSize {
			reqSize = chunkSize
		}

		// Make a block of memory
		buf := make([]byte, reqSize)

		// Read the chunk
		n, err := readers.ReadFill(in, buf) // this can never return 0, nil
		if err == io.EOF {
			if n == 0 { // end if no data
				free()
				break
			}
			finished = true
		} else if err != nil {
			free()
			return errors.Wrap(err, "multipart upload failed to read source")
		_, err = io.ReadFull(in, buf)
		if err != nil {
			err = errors.Wrap(err, "multipart upload failed to read source")
			break outer
		}
		buf = buf[:n]

		// increment the blockID and save the blocks for finalize
		increment(binaryBlockID)
		blockID := base64.StdEncoding.EncodeToString(binaryBlockID)
		blocks = append(blocks, blockID)

		// Transfer the chunk
		fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, totalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
		g.Go(func() (err error) {
			defer free()
		nextID()
		wg.Add(1)
		o.fs.uploadToken.Get()
		go func(part int, position int64, blockID string) {
			defer wg.Done()
			defer o.fs.uploadToken.Put()
			fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, totalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))

			// Upload the block, with MD5 for check
			md5sum := md5.Sum(buf)

@@ -1406,19 +1329,28 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
			_, err = blockBlobURL.StageBlock(ctx, blockID, &rs, ac, transactionalMD5)
			return o.fs.shouldRetry(err)
		})

		if err != nil {
			return errors.Wrap(err, "multipart upload failed to upload part")
			err = errors.Wrap(err, "multipart upload failed to upload part")
			select {
			case errs <- err:
			default:
			}
			return
		}
		return nil
	})
		}(part, position, blockID)

		// ready for next block
		if size >= 0 {
			remaining -= chunkSize
		}
		remaining -= chunkSize
		position += chunkSize
	}
	err = g.Wait()
	wg.Wait()
	if err == nil {
		select {
		case err = <-errs:
		default:
		}
	}
	if err != nil {
		return err
	}
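The new code drops errgroup in favour of a hand-rolled fan-out: a WaitGroup plus a one-slot error channel into which workers publish the first failure without blocking, while the submitting loop polls the channel to stop issuing work early. A standalone sketch of just that pattern (not rclone code):

```go
// Sketch only: the bounded error-channel fan-out used in the hunk above.
package main

import (
	"errors"
	"fmt"
	"sync"
)

func main() {
	errs := make(chan error, 1) // holds only the first error
	var wg sync.WaitGroup

	for part := 0; part < 8; part++ {
		// Fail fast: stop submitting work once an error has arrived
		select {
		case err := <-errs:
			fmt.Println("stopping early:", err)
			wg.Wait()
			return
		default:
		}
		wg.Add(1)
		go func(part int) {
			defer wg.Done()
			if part == 3 { // simulate a failed upload
				select {
				case errs <- errors.New("part 3 failed"):
				default: // another error was already recorded
				}
			}
		}(part)
	}
	wg.Wait()
	select {
	case err := <-errs:
		fmt.Println("upload failed:", err)
	default:
		fmt.Println("upload succeeded")
	}
}
```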
@@ -1457,14 +1389,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
	// Note: If multipart, a MD5 checksum will also be computed for each uploaded block
	// in order to validate its integrity during transport
	if !o.fs.opt.DisableCheckSum {
		if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
			sourceMD5bytes, err := hex.DecodeString(sourceMD5)
			if err == nil {
				httpHeaders.ContentMD5 = sourceMD5bytes
			} else {
				fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
			}
	if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
		sourceMD5bytes, err := hex.DecodeString(sourceMD5)
		if err == nil {
			httpHeaders.ContentMD5 = sourceMD5bytes
		} else {
			fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
		}
	}
@@ -1478,7 +1408,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	// is merged the SDK can't upload a single blob of exactly the chunk
	// size, so upload with a multpart upload to work around.
	// See: https://github.com/rclone/rclone/issues/2653
	multipartUpload := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
	multipartUpload := size >= int64(o.fs.opt.UploadCutoff)
	if size == int64(o.fs.opt.ChunkSize) {
		multipartUpload = true
		fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)

@@ -1488,7 +1418,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		if multipartUpload {
			// If a large file upload in chunks
			err = o.uploadMultipart(ctx, in, size, &blob, &httpHeaders)
			err = o.uploadMultipart(in, size, &blob, &httpHeaders)
		} else {
			// Write a small blob in one transaction
			blockBlobURL := blob.ToBlockBlobURL()
@@ -1573,13 +1503,12 @@ func (o *Object) GetTier() string {

// Check the interfaces are satisfied
var (
	_ fs.Fs = &Fs{}
	_ fs.Copier = &Fs{}
	_ fs.PutStreamer = &Fs{}
	_ fs.Purger = &Fs{}
	_ fs.ListRer = &Fs{}
	_ fs.Object = &Object{}
	_ fs.MimeTyper = &Object{}
	_ fs.GetTierer = &Object{}
	_ fs.SetTierer = &Object{}
	_ fs.Fs = &Fs{}
	_ fs.Copier = &Fs{}
	_ fs.Purger = &Fs{}
	_ fs.ListRer = &Fs{}
	_ fs.Object = &Object{}
	_ fs.MimeTyper = &Object{}
	_ fs.GetTierer = &Object{}
	_ fs.SetTierer = &Object{}
)
@@ -16,20 +16,3 @@ func (f *Fs) InternalTest(t *testing.T) {
	enabled = f.Features().GetTier
	assert.True(t, enabled)
}

func TestIncrement(t *testing.T) {
	for _, test := range []struct {
		in   []byte
		want []byte
	}{
		{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
		{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
		{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
		{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
		{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
		{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
	} {
		increment(test.in)
		assert.Equal(t, test.want, test.in)
	}
}
@@ -124,13 +124,8 @@ minimum size.`,
	Default:  defaultChunkSize,
	Advanced: true,
}, {
	Name: "disable_checksum",
	Help: `Disable checksums for large (> upload cutoff) files

Normally rclone will calculate the SHA1 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
	Name: "disable_checksum",
	Help: `Disable checksums for large (> upload cutoff) files`,
	Default:  false,
	Advanced: true,
}, {
@@ -673,7 +668,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
	}
	remote := file.Name[len(prefix):]
	// Check for directory
	isDirectory := remote == "" || strings.HasSuffix(remote, "/")
	isDirectory := strings.HasSuffix(remote, "/")
	if isDirectory {
		remote = remote[:len(remote)-1]
	}
@@ -1375,21 +1370,6 @@ func (o *Object) Size() int64 {
	return o.size
}

// Clean the SHA1
//
// Make sure it is lower case
//
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (eg Cyberduck) use this
func cleanSHA1(sha1 string) (out string) {
	out = strings.ToLower(sha1)
	const unverified = "unverified:"
	if strings.HasPrefix(out, unverified) {
		out = out[len(unverified):]
	}
	return out
}

// decodeMetaDataRaw sets the metadata from the data passed in
//
// Sets
@@ -1405,7 +1385,12 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
	if o.sha1 == "" || o.sha1 == "none" {
		o.sha1 = Info[sha1Key]
	}
	o.sha1 = cleanSHA1(o.sha1)
	// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
	// Some tools (eg Cyberduck) use this
	const unverified = "unverified:"
	if strings.HasPrefix(o.sha1, unverified) {
		o.sha1 = o.sha1[len(unverified):]
	}
	o.size = Size
	// Use the UploadTimestamp if can't get file info
	o.modTime = time.Time(UploadTimestamp)
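cleanSHA1 above folds two previously separate fixes into one helper: lower-casing and stripping the `unverified:` prefix. A minimal sketch of its behaviour (not part of the diff):

```go
// Sketch only: what the new cleanSHA1 helper normalises.
package main

import (
	"fmt"
	"strings"
)

func cleanSHA1(sha1 string) string {
	out := strings.ToLower(sha1) // B2 SHA1s are compared lower case
	const unverified = "unverified:"
	if strings.HasPrefix(out, unverified) {
		out = out[len(unverified):] // strip prefix some tools (eg Cyberduck) add
	}
	return out
}

func main() {
	fmt.Println(cleanSHA1("UNVERIFIED:3F786850E387550FDAB836ED7E6DC881DE23001B"))
	// prints: 3f786850e387550fdab836ed7e6dc881de23001b
}
```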
@@ -1663,7 +1648,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
		o.sha1 = resp.Header.Get(sha1InfoHeader)
		fs.Debugf(o, "Reading sha1 from info - %q", o.sha1)
	}
	o.sha1 = cleanSHA1(o.sha1)
}
// Don't check length or hash on partial content
if resp.StatusCode == http.StatusPartialContent {

@@ -1832,7 +1816,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	Method: "POST",
	RootURL: upload.UploadURL,
	Body: in,
	Options: options,
	ExtraHeaders: map[string]string{
		"Authorization": upload.AuthorizationToken,
		"X-Bz-File-Name": urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath)),
@@ -184,6 +184,13 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
	up.uploadMu.Unlock()
}

// clearUploadURL clears the current UploadURL and the AuthorizationToken
func (up *largeUpload) clearUploadURL() {
	up.uploadMu.Lock()
	up.uploads = nil
	up.uploadMu.Unlock()
}

// Transfer a chunk
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
	err := up.f.pacer.Call(func() (bool, error) {
@@ -53,7 +53,8 @@ const (
	rcloneEncryptedClientSecret = "sYbJYm99WB8jzeaLPU0OPDMJKIkZvD2qOn3SyEMfiJr03RdtDt3xcZEIudRhbIDL"
	minSleep = 10 * time.Millisecond
	maxSleep = 2 * time.Second
	decayConstant = 2 // bigger for slower decay, exponential
	decayConstant = 2 // bigger for slower decay, exponential
	rootID = "0" // ID of root folder is always this
	rootURL = "https://api.box.com/2.0"
	uploadURL = "https://upload.box.com/api/2.0"
	listChunks = 1000 // chunk size to read directory listings
@@ -88,7 +89,22 @@ func init() {
		boxSubType, boxSubTypeOk := m.Get("box_sub_type")
		var err error
		if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
			err = refreshJWTToken(jsonFile, boxSubType, name, m)
			boxConfig, err := getBoxConfig(jsonFile)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
			privateKey, err := getDecryptedPrivateKey(boxConfig)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
			claims, err := getClaims(boxConfig, boxSubType)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
			signingHeaders := getSigningHeaders(boxConfig)
			queryParams := getQueryParams(boxConfig)
			client := fshttp.NewClient(fs.Config)
			err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
			if err != nil {
				log.Fatalf("Failed to configure token with jwt authentication: %v", err)
			}
@@ -105,11 +121,6 @@ func init() {
}, {
	Name: config.ConfigClientSecret,
	Help: "Box App Client Secret\nLeave blank normally.",
}, {
	Name: "root_folder_id",
	Help: "Fill in for rclone to use a non root folder as its starting point.",
	Default: "0",
	Advanced: true,
}, {
	Name: "box_config_file",
	Help: "Box App config.json location\nLeave blank normally.",
@@ -152,26 +163,6 @@ func init() {
	})
}

func refreshJWTToken(jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
	boxConfig, err := getBoxConfig(jsonFile)
	if err != nil {
		log.Fatalf("Failed to configure token: %v", err)
	}
	privateKey, err := getDecryptedPrivateKey(boxConfig)
	if err != nil {
		log.Fatalf("Failed to configure token: %v", err)
	}
	claims, err := getClaims(boxConfig, boxSubType)
	if err != nil {
		log.Fatalf("Failed to configure token: %v", err)
	}
	signingHeaders := getSigningHeaders(boxConfig)
	queryParams := getQueryParams(boxConfig)
	client := fshttp.NewClient(fs.Config)
	err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
	return err
}

func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
	file, err := ioutil.ReadFile(configFile)
	if err != nil {
@@ -194,6 +185,7 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimS
	Iss: boxConfig.BoxAppSettings.ClientID,
	Sub: boxConfig.EnterpriseID,
	Aud: tokenURL,
	Iat: time.Now().Unix(),
	Exp: time.Now().Add(time.Second * 45).Unix(),
	PrivateClaims: map[string]interface{}{
		"box_sub_type": boxSubType,
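The hunk adds an Iat (issued-at) claim next to the existing 45-second Exp. A sketch of the full claim set using golang.org/x/oauth2/jws; the tokenURL value and the placeholder IDs are assumptions for illustration, not values from the diff:

```go
// Sketch only: a Box JWT claim set like the one built in the hunk above.
package main

import (
	"fmt"
	"time"

	"golang.org/x/oauth2/jws"
)

func main() {
	const tokenURL = "https://api.box.com/oauth2/token" // assumed audience
	claims := &jws.ClaimSet{
		Iss: "my-client-id",                          // Box app client ID (placeholder)
		Sub: "my-enterprise-id",                      // enterprise or user ID (placeholder)
		Aud: tokenURL,                                // token endpoint the assertion targets
		Iat: time.Now().Unix(),                       // issued-at, as added in the hunk
		Exp: time.Now().Add(45 * time.Second).Unix(), // short expiry window
		PrivateClaims: map[string]interface{}{
			"box_sub_type": "enterprise",
		},
	}
	fmt.Printf("claims: %+v\n", claims)
}
```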
@@ -244,7 +236,6 @@ type Options struct {
	UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
	CommitRetries int `config:"commit_retries"`
	Enc encoder.MultiEncoder `config:"encoding"`
	RootFolderID string `config:"root_folder_id"`
}

// Fs represents a remote box
@@ -402,27 +393,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	}).Fill(f)
	f.srv.SetErrorHandler(errorHandler)

	jsonFile, ok := m.Get("box_config_file")
	boxSubType, boxSubTypeOk := m.Get("box_sub_type")
	// Renew the token in the background
	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
		_, err := f.readMetaDataForPath(ctx, "")
		return err
	})

	// If using box config.json and JWT, renewing should just refresh the token and
	// should do so whether there are uploads pending or not.
	if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
		f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
			err := refreshJWTToken(jsonFile, boxSubType, name, m)
			return err
		})
		f.tokenRenewer.Start()
	} else {
		// Renew the token in the background
		f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
			_, err := f.readMetaDataForPath(ctx, "")
			return err
		})
	}

	// Get rootFolderID
	rootID := f.opt.RootFolderID
	// Get rootID
	f.dirCache = dircache.New(root, rootID, f)

	// Find the current root
@@ -1190,7 +1167,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// upload does a single non-multipart upload
//
// This is recommended for less than 50 MB of content
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
	upload := api.UploadFile{
		Name: o.fs.opt.Enc.FromStandardName(leaf),
		ContentModifiedAt: api.Time(modTime),

@@ -1209,7 +1186,6 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
		MultipartContentName: "contents",
		MultipartFileName: upload.Name,
		RootURL: uploadURL,
		Options: options,
	}
	// If object has an ID then it is existing so create a new version
	if o.id != "" {

@@ -1251,9 +1227,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

	// Upload with simple or multipart
	if size <= int64(o.fs.opt.UploadCutoff) {
		err = o.upload(ctx, in, leaf, directoryID, modTime, options...)
		err = o.upload(ctx, in, leaf, directoryID, modTime)
	} else {
		err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime, options...)
		err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime)
	}
	return err
}
@@ -54,7 +54,7 @@ func sha1Digest(digest []byte) string {
}

// uploadPart uploads a part in an upload session
func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn, options ...fs.OpenOption) (response *api.UploadPartResponse, err error) {
func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
	chunkSize := int64(len(chunk))
	sha1sum := sha1.Sum(chunk)
	opts := rest.Opts{

@@ -64,7 +64,6 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
		ContentType: "application/octet-stream",
		ContentLength: &chunkSize,
		ContentRange: fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, totalSize),
		Options: options,
		ExtraHeaders: map[string]string{
			"Digest": sha1Digest(sha1sum[:]),
		},

@@ -172,7 +171,7 @@ func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error)
}

// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time, options ...fs.OpenOption) (err error) {
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
	// Create upload session
	session, err := o.createUploadSession(ctx, leaf, directoryID, size)
	if err != nil {

@@ -237,7 +236,7 @@ outer:
			defer wg.Done()
			defer o.fs.uploadToken.Put()
			fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
			partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap, options...)
			partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap)
			if err != nil {
				err = errors.Wrap(err, "multipart upload failed to upload part")
				select {
backend/cache/cache.go (vendored, 27 lines changed)
@@ -65,7 +65,6 @@ func init() {
	Name: "cache",
	Description: "Cache a remote",
	NewFs: NewFs,
	CommandHelp: commandHelp,
	Options: []fs.Option{{
		Name: "remote",
		Help: "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",

@@ -1883,31 +1882,6 @@ func (f *Fs) Disconnect(ctx context.Context) error {
	return do(ctx)
}

var commandHelp = []fs.CommandHelp{
	{
		Name: "stats",
		Short: "Print stats on the cache backend in JSON format.",
	},
}

// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
	switch name {
	case "stats":
		return f.Stats()
	default:
		return nil, fs.ErrorCommandNotFound
	}
}

// Check the interfaces are satisfied
var (
	_ fs.Fs = (*Fs)(nil)

@@ -1925,5 +1899,4 @@ var (
	_ fs.Abouter = (*Fs)(nil)
	_ fs.UserInfoer = (*Fs)(nil)
	_ fs.Disconnecter = (*Fs)(nil)
	_ fs.Commander = (*Fs)(nil)
)
backend/cache/storage_persistent.go (vendored, 24 lines changed)
@@ -16,10 +16,10 @@ import (
	"sync"
	"time"

	bolt "github.com/etcd-io/bbolt"
	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/walk"
	bolt "go.etcd.io/bbolt"
)

// Constants

@@ -980,6 +980,15 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
	})
}

// SetPendingUploadToStarted is a way to mark an entry as started (even if it's not already)
// TO BE USED IN TESTING ONLY
func (b *Persistent) SetPendingUploadToStarted(remote string) error {
	return b.updatePendingUpload(remote, func(item *tempUploadInfo) error {
		item.Started = true
		return nil
	})
}

// ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue
func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error {
	return b.db.Update(func(tx *bolt.Tx) error {

@@ -1027,6 +1036,19 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
	})
}

// PurgeTempUploads will remove all the pending uploads from the queue
// TO BE USED IN TESTING ONLY
func (b *Persistent) PurgeTempUploads() {
	b.tempQueueMux.Lock()
	defer b.tempQueueMux.Unlock()

	_ = b.db.Update(func(tx *bolt.Tx) error {
		_ = tx.DeleteBucket([]byte(tempBucket))
		_, _ = tx.CreateBucketIfNotExists([]byte(tempBucket))
		return nil
	})
}

// Close should be called when the program ends gracefully
func (b *Persistent) Close() {
	b.cleanupMux.Lock()
backend/cache/utils_test.go (vendored, 23 lines changed)
@@ -1,23 +0,0 @@
package cache

import bolt "go.etcd.io/bbolt"

// PurgeTempUploads will remove all the pending uploads from the queue
func (b *Persistent) PurgeTempUploads() {
	b.tempQueueMux.Lock()
	defer b.tempQueueMux.Unlock()

	_ = b.db.Update(func(tx *bolt.Tx) error {
		_ = tx.DeleteBucket([]byte(tempBucket))
		_, _ = tx.CreateBucketIfNotExists([]byte(tempBucket))
		return nil
	})
}

// SetPendingUploadToStarted is a way to mark an entry as started (even if it's not already)
func (b *Persistent) SetPendingUploadToStarted(remote string) error {
	return b.updatePendingUpload(remote, func(item *tempUploadInfo) error {
		item.Started = true
		return nil
	})
}
@@ -71,6 +71,30 @@ type ReadSeekCloser interface {
// OpenRangeSeek opens the file handle at the offset with the limit given
type OpenRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error)

// Cipher is used to swap out the encryption implementations
type Cipher interface {
	// EncryptFileName encrypts a file path
	EncryptFileName(string) string
	// DecryptFileName decrypts a file path, returns error if decrypt was invalid
	DecryptFileName(string) (string, error)
	// EncryptDirName encrypts a directory path
	EncryptDirName(string) string
	// DecryptDirName decrypts a directory path, returns error if decrypt was invalid
	DecryptDirName(string) (string, error)
	// EncryptData
	EncryptData(io.Reader) (io.Reader, error)
	// DecryptData
	DecryptData(io.ReadCloser) (io.ReadCloser, error)
	// DecryptDataSeek decrypt at a given position
	DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
	// EncryptedSize calculates the size of the data when encrypted
	EncryptedSize(int64) int64
	// DecryptedSize calculates the size of the data when decrypted
	DecryptedSize(int64) (int64, error)
	// NameEncryptionMode returns the used mode for name handling
	NameEncryptionMode() NameEncryptionMode
}

// NameEncryptionMode is the type of file name encryption in use
type NameEncryptionMode int
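Turning Cipher into an interface lets callers program against behaviour rather than the concrete *cipher struct. A hedged sketch of such a caller, assuming crypt.NewCipher and configmap.Simple behave as the hunks here suggest; the config keys and the obscured-password requirement are assumptions about the crypt backend's options:

```go
// Sketch only: consuming the new Cipher interface from outside the package.
package main

import (
	"fmt"

	"github.com/rclone/rclone/backend/crypt"
	"github.com/rclone/rclone/fs/config/configmap"
)

// report depends only on the Cipher interface, not on *cipher.
func report(c crypt.Cipher, name string, plainSize int64) {
	fmt.Println("encrypted name:", c.EncryptFileName(name))
	fmt.Println("encrypted size:", c.EncryptedSize(plainSize))
	fmt.Println("name mode:", c.NameEncryptionMode())
}

func main() {
	m := configmap.Simple{
		// "password" must be an obscured value, e.g. the output of
		// `rclone obscure mysecret`; a bare string will be rejected.
		"password":                  "<obscured password>",
		"filename_encryption":       "standard",
		"directory_name_encryption": "true",
	}
	c, err := crypt.NewCipher(m)
	if err != nil {
		panic(err)
	}
	report(c, "hello.txt", 1024)
}
```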
@@ -112,8 +136,7 @@ func (mode NameEncryptionMode) String() (out string) {
	return out
}

// Cipher defines an encoding and decoding cipher for the crypt backend
type Cipher struct {
type cipher struct {
	dataKey [32]byte // Key for secretbox
	nameKey [32]byte // 16,24 or 32 bytes
	nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto

@@ -125,8 +148,8 @@ type Cipher struct {
}

// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*Cipher, error) {
	c := &Cipher{
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*cipher, error) {
	c := &cipher{
		mode: mode,
		cryptoRand: rand.Reader,
		dirNameEncrypt: dirNameEncrypt,
@@ -149,7 +172,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
//
// Note that empty passsword makes all 0x00 keys which is used in the
// tests.
func (c *Cipher) Key(password, salt string) (err error) {
func (c *cipher) Key(password, salt string) (err error) {
	const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)
	var saltBytes = defaultSalt
	if salt != "" {

@@ -173,12 +196,12 @@ func (c *Cipher) Key(password, salt string) (err error) {
}

// getBlock gets a block from the pool of size blockSize
func (c *Cipher) getBlock() []byte {
func (c *cipher) getBlock() []byte {
	return c.buffers.Get().([]byte)
}

// putBlock returns a block to the pool of size blockSize
func (c *Cipher) putBlock(buf []byte) {
func (c *cipher) putBlock(buf []byte) {
	if len(buf) != blockSize {
		panic("bad blocksize returned to pool")
	}

@@ -223,7 +246,7 @@ func decodeFileName(in string) ([]byte, error) {
// This means that
// * filenames with the same name will encrypt the same
// * filenames which start the same won't have a common prefix
func (c *Cipher) encryptSegment(plaintext string) string {
func (c *cipher) encryptSegment(plaintext string) string {
	if plaintext == "" {
		return ""
	}

@@ -233,7 +256,7 @@ func (c *Cipher) encryptSegment(plaintext string) string {
}

// decryptSegment decrypts a path segment
func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
func (c *cipher) decryptSegment(ciphertext string) (string, error) {
	if ciphertext == "" {
		return "", nil
	}

@@ -260,7 +283,7 @@ func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
}

// Simple obfuscation routines
func (c *Cipher) obfuscateSegment(plaintext string) string {
func (c *cipher) obfuscateSegment(plaintext string) string {
	if plaintext == "" {
		return ""
	}

@@ -347,7 +370,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
	return result.String()
}

func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
	if ciphertext == "" {
		return "", nil
	}

@@ -434,7 +457,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
}
// encryptFileName encrypts a file path
func (c *Cipher) encryptFileName(in string) string {
func (c *cipher) encryptFileName(in string) string {
	segments := strings.Split(in, "/")
	for i := range segments {
		// Skip directory name encryption if the user chose to

@@ -452,7 +475,7 @@ func (c *Cipher) encryptFileName(in string) string {
}

// EncryptFileName encrypts a file path
func (c *Cipher) EncryptFileName(in string) string {
func (c *cipher) EncryptFileName(in string) string {
	if c.mode == NameEncryptionOff {
		return in + encryptedSuffix
	}

@@ -460,7 +483,7 @@ func (c *Cipher) EncryptFileName(in string) string {
}

// EncryptDirName encrypts a directory path
func (c *Cipher) EncryptDirName(in string) string {
func (c *cipher) EncryptDirName(in string) string {
	if c.mode == NameEncryptionOff || !c.dirNameEncrypt {
		return in
	}

@@ -468,7 +491,7 @@ func (c *Cipher) EncryptDirName(in string) string {
}

// decryptFileName decrypts a file path
func (c *Cipher) decryptFileName(in string) (string, error) {
func (c *cipher) decryptFileName(in string) (string, error) {
	segments := strings.Split(in, "/")
	for i := range segments {
		var err error

@@ -491,7 +514,7 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
}

// DecryptFileName decrypts a file path
func (c *Cipher) DecryptFileName(in string) (string, error) {
func (c *cipher) DecryptFileName(in string) (string, error) {
	if c.mode == NameEncryptionOff {
		remainingLength := len(in) - len(encryptedSuffix)
		if remainingLength > 0 && strings.HasSuffix(in, encryptedSuffix) {

@@ -503,15 +526,14 @@ func (c *Cipher) DecryptFileName(in string) (string, error) {
}

// DecryptDirName decrypts a directory path
func (c *Cipher) DecryptDirName(in string) (string, error) {
func (c *cipher) DecryptDirName(in string) (string, error) {
	if c.mode == NameEncryptionOff || !c.dirNameEncrypt {
		return in, nil
	}
	return c.decryptFileName(in)
}

// NameEncryptionMode returns the encryption mode in use for names
func (c *Cipher) NameEncryptionMode() NameEncryptionMode {
func (c *cipher) NameEncryptionMode() NameEncryptionMode {
	return c.mode
}
@@ -579,7 +601,7 @@ func (n *nonce) add(x uint64) {
type encrypter struct {
	mu sync.Mutex
	in io.Reader
	c *Cipher
	c *cipher
	nonce nonce
	buf []byte
	readBuf []byte

@@ -589,7 +611,7 @@ type encrypter struct {
}

// newEncrypter creates a new file handle encrypting on the fly
func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
func (c *cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
	fh := &encrypter{
		in: in,
		c: c,

@@ -661,19 +683,13 @@ func (fh *encrypter) finish(err error) (int, error) {
}

// Encrypt data encrypts the data stream
func (c *Cipher) encryptData(in io.Reader) (io.Reader, *encrypter, error) {
func (c *cipher) EncryptData(in io.Reader) (io.Reader, error) {
	in, wrap := accounting.UnWrap(in) // unwrap the accounting off the Reader
	out, err := c.newEncrypter(in, nil)
	if err != nil {
		return nil, nil, err
		return nil, err
	}
	return wrap(out), out, nil // and wrap the accounting back on
}

// EncryptData encrypts the data stream
func (c *Cipher) EncryptData(in io.Reader) (io.Reader, error) {
	out, _, err := c.encryptData(in)
	return out, err
	return wrap(out), nil // and wrap the accounting back on
}
// decrypter decrypts an io.ReaderCloser on the fly

@@ -682,7 +698,7 @@ type decrypter struct {
	rc io.ReadCloser
	nonce nonce
	initialNonce nonce
	c *Cipher
	c *cipher
	buf []byte
	readBuf []byte
	bufIndex int

@@ -693,7 +709,7 @@ type decrypter struct {
}

// newDecrypter creates a new file handle decrypting on the fly
func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
	fh := &decrypter{
		rc: rc,
		c: c,

@@ -721,7 +737,7 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
}

// newDecrypterSeek creates a new file handle decrypting on the fly
func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
	var rc io.ReadCloser
	doRangeSeek := false
	setLimit := false

@@ -996,7 +1012,7 @@ func (fh *decrypter) finishAndClose(err error) error {
}

// DecryptData decrypts the data stream
func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
	out, err := c.newDecrypter(rc)
	if err != nil {
		return nil, err

@@ -1009,7 +1025,7 @@ func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
// The open function must return a ReadCloser opened to the offset supplied
//
// You must use this form of DecryptData if you might want to Seek the file handle
func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
func (c *cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
	out, err := c.newDecrypterSeek(ctx, open, offset, limit)
	if err != nil {
		return nil, err

@@ -1018,7 +1034,7 @@ func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset
}

// EncryptedSize calculates the size of the data when encrypted
func (c *Cipher) EncryptedSize(size int64) int64 {
func (c *cipher) EncryptedSize(size int64) int64 {
	blocks, residue := size/blockDataSize, size%blockDataSize
	encryptedSize := int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize)
	if residue != 0 {

@@ -1028,7 +1044,7 @@ func (c *Cipher) EncryptedSize(size int64) int64 {
}

// DecryptedSize calculates the size of the data when decrypted
func (c *Cipher) DecryptedSize(size int64) (int64, error) {
func (c *cipher) DecryptedSize(size int64) (int64, error) {
	size -= int64(fileHeaderSize)
	if size < 0 {
		return 0, ErrorEncryptedFileTooShort

@@ -1047,6 +1063,7 @@ func (c *Cipher) DecryptedSize(size int64) (int64, error) {

// check interfaces
var (
	_ Cipher = (*cipher)(nil)
	_ io.ReadCloser = (*decrypter)(nil)
	_ io.Seeker = (*decrypter)(nil)
	_ fs.RangeSeeker = (*decrypter)(nil)
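The EncryptedSize arithmetic above is easier to follow with numbers. A standalone sketch assuming rclone crypt's usual format constants (32-byte file header, 64 KiB plaintext blocks, 16 bytes of per-block secretbox overhead); verify the values against the source before relying on them:

```go
// Sketch only: the size arithmetic behind cipher.EncryptedSize.
package main

import "fmt"

const (
	fileHeaderSize  = 32        // magic (8) + nonce (24), assumed
	blockDataSize   = 64 * 1024 // plaintext bytes per block, assumed
	blockHeaderSize = 16        // NaCl secretbox overhead per block, assumed
)

// encryptedSize mirrors cipher.EncryptedSize from the hunk above.
func encryptedSize(size int64) int64 {
	blocks, residue := size/blockDataSize, size%blockDataSize
	encrypted := int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize)
	if residue != 0 {
		encrypted += blockHeaderSize + residue // final short block
	}
	return encrypted
}

func main() {
	// 100,000 plaintext bytes = 1 full block + a 34,464-byte residue:
	// 32 + (16 + 65536) + (16 + 34464) = 100,064 bytes.
	fmt.Println(encryptedSize(100000)) // prints: 100064
}
```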
@@ -12,7 +12,6 @@ import (

	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/crypt/pkcs7"
	"github.com/rclone/rclone/lib/readers"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

@@ -785,7 +784,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
	c, err := newCipher(NameEncryptionStandard, "", "", true)
	assert.NoError(t, err)

	in := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
	in := &errorReader{io.ErrUnexpectedEOF}
	fh, err := c.newEncrypter(in, nil)
	assert.NoError(t, err)

@@ -794,6 +793,14 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
	assert.Equal(t, int64(32), n)
}

type errorReader struct {
	err error
}

func (er errorReader) Read(p []byte) (n int, err error) {
	return 0, er.err
}

type closeDetector struct {
	io.Reader
	closed int

@@ -831,7 +838,7 @@ func TestNewDecrypter(t *testing.T) {
	assert.Equal(t, 1, cd.closed)
}

	er := &readers.ErrorReader{Err: errors.New("potato")}
	er := &errorReader{errors.New("potato")}
	cd = newCloseDetector(er)
	fh, err = c.newDecrypter(cd)
	assert.Nil(t, fh)

@@ -857,7 +864,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
	c, err := newCipher(NameEncryptionStandard, "", "", true)
	assert.NoError(t, err)

	in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
	in2 := &errorReader{io.ErrUnexpectedEOF}
	in1 := bytes.NewBuffer(file16)
	in := ioutil.NopCloser(io.MultiReader(in1, in2))

@@ -1111,7 +1118,7 @@ func TestDecrypterRead(t *testing.T) {

	// Test producing an error on the file on Read the underlying file
	in1 := bytes.NewBuffer(file1)
	in2 := &readers.ErrorReader{Err: errors.New("potato")}
	in2 := &errorReader{errors.New("potato")}
	in := io.MultiReader(in1, in2)
	cd := newCloseDetector(in)
	fh, err := c.newDecrypter(cd)
@@ -26,7 +26,6 @@ func init() {
	Name: "crypt",
	Description: "Encrypt/Decrypt a remote",
	NewFs: NewFs,
	CommandHelp: commandHelp,
	Options: []fs.Option{{
		Name: "remote",
		Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",

@@ -91,7 +90,7 @@ names, or for debugging purposes.`,
}

// newCipherForConfig constructs a Cipher for the given config name
func newCipherForConfig(opt *Options) (*Cipher, error) {
func newCipherForConfig(opt *Options) (Cipher, error) {
	mode, err := NewNameEncryptionMode(opt.FilenameEncryption)
	if err != nil {
		return nil, err

@@ -118,7 +117,7 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
}

// NewCipher constructs a Cipher for the given config
func NewCipher(m configmap.Mapper) (*Cipher, error) {
func NewCipher(m configmap.Mapper) (Cipher, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)

@@ -204,7 +203,7 @@ type Fs struct {
	root string
	opt Options
	features *fs.Features // optional features
	cipher *Cipher
	cipher Cipher
}

// Name of the remote (as passed into NewFs)
@@ -328,7 +327,7 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
	// Encrypt the data into wrappedIn
	wrappedIn, encrypter, err := f.cipher.encryptData(in)
	wrappedIn, err := f.cipher.EncryptData(in)
	if err != nil {
		return nil, err
	}

@@ -352,7 +351,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
	}

	// Transfer the data
	o, err := put(ctx, wrappedIn, f.newObjectInfo(src, encrypter.nonce), options...)
	o, err := put(ctx, wrappedIn, f.newObjectInfo(src), options...)
	if err != nil {
		return nil, err
	}

@@ -505,11 +504,11 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
	if do == nil {
		return nil, errors.New("can't PutUnchecked")
	}
	wrappedIn, encrypter, err := f.cipher.encryptData(in)
	wrappedIn, err := f.cipher.EncryptData(in)
	if err != nil {
		return nil, err
	}
	o, err := do(ctx, wrappedIn, f.newObjectInfo(src, encrypter.nonce))
	o, err := do(ctx, wrappedIn, f.newObjectInfo(src))
	if err != nil {
		return nil, err
	}

@@ -562,37 +561,6 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
	return f.cipher.DecryptFileName(encryptedFileName)
}
// computeHashWithNonce takes the nonce and encrypts the contents of
// src with it, and calculates the hash given by HashType on the fly
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Object, hashType hash.Type) (hashStr string, err error) {
	// Open the src for input
	in, err := src.Open(ctx)
	if err != nil {
		return "", errors.Wrap(err, "failed to open src")
	}
	defer fs.CheckClose(in, &err)

	// Now encrypt the src with the nonce
	out, err := f.cipher.newEncrypter(in, &nonce)
	if err != nil {
		return "", errors.Wrap(err, "failed to make encrypter")
	}

	// pipe into hash
	m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
	if err != nil {
		return "", errors.Wrap(err, "failed to make hasher")
	}
	_, err = io.Copy(m, out)
	if err != nil {
		return "", errors.Wrap(err, "failed to hash data")
	}

	return m.Sums()[hashType], nil
}

// ComputeHash takes the nonce from o, and encrypts the contents of
// src with it, and calculates the hash given by HashType on the fly
//

@@ -604,7 +572,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
	if err != nil {
		return "", errors.Wrap(err, "failed to open object to read nonce")
	}
	d, err := f.cipher.newDecrypter(in)
	d, err := f.cipher.(*cipher).newDecrypter(in)
	if err != nil {
		_ = in.Close()
		return "", errors.Wrap(err, "failed to open object to read nonce")

@@ -629,7 +597,30 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
		return "", errors.Wrap(err, "failed to close nonce read")
	}

	return f.computeHashWithNonce(ctx, nonce, src, hashType)
	// Open the src for input
	in, err = src.Open(ctx)
	if err != nil {
		return "", errors.Wrap(err, "failed to open src")
	}
	defer fs.CheckClose(in, &err)

	// Now encrypt the src with the nonce
	out, err := f.cipher.(*cipher).newEncrypter(in, &nonce)
	if err != nil {
		return "", errors.Wrap(err, "failed to make encrypter")
	}

	// pipe into hash
	m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
	if err != nil {
		return "", errors.Wrap(err, "failed to make hasher")
	}
	_, err = io.Copy(m, out)
	if err != nil {
		return "", errors.Wrap(err, "failed to hash data")
	}

	return m.Sums()[hashType], nil
}

// MergeDirs merges the contents of all the directories passed
@@ -701,67 +692,6 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
|
||||
do(ctx, wrappedNotifyFunc, pollIntervalChan)
|
||||
}
|
||||
|
||||
var commandHelp = []fs.CommandHelp{
|
||||
{
|
||||
Name: "encode",
|
||||
Short: "Encode the given filename(s)",
|
||||
Long: `This encodes the filenames given as arguments returning a list of
|
||||
strings of the encoded results.
|
||||
|
||||
Usage Example:
|
||||
|
||||
rclone backend encode crypt: file1 [file2...]
|
||||
rclone rc backend/command command=encode fs=crypt: file1 [file2...]
|
||||
`,
|
||||
},
|
||||
{
|
||||
Name: "decode",
|
||||
Short: "Decode the given filename(s)",
|
||||
Long: `This decodes the filenames given as arguments returning a list of
|
||||
strings of the decoded results. It will return an error if any of the
|
||||
inputs are invalid.
|
||||
|
||||
Usage Example:
|
||||
|
||||
rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
|
||||
rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
// Command the backend to run a named command
|
||||
//
|
||||
// The command run is name
|
||||
// args may be used to read arguments from
|
||||
// opts may be used to read optional arguments from
|
||||
//
|
||||
// The result should be capable of being JSON encoded
|
||||
// If it is a string or a []string it will be shown to the user
|
||||
// otherwise it will be JSON encoded and shown to the user like that
|
||||
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
||||
switch name {
|
||||
case "decode":
|
||||
out := make([]string, 0, len(arg))
|
||||
for _, encryptedFileName := range arg {
|
||||
fileName, err := f.DecryptFileName(encryptedFileName)
|
||||
if err != nil {
|
||||
return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt : %s", encryptedFileName))
|
||||
}
|
||||
out = append(out, fileName)
|
||||
}
|
||||
return out, nil
|
||||
case "encode":
|
||||
out := make([]string, 0, len(arg))
|
||||
for _, fileName := range arg {
|
||||
encryptedFileName := f.EncryptFileName(fileName)
|
||||
out = append(out, encryptedFileName)
|
||||
}
|
||||
return out, nil
|
||||
default:
|
||||
return nil, fs.ErrorCommandNotFound
|
||||
}
|
||||
}
|
||||
|
||||
// Object describes a wrapped for being read from the Fs
|
||||
//
|
||||
// This decrypts the remote name and decrypts the data
|
||||
@@ -903,15 +833,13 @@ func (f *Fs) Disconnect(ctx context.Context) error {
|
||||
// This encrypts the remote name and adjusts the size
|
||||
type ObjectInfo struct {
|
||||
fs.ObjectInfo
|
||||
f *Fs
|
||||
nonce nonce
|
||||
f *Fs
|
||||
}
|
||||
|
||||
func (f *Fs) newObjectInfo(src fs.ObjectInfo, nonce nonce) *ObjectInfo {
|
||||
func (f *Fs) newObjectInfo(src fs.ObjectInfo) *ObjectInfo {
|
||||
return &ObjectInfo{
|
||||
ObjectInfo: src,
|
||||
f: f,
|
||||
nonce: nonce,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -937,23 +865,6 @@ func (o *ObjectInfo) Size() int64 {
|
||||
// Hash returns the selected checksum of the file
|
||||
// If no checksum is available it returns ""
|
||||
func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
|
||||
var srcObj fs.Object
|
||||
var ok bool
|
||||
// Get the underlying object if there is one
|
||||
if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
|
||||
// Prefer direct interface assertion
|
||||
} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
|
||||
// Otherwise likely is a operations.OverrideRemote
|
||||
srcObj = do.UnWrap()
|
||||
} else {
|
||||
return "", nil
|
||||
}
|
||||
// if this is wrapping a local object then we work out the hash
|
||||
if srcObj.Fs().Features().IsLocal {
|
||||
// Read the data and encrypt it to calculate the hash
|
||||
fs.Debugf(o, "Computing %v hash of encrypted source", hash)
|
||||
return o.f.computeHashWithNonce(ctx, o.nonce, srcObj, hash)
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
@@ -992,7 +903,6 @@ var (
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.Commander = (*Fs)(nil)
|
||||
_ fs.PutUncheckeder = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.CleanUpper = (*Fs)(nil)
|
||||
|
||||
@@ -1,143 +0,0 @@
|
||||
package crypt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/object"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type testWrapper struct {
|
||||
fs.ObjectInfo
|
||||
}
|
||||
|
||||
// UnWrap returns the Object that this Object is wrapping or nil if it
|
||||
// isn't wrapping anything
|
||||
func (o testWrapper) UnWrap() fs.Object {
|
||||
if o, ok := o.ObjectInfo.(fs.Object); ok {
|
||||
return o
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create a temporary local fs to upload things from
|
||||
|
||||
func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
|
||||
localFs, err := fs.TemporaryLocalFs()
|
||||
require.NoError(t, err)
|
||||
cleanup = func() {
|
||||
require.NoError(t, localFs.Rmdir(context.Background(), ""))
|
||||
}
|
||||
return localFs, cleanup
|
||||
}
|
||||
|
||||
// Upload a file to a remote
|
||||
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
|
||||
inBuf := bytes.NewBufferString(contents)
|
||||
t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
|
||||
upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
|
||||
obj, err := f.Put(context.Background(), inBuf, upSrc)
|
||||
require.NoError(t, err)
|
||||
cleanup = func() {
|
||||
require.NoError(t, obj.Remove(context.Background()))
|
||||
}
|
||||
return obj, cleanup
|
||||
}
|
||||
|
||||
// Test the ObjectInfo
|
||||
func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
|
||||
var (
|
||||
contents = random.String(100)
|
||||
path = "hash_test_object"
|
||||
ctx = context.Background()
|
||||
)
|
||||
if wrap {
|
||||
path = "_wrap"
|
||||
}
|
||||
|
||||
localFs, cleanupLocalFs := makeTempLocalFs(t)
|
||||
defer cleanupLocalFs()
|
||||
|
||||
obj, cleanupObj := uploadFile(t, localFs, path, contents)
|
||||
defer cleanupObj()
|
||||
|
||||
// encrypt the data
|
||||
inBuf := bytes.NewBufferString(contents)
|
||||
var outBuf bytes.Buffer
|
||||
enc, err := f.cipher.newEncrypter(inBuf, nil)
|
||||
require.NoError(t, err)
|
||||
nonce := enc.nonce // read the nonce at the start
|
||||
_, err = io.Copy(&outBuf, enc)
|
||||
require.NoError(t, err)
|
||||
|
||||
var oi fs.ObjectInfo = obj
|
||||
if wrap {
|
||||
// wrap the object in a fs.ObjectUnwrapper if required
|
||||
oi = testWrapper{oi}
|
||||
}
|
||||
|
||||
// wrap the object in a crypt for upload using the nonce we
|
||||
// saved from the encryptor
|
||||
src := f.newObjectInfo(oi, nonce)
|
||||
|
||||
// Test ObjectInfo methods
|
||||
assert.Equal(t, int64(outBuf.Len()), src.Size())
|
||||
assert.Equal(t, f, src.Fs())
|
||||
assert.NotEqual(t, path, src.Remote())
|
||||
|
||||
// Test ObjectInfo.Hash
|
||||
wantHash := md5.Sum(outBuf.Bytes())
|
||||
gotHash, err := src.Hash(ctx, hash.MD5)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, fmt.Sprintf("%x", wantHash), gotHash)
|
||||
}
|
||||
|
||||
func testComputeHash(t *testing.T, f *Fs) {
|
||||
var (
|
||||
contents = random.String(100)
|
||||
path = "compute_hash_test"
|
||||
ctx = context.Background()
|
||||
hashType = f.Fs.Hashes().GetOne()
|
||||
)
|
||||
|
||||
if hashType == hash.None {
|
||||
t.Skipf("%v: does not support hashes", f.Fs)
|
||||
}
|
||||
|
||||
localFs, cleanupLocalFs := makeTempLocalFs(t)
|
||||
defer cleanupLocalFs()
|
||||
|
||||
// Upload a file to localFs as a test object
|
||||
localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
|
||||
defer cleanupLocalObj()
|
||||
|
||||
// Upload the same data to the remote Fs also
|
||||
remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
|
||||
defer cleanupRemoteObj()
|
||||
|
||||
// Calculate the expected Hash of the remote object
|
||||
computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test computed hash matches remote object hash
|
||||
remoteObjHash, err := remoteObj.(*Object).Object.Hash(ctx, hashType)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, remoteObjHash, computedHash)
|
||||
}
|
||||
|
||||
// InternalTest is called by fstests.Run to extra tests
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
t.Run("ObjectInfo", func(t *testing.T) { testObjectInfo(t, f, false) })
|
||||
t.Run("ObjectInfoWrap", func(t *testing.T) { testObjectInfo(t, f, true) })
|
||||
t.Run("ComputeHash", func(t *testing.T) { testComputeHash(t, f) })
|
||||
}
|
||||
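The crypt hunks above turn the concrete *Cipher into a Cipher interface: NewCipher and newCipherForConfig now return Cipher, Fs stores a Cipher, the exported EncryptData replaces encryptData, and internal-only helpers such as newDecrypter are reached through a f.cipher.(*cipher) type assertion. Below is a minimal sketch of that pattern only, with illustrative names rather than rclone's actual API:

// Sketch of hiding a concrete cipher behind an interface; names are
// illustrative, not rclone's real types.
package main

import (
	"fmt"
	"io"
	"strings"
)

// Cipher is the abstract interface callers program against.
type Cipher interface {
	EncryptData(in io.Reader) (io.Reader, error)
	DecryptFileName(name string) (string, error)
}

// cipher is the unexported concrete implementation.
type cipher struct{}

// EncryptData would wrap in with encryption; here it is a placeholder.
func (c *cipher) EncryptData(in io.Reader) (io.Reader, error) {
	return in, nil
}

// DecryptFileName is a stand-in for real filename decryption.
func (c *cipher) DecryptFileName(name string) (string, error) {
	return strings.TrimSuffix(name, ".enc"), nil
}

// NewCipher returns the interface, so external callers cannot reach
// internals; same-package callers can still type-assert to *cipher.
func NewCipher() (Cipher, error) {
	return &cipher{}, nil
}

func main() {
	c, _ := NewCipher()
	name, _ := c.DecryptFileName("file.txt.enc")
	fmt.Println(name)
}

Returning the interface keeps the exported surface small, while the same-package type assertion leaves an escape hatch to unexported methods, which is the trade-off the ComputeHash hunk above relies on.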
backend/drive/drive.go (626 lines changed) Executable file → Normal file
@@ -29,7 +29,6 @@ import (

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
@@ -55,7 +54,6 @@ const (
rcloneClientID = "202264815644.apps.googleusercontent.com"
rcloneEncryptedClientSecret = "eX8GpZTVx3vxMWVkuuBdDWmAUE6rGhTwVrvG9GhllYccSdj2-mvHVg"
driveFolderType = "application/vnd.google-apps.folder"
shortcutMimeType = "application/vnd.google-apps.shortcut"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
defaultMinSleep = fs.Duration(100 * time.Millisecond)
@@ -67,7 +65,7 @@ const (
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
minChunkSize = 256 * fs.KibiByte
defaultChunkSize = 8 * fs.MebiByte
partialFields = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails"
partialFields = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents,webViewLink"
)

// Globals
@@ -159,7 +157,6 @@ func init() {
Name: "drive",
Description: "Google Drive",
NewFs: NewFs,
CommandHelp: commandHelp,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
// Parse config into Options struct
@@ -400,7 +397,7 @@ will download it anyway.`,
Default: false,
Help: `Show sizes as storage quota usage, not actual size.

Show the size of a file as the storage quota used. This is the
Show the size of a file as the the storage quota used. This is the
current version plus any older versions that have been set to keep
forever.

@@ -470,16 +467,6 @@ Google don't document so it may break in the future.
See: https://github.com/rclone/rclone/issues/3857
`,
Advanced: true,
}, {
Name: "skip_shortcuts",
Help: `If set skip shortcut files

Normally rclone dereferences shortcut files making them appear as if
they are the original file (see [the shortcuts section](#shortcuts)).
If this flag is set then rclone will ignore shortcut files completely.
`,
Advanced: true,
Default: false,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -537,7 +524,6 @@ type Options struct {
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
DisableHTTP2 bool `config:"disable_http2"`
StopOnUploadLimit bool `config:"stop_on_upload_limit"`
SkipShortcuts bool `config:"skip_shortcuts"`
Enc encoder.MultiEncoder `config:"encoding"`
}

@@ -556,8 +542,6 @@ type Fs struct {
exportExtensions []string // preferred extensions to download docs
importMimeTypes []string // MIME types to convert to docs
isTeamDrive bool // true if this is a team drive
fileFields googleapi.Field // fields to fetch file info with
m configmap.Mapper
}

type baseObject struct {
@@ -567,7 +551,6 @@ type baseObject struct {
modifiedDate string // RFC3339 time it was last modified
mimeType string // The object MIME type
bytes int64 // size of the object
parents int // number of parents
}
type documentObject struct {
baseObject
@@ -633,9 +616,6 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
return false, fserrors.FatalError(err)
}
return true, err
} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
fs.Errorf(f, "Received team drive file limit error: %v", err)
return false, fserrors.FatalError(err)
}
}
}
@@ -662,21 +642,17 @@ func containsString(slice []string, s string) bool {
return false
}

// getFile returns drive.File for the ID passed and fields passed in
func (f *Fs) getFile(ID string, fields googleapi.Field) (info *drive.File, err error) {
// getRootID returns the canonical ID for the "root" ID
func (f *Fs) getRootID() (string, error) {
var info *drive.File
var err error
err = f.pacer.CallNoRetry(func() (bool, error) {
info, err = f.svc.Files.Get(ID).
Fields(fields).
info, err = f.svc.Files.Get("root").
Fields("id").
SupportsAllDrives(true).
Do()
return f.shouldRetry(err)
})
return info, err
}

// getRootID returns the canonical ID for the "root" ID
func (f *Fs) getRootID() (string, error) {
info, err := f.getFile("root", "id")
if err != nil {
return "", errors.Wrap(err, "couldn't find root directory ID")
}
@@ -742,7 +718,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
query = append(query, titleQuery.String())
}
if directoriesOnly {
query = append(query, fmt.Sprintf("(mimeType='%s' or mimeType='%s')", driveFolderType, shortcutMimeType))
query = append(query, fmt.Sprintf("mimeType='%s'", driveFolderType))
}
if filesOnly {
query = append(query, fmt.Sprintf("mimeType!='%s'", driveFolderType))
@@ -766,7 +742,22 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
list.Spaces("appDataFolder")
}

fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.fileFields)
var fields = partialFields

if f.opt.AuthOwnerOnly {
fields += ",owners"
}
if f.opt.UseSharedDate {
fields += ",sharedWithMeTime"
}
if f.opt.SkipChecksumGphotos {
fields += ",spaces"
}
if f.opt.SizeAsQuota {
fields += ",quotaBytesUsed"
}

fields = fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", fields)

OUTER:
for {
@@ -783,24 +774,6 @@ OUTER:
}
for _, item := range files.Files {
item.Name = f.opt.Enc.ToStandardName(item.Name)
if isShortcut(item) {
// ignore shortcuts if directed
if f.opt.SkipShortcuts {
continue
}
// skip file shortcuts if directory only
if directoriesOnly && item.ShortcutDetails.TargetMimeType != driveFolderType {
continue
}
// skip directory shortcuts if file only
if filesOnly && item.ShortcutDetails.TargetMimeType == driveFolderType {
continue
}
item, err = f.resolveShortcut(item)
if err != nil {
return false, errors.Wrap(err, "list")
}
}
// Check the case of items is correct since
// the `=` operator is case insensitive.
if title != "" && title != item.Name {
@@ -1083,10 +1056,8 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
pacer: newPacer(opt),
m: m,
}
f.isTeamDrive = opt.TeamDriveID != ""
f.fileFields = f.getFileFields()
f.features = (&fs.Features{
DuplicateFiles: true,
ReadMimeType: true,
@@ -1198,28 +1169,9 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
modifiedDate: modifiedDate,
mimeType: info.MimeType,
bytes: size,
parents: len(info.Parents),
}
}

// getFileFields gets the fields for a normal file Get or List
func (f *Fs) getFileFields() (fields googleapi.Field) {
fields = partialFields
if f.opt.AuthOwnerOnly {
fields += ",owners"
}
if f.opt.UseSharedDate {
fields += ",sharedWithMeTime"
}
if f.opt.SkipChecksumGphotos {
fields += ",spaces"
}
if f.opt.SizeAsQuota {
fields += ",quotaBytesUsed"
}
return fields
}

// newRegularObject creates a fs.Object for a normal drive.File
func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
@@ -1233,7 +1185,7 @@ func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
}
return &Object{
baseObject: f.newBaseObject(remote, info),
url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, actualID(info.Id)),
url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, info.Id),
md5sum: strings.ToLower(info.Md5Checksum),
v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
}
@@ -1245,18 +1197,17 @@ func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, expor
if err != nil {
return nil, err
}
id := actualID(info.Id)
url := fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, id, url.QueryEscape(mediaType))
url := fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, info.Id, url.QueryEscape(mediaType))
if f.opt.AlternateExport {
switch info.MimeType {
case "application/vnd.google-apps.drawing":
url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", id, extension[1:])
url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", info.Id, extension[1:])
case "application/vnd.google-apps.document":
url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", id, extension[1:])
url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", info.Id, extension[1:])
case "application/vnd.google-apps.spreadsheet":
url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", id, extension[1:])
url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", info.Id, extension[1:])
case "application/vnd.google-apps.presentation":
url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", id, extension[1:])
url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", info.Id, extension[1:])
}
}
baseObject := f.newBaseObject(remote+extension, info)
@@ -1314,37 +1265,23 @@ func (f *Fs) newObjectWithInfo(remote string, info *drive.File) (fs.Object, erro
// When the drive.File cannot be represented as a fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithExportInfo(
remote string, info *drive.File,
extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
// Note that resolveShortcut will have been called already if
// we are being called from a listing. However the drive.Item
// will have been resolved so this will do nothing.
info, err = f.resolveShortcut(info)
if err != nil {
return nil, errors.Wrap(err, "new object")
}
extension, exportName, exportMimeType string, isDocument bool) (fs.Object, error) {
switch {
case info.MimeType == driveFolderType:
return nil, fs.ErrorNotAFile
case info.MimeType == shortcutMimeType:
// We can only get here if f.opt.SkipShortcuts is set
// and not from a listing. This is unlikely.
fs.Debugf(remote, "Ignoring shortcut as skip shortcuts is set")
return nil, fs.ErrorObjectNotFound
case info.Md5Checksum != "" || info.Size > 0:
// If item has MD5 sum or a length it is a file stored on drive
return f.newRegularObject(remote, info), nil
case f.opt.SkipGdocs:
fs.Debugf(remote, "Skipping google document type %q", info.MimeType)
return nil, fs.ErrorObjectNotFound
return nil, nil
default:
// If item MimeType is in the ExportFormats then it is a google doc
if !isDocument {
fs.Debugf(remote, "Ignoring unknown document type %q", info.MimeType)
return nil, fs.ErrorObjectNotFound
return nil, nil
}
if extension == "" {
fs.Debugf(remote, "No export formats found for %q", info.MimeType)
return nil, fs.ErrorObjectNotFound
return nil, nil
}
if isLinkMimeType(exportMimeType) {
return f.newLinkObject(remote, info, extension, exportMimeType)
@@ -1376,7 +1313,6 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
pathID = actualID(pathID)
found, err = f.list(ctx, []string{pathID}, leaf, true, false, false, func(item *drive.File) bool {
if !f.opt.SkipGdocs {
_, exportName, _, isDocument := f.findExportFormat(item)
@@ -1570,7 +1506,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if err != nil {
return nil, err
}
directoryID = actualID(directoryID)

var iErr error
_, err = f.list(ctx, []string{directoryID}, "", false, false, false, func(item *drive.File) bool {
@@ -1656,13 +1591,8 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan list
listRSlices{dirs, paths}.Sort()
var iErr error
_, err := f.list(ctx, dirs, "", false, false, false, func(item *drive.File) bool {
// shared with me items have no parents when at the root
if f.opt.SharedWithMe && len(item.Parents) == 0 && len(paths) == 1 && paths[0] == "" {
item.Parents = dirs
}
for _, parent := range item.Parents {
var i int
earlyExit := false
// If only one item in paths then no need to search for the ID
// assuming google drive is doing its job properly.
//
@@ -1672,9 +1602,6 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan list
// - shared with me items have no parents at the root
// - if using a root alias, eg "root" or "appDataFolder" the ID won't match
i = 0
// items at root can have more than one parent so we need to put
// the item in just once.
earlyExit = true
} else {
// only handle parents that are in the requested dirs list if not at root
i = sort.SearchStrings(dirs, parent)
@@ -1694,11 +1621,6 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan list
iErr = err
return true
}

// If didn't check parents then insert only once
if earlyExit {
break
}
}
return false
})
@@ -1749,7 +1671,6 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
if err != nil {
return err
}
directoryID = actualID(directoryID)

mu := sync.Mutex{} // protects in and overflow
wg := sync.WaitGroup{}
@@ -1763,12 +1684,11 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
mu.Lock()
defer mu.Unlock()
if d, isDir := entry.(*fs.Dir); isDir && in != nil {
job := listREntry{actualID(d.ID()), d.Remote()}
select {
case in <- job:
case in <- listREntry{d.ID(), d.Remote()}:
wg.Add(1)
default:
overflow = append(overflow, job)
overflow = append(overflow, listREntry{d.ID(), d.Remote()})
}
}
listed++
@@ -1845,87 +1765,10 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return nil
}

const shortcutSeparator = '\t'

// joinID adds an actual drive ID to the shortcut ID it came from
//
// directoryIDs in the dircache are these composite directory IDs so
// we must always unpack them before use.
func joinID(actual, shortcut string) string {
return actual + string(shortcutSeparator) + shortcut
}

// splitID separates an actual ID and a shortcut ID from a composite
// ID. If there was no shortcut ID then it will return "" for it.
func splitID(compositeID string) (actualID, shortcutID string) {
i := strings.IndexRune(compositeID, shortcutSeparator)
if i < 0 {
return compositeID, ""
}
return compositeID[:i], compositeID[i+1:]
}

// isShortcutID returns true if compositeID refers to a shortcut
func isShortcutID(compositeID string) bool {
return strings.IndexRune(compositeID, shortcutSeparator) >= 0
}

// actualID returns an actual ID from a composite ID
func actualID(compositeID string) (actualID string) {
actualID, _ = splitID(compositeID)
return actualID
}

// shortcutID returns a shortcut ID from a composite ID if available,
// or the actual ID if not.
func shortcutID(compositeID string) (shortcutID string) {
actualID, shortcutID := splitID(compositeID)
if shortcutID != "" {
return shortcutID
}
return actualID
}

// isShortcut returns true of the item is a shortcut
func isShortcut(item *drive.File) bool {
return item.MimeType == shortcutMimeType && item.ShortcutDetails != nil
}

// Dereference shortcut if required. It returns the newItem (which may
// be just item).
//
// If we return a new item then the ID will be adjusted to be a
// composite of the actual ID and the shortcut ID. This is to make
// sure that we have decided in all use places what we are doing with
// the ID.
//
// Note that we assume shortcuts can't point to shortcuts. Google
// drive web interface doesn't offer the option to create a shortcut
// to a shortcut. The documentation is silent on the issue.
func (f *Fs) resolveShortcut(item *drive.File) (newItem *drive.File, err error) {
if f.opt.SkipShortcuts || item.MimeType != shortcutMimeType {
return item, nil
}
if item.ShortcutDetails == nil {
fs.Errorf(nil, "Expecting shortcutDetails in %v", item)
return item, nil
}
newItem, err = f.getFile(item.ShortcutDetails.TargetId, f.fileFields)
if err != nil {
return nil, errors.Wrap(err, "failed to resolve shortcut")
}
// make sure we use the Name and Parents from the original item
newItem.Name = item.Name
newItem.Parents = item.Parents
// the new ID is a composite ID
newItem.Id = joinID(newItem.Id, item.Id)
return newItem, nil
}

// itemToDirEntry converts a drive.File to a fs.DirEntry.
// When the drive.File cannot be represented as a fs.DirEntry
// (nil, nil) is returned.
func (f *Fs) itemToDirEntry(remote string, item *drive.File) (entry fs.DirEntry, err error) {
func (f *Fs) itemToDirEntry(remote string, item *drive.File) (fs.DirEntry, error) {
switch {
case item.MimeType == driveFolderType:
// cache the directory ID for later lookups
@@ -1936,11 +1779,7 @@ func (f *Fs) itemToDirEntry(remote string, item *drive.File) (entry fs.DirEntry,
case f.opt.AuthOwnerOnly && !isAuthOwned(item):
// ignore object
default:
entry, err = f.newObjectWithInfo(remote, item)
if err == fs.ErrorObjectNotFound {
return nil, nil
}
return entry, err
return f.newObjectWithInfo(remote, item)
}
return nil, nil
}
@@ -1953,7 +1792,6 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
if err != nil {
return nil, err
}
directoryID = actualID(directoryID)

leaf = f.opt.Enc.FromStandardName(leaf)
// Define the metadata for the file we are going to create.
@@ -2057,18 +1895,6 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if len(dirs) < 2 {
return nil
}
newDirs := dirs[:0]
for _, dir := range dirs {
if isShortcutID(dir.ID()) {
fs.Infof(dir, "skipping shortcut directory")
continue
}
newDirs = append(newDirs, dir)
}
dirs = newDirs
if len(dirs) < 2 {
return nil
}
@@ -2102,7 +1928,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
}
// rmdir (into trash) the now empty source directory
fs.Infof(srcDir, "removing empty directory")
err = f.delete(ctx, srcDir.ID(), true)
err = f.rmdir(ctx, srcDir.ID(), true)
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
}
@@ -2122,20 +1948,20 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return err
}

// delete a file or directory unconditionally by ID
func (f *Fs) delete(ctx context.Context, id string, useTrash bool) error {
// Rmdir deletes a directory unconditionally by ID
func (f *Fs) rmdir(ctx context.Context, directoryID string, useTrash bool) error {
return f.pacer.Call(func() (bool, error) {
var err error
if useTrash {
info := drive.File{
Trashed: true,
}
_, err = f.svc.Files.Update(id, &info).
_, err = f.svc.Files.Update(directoryID, &info).
Fields("").
SupportsAllDrives(true).
Do()
} else {
err = f.svc.Files.Delete(id).
err = f.svc.Files.Delete(directoryID).
Fields("").
SupportsAllDrives(true).
Do()
@@ -2154,11 +1980,6 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
if err != nil {
return err
}
directoryID, shortcutID := splitID(directoryID)
// if directory is a shortcut remove it regardless
if shortcutID != "" {
return f.delete(ctx, shortcutID, f.opt.UseTrash)
}
var trashedFiles = false
found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
if !item.Trashed {
@@ -2179,7 +2000,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// trash the directory if it had trashed files
// in or the user wants to trash, otherwise
// delete it.
err = f.delete(ctx, directoryID, trashedFiles || f.opt.UseTrash)
err = f.rmdir(ctx, directoryID, trashedFiles || f.opt.UseTrash)
if err != nil {
return err
}
@@ -2208,13 +2029,11 @@ func (f *Fs) Precision() time.Duration {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
var srcObj *baseObject
ext := ""
readDescription := false
switch src := src.(type) {
case *Object:
srcObj = &src.baseObject
case *documentObject:
srcObj, ext = &src.baseObject, src.ext()
readDescription = true
case *linkObject:
srcObj, ext = &src.baseObject, src.ext()
default:
@@ -2238,28 +2057,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}

if readDescription {
// preserve the description on copy for docs
info, err := f.getFile(actualID(srcObj.id), "description")
if err != nil {
return nil, errors.Wrap(err, "failed to read description for Google Doc")
}
// set the description if there is one, or use the default if not
if info.Description != "" {
createInfo.Description = info.Description
}
} else {
// don't overwrite the description on copy for files
// this should work for docs but it doesn't - it is probably a bug in Google Drive
createInfo.Description = ""
}

// get the ID of the thing to copy - this is the shortcut if available
id := shortcutID(srcObj.id)

var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Copy(id, createInfo).
info, err = f.svc.Files.Copy(srcObj.id, createInfo).
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(f.opt.KeepRevisionForever).
@@ -2298,7 +2098,23 @@ func (f *Fs) Purge(ctx context.Context) error {
if err != nil {
return err
}
err = f.delete(ctx, shortcutID(f.dirCache.RootID()), f.opt.UseTrash)
err = f.pacer.Call(func() (bool, error) {
if f.opt.UseTrash {
info := drive.File{
Trashed: true,
}
_, err = f.svc.Files.Update(f.dirCache.RootID(), &info).
Fields("").
SupportsAllDrives(true).
Do()
} else {
err = f.svc.Files.Delete(f.dirCache.RootID()).
Fields("").
SupportsAllDrives(true).
Do()
}
return f.shouldRetry(err)
})
f.dirCache.ResetRoot()
if err != nil {
return err
@@ -2404,7 +2220,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, err
}
srcParentID = actualID(srcParentID)

// Temporary Object under construction
dstInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
@@ -2417,7 +2232,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Do the move
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Update(shortcutID(srcObj.id), dstInfo).
info, err = f.svc.Files.Update(srcObj.id, dstInfo).
RemoveParents(srcParentID).
AddParents(dstParents).
Fields(partialFields).
@@ -2437,14 +2252,13 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
id, err := f.dirCache.FindDir(ctx, remote, false)
if err == nil {
fs.Debugf(f, "attempting to share directory '%s'", remote)
id = shortcutID(id)
} else {
fs.Debugf(f, "attempting to share single file '%s'", remote)
o, err := f.NewObject(ctx, remote)
if err != nil {
return "", err
}
id = shortcutID(o.(fs.IDer).ID())
id = o.(fs.IDer).ID()
}

permission := &drive.Permission{
@@ -2519,7 +2333,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if err != nil {
return err
}
dstDirectoryID = actualID(dstDirectoryID)

// Check destination does not exist
if dstRemote != "" {
@@ -2543,19 +2356,19 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if err != nil {
return err
}
srcDirectoryID = actualID(srcDirectoryID)

// Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
if err != nil {
return err
}

// Do the move
patch := drive.File{
Name: leaf,
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Files.Update(shortcutID(srcID), &patch).
_, err = f.svc.Files.Update(srcID, &patch).
RemoveParents(srcDirectoryID).
AddParents(dstDirectoryID).
Fields("").
@@ -2732,258 +2545,6 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}

func (f *Fs) changeChunkSize(chunkSizeString string) (err error) {
chunkSizeInt, err := strconv.ParseInt(chunkSizeString, 10, 64)
if err != nil {
return errors.Wrap(err, "couldn't convert chunk size to int")
}
chunkSize := fs.SizeSuffix(chunkSizeInt)
if chunkSize == f.opt.ChunkSize {
return nil
}
err = checkUploadChunkSize(chunkSize)
if err == nil {
f.opt.ChunkSize = chunkSize
}
return err
}

func (f *Fs) changeServiceAccountFile(file string) (err error) {
fs.Debugf(nil, "Changing Service Account File from %s to %s", f.opt.ServiceAccountFile, file)
if file == f.opt.ServiceAccountFile {
return nil
}
oldSvc := f.svc
oldv2Svc := f.v2Svc
oldOAuthClient := f.client
oldFile := f.opt.ServiceAccountFile
oldCredentials := f.opt.ServiceAccountCredentials
defer func() {
// Undo all the changes instead of doing selective undo's
if err != nil {
f.svc = oldSvc
f.v2Svc = oldv2Svc
f.client = oldOAuthClient
f.opt.ServiceAccountFile = oldFile
f.opt.ServiceAccountCredentials = oldCredentials
}
}()
f.opt.ServiceAccountFile = file
f.opt.ServiceAccountCredentials = ""
oAuthClient, err := createOAuthClient(&f.opt, f.name, f.m)
if err != nil {
return errors.Wrap(err, "drive: failed when making oauth client")
}
f.client = oAuthClient
f.svc, err = drive.New(f.client)
if err != nil {
return errors.Wrap(err, "couldn't create Drive client")
}
if f.opt.V2DownloadMinSize >= 0 {
f.v2Svc, err = drive_v2.New(f.client)
if err != nil {
return errors.Wrap(err, "couldn't create Drive v2 client")
}
}
return nil
}

// Create a shortcut from (f, srcPath) to (dstFs, dstPath)
//
// Will not overwrite existing files
func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPath string) (o fs.Object, err error) {
srcFs := f
srcPath = strings.Trim(srcPath, "/")
dstPath = strings.Trim(dstPath, "/")
if dstPath == "" {
return nil, errors.New("shortcut destination can't be root directory")
}

// Find source
var srcID string
isDir := false
if srcPath == "" {
// source is root directory
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
srcID = f.dirCache.RootID()
isDir = true
} else if srcObj, err := srcFs.NewObject(ctx, srcPath); err != nil {
if err != fs.ErrorNotAFile {
return nil, errors.Wrap(err, "can't find source")
}
// source was a directory
srcID, err = srcFs.dirCache.FindDir(ctx, srcPath, false)
if err != nil {
return nil, errors.Wrap(err, "failed to find source dir")
}
isDir = true
} else {
// source was a file
srcID = srcObj.(*Object).id
}
srcID = actualID(srcID) // link to underlying object not to shortcut

// Find destination
_, err = dstFs.NewObject(ctx, dstPath)
if err != fs.ErrorObjectNotFound {
if err == nil {
err = errors.New("existing file")
} else if err == fs.ErrorNotAFile {
err = errors.New("existing directory")
}
return nil, errors.Wrap(err, "not overwriting shortcut target")
}

// Create destination shortcut
createInfo, err := dstFs.createFileInfo(ctx, dstPath, time.Now())
if err != nil {
return nil, errors.Wrap(err, "shortcut destination failed")
}
createInfo.MimeType = shortcutMimeType
createInfo.ShortcutDetails = &drive.FileShortcutDetails{
TargetId: srcID,
}

var info *drive.File
err = dstFs.pacer.CallNoRetry(func() (bool, error) {
info, err = dstFs.svc.Files.Create(createInfo).
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(dstFs.opt.KeepRevisionForever).
Do()
return dstFs.shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "shortcut creation failed")
}
if isDir {
return nil, nil
}
return dstFs.newObjectWithInfo(dstPath, info)
}

var commandHelp = []fs.CommandHelp{{
Name: "get",
Short: "Get command for fetching the drive config parameters",
Long: `This is a get command which will be used to fetch the various drive config parameters

Usage Examples:

rclone backend get drive: [-o service_account_file] [-o chunk_size]
rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size]
`,
Opts: map[string]string{
"chunk_size": "show the current upload chunk size",
"service_account_file": "show the current service account file",
},
}, {
Name: "set",
Short: "Set command for updating the drive config parameters",
Long: `This is a set command which will be used to update the various drive config parameters

Usage Examples:

rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
`,
Opts: map[string]string{
"chunk_size": "update the current upload chunk size",
"service_account_file": "update the current service account file",
},
}, {
Name: "shortcut",
Short: "Create shortcuts from files or directories",
Long: `This command creates shortcuts from files or directories.

Usage:

rclone backend shortcut drive: source_item destination_shortcut
rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut

In the first example this creates a shortcut from the "source_item"
which can be a file or a directory to the "destination_shortcut". The
"source_item" and the "destination_shortcut" should be relative paths
from "drive:"

In the second example this creates a shortcut from the "source_item"
relative to "drive:" to the "destination_shortcut" relative to
"drive2:". This may fail with a permission error if the user
authenticated with "drive2:" can't read files from "drive:".
`,
Opts: map[string]string{
"target": "optional target remote for the shortcut destination",
},
}}

// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "get":
out := make(map[string]string)
if _, ok := opt["service_account_file"]; ok {
out["service_account_file"] = f.opt.ServiceAccountFile
}
if _, ok := opt["chunk_size"]; ok {
out["chunk_size"] = fmt.Sprintf("%s", f.opt.ChunkSize)
}
return out, nil
case "set":
out := make(map[string]map[string]string)
if serviceAccountFile, ok := opt["service_account_file"]; ok {
serviceAccountMap := make(map[string]string)
serviceAccountMap["previous"] = f.opt.ServiceAccountFile
if err = f.changeServiceAccountFile(serviceAccountFile); err != nil {
return out, err
}
f.m.Set("service_account_file", serviceAccountFile)
serviceAccountMap["current"] = f.opt.ServiceAccountFile
out["service_account_file"] = serviceAccountMap
}
if chunkSize, ok := opt["chunk_size"]; ok {
chunkSizeMap := make(map[string]string)
chunkSizeMap["previous"] = fmt.Sprintf("%s", f.opt.ChunkSize)
if err = f.changeChunkSize(chunkSize); err != nil {
return out, err
}
chunkSizeString := fmt.Sprintf("%s", f.opt.ChunkSize)
f.m.Set("chunk_size", chunkSizeString)
chunkSizeMap["current"] = chunkSizeString
out["chunk_size"] = chunkSizeMap
}
return out, nil
case "shortcut":
if len(arg) != 2 {
return nil, errors.New("need exactly 2 arguments")
}
dstFs := f
target, ok := opt["target"]
if ok {
targetFs, err := cache.Get(target)
if err != nil {
return nil, errors.Wrap(err, "couldn't find target")
}
dstFs, ok = targetFs.(*Fs)
if !ok {
return nil, errors.New("target is not a drive backend")
}
}
return f.makeShortcut(ctx, arg[0], dstFs, arg[1])
default:
return nil, fs.ErrorCommandNotFound
}
}

// ------------------------------------------------------------

// Fs returns the parent Fs
@@ -3044,9 +2605,8 @@ func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
}
return nil, "", "", "", false, err
}
directoryID = actualID(directoryID)

found, err := f.list(ctx, []string{directoryID}, leaf, false, false, false, func(item *drive.File) bool {
found, err := f.list(ctx, []string{directoryID}, leaf, false, true, false, func(item *drive.File) bool {
if !f.opt.SkipGdocs {
extension, exportName, exportMimeType, isDocument = f.findExportFormat(item)
if exportName == leaf {
@@ -3096,7 +2656,7 @@ func (o *baseObject) SetModTime(ctx context.Context, modTime time.Time) error {
var info *drive.File
err := o.fs.pacer.Call(func() (bool, error) {
var err error
info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
info, err = o.fs.svc.Files.Update(o.id, updateInfo).
Fields(partialFields).
SupportsAllDrives(true).
Do()
@@ -3225,7 +2785,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if o.v2Download {
var v2File *drive_v2.File
err = o.fs.pacer.Call(func() (bool, error) {
v2File, err = o.fs.v2Svc.Files.Get(actualID(o.id)).
v2File, err = o.fs.v2Svc.Files.Get(o.id).
Fields("downloadUrl").
SupportsAllDrives(true).
Do()
@@ -3304,7 +2864,7 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
// Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
info, err = o.fs.svc.Files.Update(o.id, updateInfo).
Media(in, googleapi.ContentType(uploadMimeType)).
Fields(partialFields).
SupportsAllDrives(true).
@@ -3324,26 +2884,6 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
// If o is a shortcut
if isShortcutID(o.id) {
// Delete it first
err := o.fs.delete(ctx, shortcutID(o.id), o.fs.opt.UseTrash)
if err != nil {
return err
}
// Then put the file as a new file
newObj, err := o.fs.PutUnchecked(ctx, in, src, options...)
if err != nil {
return err
}
// Update the object
if newO, ok := newObj.(*Object); ok {
*o = *newO
} else {
fs.Debugf(newObj, "Failed to update object %T from new object %T", o, newObj)
}
return nil
}
srcMimeType := fs.MimeType(ctx, src)
updateInfo := &drive.File{
MimeType: srcMimeType,
@@ -3414,10 +2954,25 @@ func (o *linkObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo

// Remove an object
func (o *baseObject) Remove(ctx context.Context) error {
if o.parents > 1 {
return errors.New("can't delete safely - has multiple parents")
}
return o.fs.delete(ctx, shortcutID(o.id), o.fs.opt.UseTrash)
var err error
err = o.fs.pacer.Call(func() (bool, error) {
if o.fs.opt.UseTrash {
info := drive.File{
Trashed: true,
}
_, err = o.fs.svc.Files.Update(o.id, &info).
Fields("").
SupportsAllDrives(true).
Do()
} else {
err = o.fs.svc.Files.Delete(o.id).
Fields("").
SupportsAllDrives(true).
Do()
}
return o.fs.shouldRetry(err)
})
return err
}

// MimeType of an Object if known, "" otherwise
@@ -3479,7 +3034,6 @@ var (
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)

@@ -14,7 +14,6 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
_ "github.com/rclone/rclone/backend/local"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -269,98 +268,6 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestIntegration/FsMkdir/FsPutFiles/Internal/Shortcuts
|
||||
func (f *Fs) InternalTestShortcuts(t *testing.T) {
|
||||
const (
|
||||
// from fstest/fstests/fstests.go
|
||||
existingDir = "hello? sausage"
|
||||
existingFile = "file name.txt"
|
||||
existingSubDir = "êé"
|
||||
)
|
||||
ctx := context.Background()
|
||||
srcObj, err := f.NewObject(ctx, existingFile)
|
||||
require.NoError(t, err)
|
||||
srcHash, err := srcObj.Hash(ctx, hash.MD5)
|
||||
require.NoError(t, err)
|
||||
assert.NotEqual(t, "", srcHash)
|
||||
t.Run("Errors", func(t *testing.T) {
|
||||
_, err := f.makeShortcut(ctx, "", f, "")
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "can't be root")
|
||||
|
||||
_, err = f.makeShortcut(ctx, "notfound", f, "dst")
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "can't find source")
|
||||
|
||||
_, err = f.makeShortcut(ctx, existingFile, f, existingFile)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "not overwriting")
|
||||
assert.Contains(t, err.Error(), "existing file")
|
||||
|
||||
_, err = f.makeShortcut(ctx, existingFile, f, existingDir)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "not overwriting")
|
||||
assert.Contains(t, err.Error(), "existing directory")
|
||||
})
|
||||
t.Run("File", func(t *testing.T) {
|
||||
dstObj, err := f.makeShortcut(ctx, existingFile, f, "shortcut.txt")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, dstObj)
|
||||
assert.Equal(t, "shortcut.txt", dstObj.Remote())
|
||||
dstHash, err := dstObj.Hash(ctx, hash.MD5)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, srcHash, dstHash)
|
||||
require.NoError(t, dstObj.Remove(ctx))
|
||||
})
|
||||
t.Run("Dir", func(t *testing.T) {
|
||||
dstObj, err := f.makeShortcut(ctx, existingDir, f, "shortcutdir")
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, dstObj)
|
||||
entries, err := f.List(ctx, "shortcutdir")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(entries))
|
||||
require.Equal(t, "shortcutdir/"+existingSubDir, entries[0].Remote())
|
||||
require.NoError(t, f.Rmdir(ctx, "shortcutdir"))
|
||||
})
|
||||
t.Run("Command", func(t *testing.T) {
|
||||
_, err := f.Command(ctx, "shortcut", []string{"one"}, nil)
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "need exactly 2 arguments")
|
||||
|
||||
_, err = f.Command(ctx, "shortcut", []string{"one", "two"}, map[string]string{
|
||||
"target": "doesnotexistremote:",
|
||||
})
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "couldn't find target")
|
||||
|
||||
_, err = f.Command(ctx, "shortcut", []string{"one", "two"}, map[string]string{
|
||||
"target": ".",
|
||||
})
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "target is not a drive backend")
|
||||
|
||||
dstObjI, err := f.Command(ctx, "shortcut", []string{existingFile, "shortcut2.txt"}, map[string]string{
|
||||
"target": fs.ConfigString(f),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
dstObj := dstObjI.(*Object)
|
||||
assert.Equal(t, "shortcut2.txt", dstObj.Remote())
|
||||
dstHash, err := dstObj.Hash(ctx, hash.MD5)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, srcHash, dstHash)
|
||||
require.NoError(t, dstObj.Remove(ctx))
|
||||
|
||||
dstObjI, err = f.Command(ctx, "shortcut", []string{existingFile, "shortcut3.txt"}, nil)
|
||||
require.NoError(t, err)
|
||||
dstObj = dstObjI.(*Object)
|
||||
assert.Equal(t, "shortcut3.txt", dstObj.Remote())
|
||||
dstHash, err = dstObj.Hash(ctx, hash.MD5)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, srcHash, dstHash)
|
||||
require.NoError(t, dstObj.Remove(ctx))
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
// These tests all depend on each other so run them as nested tests
|
||||
t.Run("DocumentImport", func(t *testing.T) {
|
||||
@@ -375,7 +282,6 @@ func (f *Fs) InternalTest(t *testing.T) {
|
||||
})
|
||||
})
|
||||
})
|
||||
t.Run("Shortcuts", f.InternalTestShortcuts)
|
||||
}
|
||||
|
||||
var _ fstests.InternalTester = (*Fs)(nil)
|
||||
|
||||
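The test above drives the shortcut feature through the generic backend command interface. A minimal sketch of reaching the same command from outside the backend, assuming a configured drive remote named "gdrive:" (the remote name is illustrative, not from the diff):

package main

import (
	"context"
	"fmt"
	"log"

	_ "github.com/rclone/rclone/backend/drive"
	"github.com/rclone/rclone/fs"
)

func main() {
	ctx := context.Background()
	f, err := fs.NewFs("gdrive:") // "gdrive:" is a hypothetical remote name
	if err != nil {
		log.Fatal(err)
	}
	// backends exposing named commands implement fs.Commander
	cmdr, ok := f.(fs.Commander)
	if !ok {
		log.Fatal("backend does not support commands")
	}
	// make "shortcut.txt" pointing at "file name.txt" within the same remote
	out, err := cmdr.Command(ctx, "shortcut", []string{"file name.txt", "shortcut.txt"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("created: %v\n", out)
}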
6
backend/dropbox/dropbox.go
Executable file → Normal file
@@ -225,11 +225,7 @@ func shouldRetry(err error) (bool, error) {
return false, err
}
baseErrString := errors.Cause(err).Error()
// First check for Insufficient Space
if strings.Contains(baseErrString, "insufficient_space") {
return false, fserrors.FatalError(err)
}
// Then handle any official Retry-After header from Dropbox's SDK
// handle any official Retry-After header from Dropbox's SDK first
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {

@@ -17,7 +17,6 @@ import (
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
403, // Forbidden (may happen when request limit is exceeded)
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
@@ -321,7 +320,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return response, err
}

func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string, options ...fs.OpenOption) (response *http.Response, err error) {
func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
// fs.Debugf(f, "Uploading File `%s`", fileName)

fileName = f.opt.Enc.FromStandardName(fileName)
@@ -339,7 +338,6 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
NoResponse: true,
Body: in,
ContentLength: &size,
Options: options,
MultipartContentName: "file[]",
MultipartFileName: fileName,
MultipartParams: map[string][]string{

@@ -23,12 +23,11 @@ import (
)

const (
rootID = "0"
apiBaseURL = "https://api.1fichier.com/v1"
minSleep = 400 * time.Millisecond // api is extremely rate limited now
maxSleep = 5 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
attackConstant = 0 // start with max sleep
rootID = "0"
apiBaseURL = "https://api.1fichier.com/v1"
minSleep = 334 * time.Millisecond // 3 API calls per second is recommended
maxSleep = 5 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
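The constants above tune the 1fichier pacer to roughly three API calls per second. A minimal sketch of how such values feed the pacer, using the same fs/pacer helpers this hunk uses (doAPIRequest is a placeholder):

p := fs.NewPacer(pacer.NewDefault(
	pacer.MinSleep(334*time.Millisecond), // ~3 API calls per second
	pacer.MaxSleep(5*time.Second),        // back off up to 5s after repeated errors
	pacer.DecayConstant(2),               // bigger means slower decay back to minSleep
))
err := p.Call(func() (bool, error) {
	// one rate-limited API request; returning true asks the pacer to retry
	return false, doAPIRequest() // doAPIRequest is hypothetical
})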
func init() {
@@ -186,7 +185,7 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
baseClient: &http.Client{},
}

@@ -339,7 +338,7 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
return nil, err
}

_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL, options...)
_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
if err != nil {
return nil, err
}

@@ -799,12 +799,10 @@ func (f *ftpReadCloser) Close() error {
f.f.putFtpConnection(&f.c, nil)
}
// mask the error if it was caused by a premature close
// NB StatusAboutToSend is to work around a bug in pureftpd
// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable:
err = nil
}
}

@@ -242,9 +242,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "COLDLINE",
Help: "Coldline storage class",
}, {
Value: "ARCHIVE",
Help: "Archive storage class",
}, {
Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class",
@@ -558,7 +555,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
continue
}
remote = remote[len(prefix):]
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
isDirectory := strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
@@ -1066,33 +1063,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ContentType: fs.MimeType(ctx, src),
Metadata: metadataFromModTime(modTime),
}
// Apply upload options
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
object.CacheControl = value
case "content-disposition":
object.ContentDisposition = value
case "content-encoding":
object.ContentEncoding = value
case "content-language":
object.ContentLanguage = value
case "content-type":
object.ContentType = value
default:
const googMetaPrefix = "x-goog-meta-"
if strings.HasPrefix(lowerKey, googMetaPrefix) {
metaKey := lowerKey[len(googMetaPrefix):]
object.Metadata[metaKey] = value
} else {
fs.Errorf(o, "Don't know how to set key %q on upload", key)
}
}
}
var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
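The switch in the hunk above copies recognised upload headers onto the *storage.Object, lowercasing keys first, with unknown x-goog-meta-* keys becoming custom metadata. A sketch of what a caller could pass (obj, in and src are placeholders standing in for a real object handle, reader and source info):

options := []fs.OpenOption{
	&fs.HTTPOption{Key: "Cache-Control", Value: "max-age=3600"},
	&fs.HTTPOption{Key: "x-goog-meta-origin", Value: "rclone"}, // becomes Metadata["origin"]
}
err := obj.Update(ctx, in, src, options...)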
@@ -134,20 +134,14 @@ rclone mount needs to know the size of files in advance of reading
them, so setting this flag when using rclone mount is recommended if
you want to read the media.`,
Advanced: true,
}, {
Name: "start_year",
Default: 2000,
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`,
Advanced: true,
}},
})
}

// Options defines the configuration for this backend
type Options struct {
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
StartYear int `config:"start_year"`
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
}

// Fs represents a remote storage server
@@ -208,11 +202,6 @@ func (f *Fs) dirTime() time.Time {
return f.startTime
}

// startYear returns the start year
func (f *Fs) startYear() int {
return f.opt.StartYear
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
@@ -235,10 +224,6 @@ func errorHandler(resp *http.Response) error {
if err != nil {
body = nil
}
// Google sends 404 messages as images so be prepared for that
if strings.HasPrefix(resp.Header.Get("Content-Type"), "image/") {
body = []byte("Image not found or broken")
}
var e = api.Error{
Details: api.ErrorDetails{
Code: resp.StatusCode,
@@ -958,9 +943,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// Upload the media item in exchange for an UploadToken
opts := rest.Opts{
Method: "POST",
Path: "/uploads",
Options: options,
Method: "POST",
Path: "/uploads",
ExtraHeaders: map[string]string{
"X-Goog-Upload-File-Name": fileName,
"X-Goog-Upload-Protocol": "raw",
@@ -1019,9 +1003,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// Add upload to internal storage
if pattern.isUpload {
o.fs.uploadedMu.Lock()
o.fs.uploaded.AddEntry(o)
o.fs.uploadedMu.Unlock()
}
return nil
}

@@ -23,7 +23,6 @@ type lister interface {
listAlbums(ctx context.Context, shared bool) (all *albums, err error)
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
dirTime() time.Time
startYear() int
}

// dirPattern describes a single directory pattern
@@ -223,10 +222,11 @@ func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []
return nil, "", nil
}

// Return the years from startYear to today
// Return the years from 2000 to today
// FIXME make configurable?
func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
currentYear := f.dirTime().Year()
for year := f.startYear(); year <= currentYear; year++ {
for year := 2000; year <= currentYear; year++ {
entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), f.dirTime()))
}
return entries, nil
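With startYear threaded through the lister interface, years() above yields one virtual directory per year from the configured start to the present. A tiny sketch of the resulting loop, assuming a lister whose startYear() returns 2015 and whose dirTime() falls in 2020 (values are illustrative):

for year := 2015; year <= 2020; year++ {
	// one virtual directory per year: 2015, 2016, ..., 2020
	entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), dirTime))
}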
@@ -59,11 +59,6 @@ func (f *testLister) dirTime() time.Time {
return startTime
}

// mock startYear for testing
func (f *testLister) startYear() int {
return 2000
}

func TestPatternMatch(t *testing.T) {
for testNumber, test := range []struct {
// input

@@ -166,7 +166,8 @@ func TestNewObject(t *testing.T) {
require.NoError(t, err)
tFile := fi.ModTime()

fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))

// check object not found
o, err = f.NewObject(context.Background(), "not found.txt")

@@ -164,12 +164,6 @@ type CustomerInfo struct {
IOSHash string `json:"ios_hash"`
}

// TrashResponse is returned when emptying the Trash
type TrashResponse struct {
Folders int64 `json:"folders"`
Files int64 `json:"files"`
}

// XML structures returned by the old API

// Flag is a hacky type for checking if an attribute is present

@@ -140,11 +140,6 @@ func init() {
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
Default: fs.SizeSuffix(10 * 1024 * 1024),
Advanced: true,
}, {
Name: "trashed_only",
Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
Default: false,
Advanced: true,
}, {
Name: "hard_delete",
Help: "Delete files permanently rather than putting them into the trash.",
@@ -179,7 +174,6 @@ type Options struct {
Device string `config:"device"`
Mountpoint string `config:"mountpoint"`
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
TrashedOnly bool `config:"trashed_only"`
HardDelete bool `config:"hard_delete"`
Unlink bool `config:"unlink"`
UploadThreshold fs.SizeSuffix `config:"upload_resume_limit"`
@@ -525,9 +519,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
WriteMimeType: true,
}).Fill(f)
f.srv.SetErrorHandler(errorHandler)
if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now
f.features.ListR = nil
}

// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
@@ -647,13 +638,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, errors.Wrap(err, "couldn't list files")
}

if bool(result.Deleted) && !f.opt.TrashedOnly {
if result.Deleted {
return nil, fs.ErrorDirNotFound
}

for i := range result.Folders {
item := &result.Folders[i]
if !f.opt.TrashedOnly && bool(item.Deleted) {
if item.Deleted {
continue
}
remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
@@ -663,14 +654,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e

for i := range result.Files {
item := &result.Files[i]
if f.opt.TrashedOnly {
if !item.Deleted || item.State != "COMPLETED" {
continue
}
} else {
if item.Deleted || item.State != "COMPLETED" {
continue
}
if item.Deleted || item.State != "COMPLETED" {
continue
}
remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
o, err := f.newObjectWithInfo(ctx, remote, item)
@@ -1064,22 +1049,6 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return usage, nil
}

// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) error {
opts := rest.Opts{
Method: "POST",
Path: "files/v1/purge_trash",
}

var info api.TrashResponse
_, err := f.apiSrv.CallJSON(ctx, &opts, nil, &info)
if err != nil {
return errors.Wrap(err, "couldn't empty trash")
}

return nil
}
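The CleanUp method above wires trash emptying into rclone's optional-feature interfaces, which is what lets `rclone cleanup remote:` work on jottacloud. A sketch of invoking it programmatically via type assertion (the remote name is illustrative):

f, err := fs.NewFs("jotta:") // hypothetical remote name
if err != nil {
	log.Fatal(err)
}
// fs.CleanUpper is the optional interface CleanUp satisfies
if do, ok := f.(fs.CleanUpper); ok {
	if err := do.CleanUp(context.Background()); err != nil {
		log.Fatalf("couldn't empty trash: %v", err)
	}
}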
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
@@ -1153,7 +1122,7 @@ func (o *Object) readMetaData(ctx context.Context, force bool) (err error) {
if err != nil {
return err
}
if bool(info.Deleted) && !o.fs.opt.TrashedOnly {
if info.Deleted {
return fs.ErrorObjectNotFound
}
return o.setMetaData(info)
@@ -1290,7 +1259,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
opts := rest.Opts{
Method: "POST",
Path: "files/v1/allocate",
Options: options,
ExtraHeaders: make(map[string]string),
}
fileDate := api.Time(src.ModTime(ctx)).APIString()
@@ -1387,7 +1355,6 @@ var (
_ fs.ListRer = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
)

@@ -41,7 +41,6 @@ func init() {
Name: "local",
Description: "Local Disk",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows",
@@ -698,50 +697,6 @@ func (f *Fs) Hashes() hash.Set {
return hash.Supported()
}

var commandHelp = []fs.CommandHelp{
{
Name: "noop",
Short: "A null operation for testing backend commands",
Long: `This is a test command which has some options
you can try to change the output.`,
Opts: map[string]string{
"echo": "echo the input arguments",
"error": "return an error based on option value",
},
},
}

// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
switch name {
case "noop":
if txt, ok := opt["error"]; ok {
if txt == "" {
txt = "unspecified error"
}
return nil, errors.New(txt)
}
if _, ok := opt["echo"]; ok {
out := map[string]interface{}{}
out["name"] = name
out["arg"] = arg
out["opt"] = opt
return out, nil
}
return nil, nil
default:
return nil, fs.ErrorCommandNotFound
}
}

// ------------------------------------------------------------

// Fs returns the parent Fs
@@ -768,17 +723,8 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
oldtime := o.modTime
oldsize := o.size
err := o.lstat()
var changed bool
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
// If file not found then we assume any accumulated
// hashes are OK - this will error on Open
changed = true
} else {
return "", errors.Wrap(err, "hash: failed to stat")
}
} else {
changed = !o.modTime.Equal(oldtime) || oldsize != o.size
return "", errors.Wrap(err, "hash: failed to stat")
}

o.fs.objectHashesMu.Lock()
@@ -786,7 +732,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
hashValue, hashFound := o.hashes[r]
o.fs.objectHashesMu.Unlock()

if changed || hashes == nil || !hashFound {
if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil || !hashFound {
var in io.ReadCloser

if !o.translatedLink {
@@ -1031,7 +977,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
}
// Pre-allocate the file for performance reasons
err = file.PreAllocate(src.Size(), f)
err = preAllocate(src.Size(), f)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
@@ -1118,16 +1064,10 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
return nil, err
}
// Pre-allocate the file for performance reasons
err = file.PreAllocate(size, out)
err = preAllocate(size, out)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
// Set the file to be a sparse file (important on Windows)
err = file.SetSparse(out)
if err != nil {
fs.Debugf(o, "Failed to set sparse: %v", err)
}

return out, nil
}

@@ -1218,7 +1158,6 @@ var (
_ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.Commander = &Fs{}
_ fs.OpenWriterAter = &Fs{}
_ fs.Object = &Object{}
)
10
backend/local/preallocate_other.go
Normal file
@@ -0,0 +1,10 @@
//+build !windows,!linux

package local

import "os"

// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
return nil
}
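preallocate_other.go gives non-Linux, non-Windows builds a no-op fallback, so the call sites shown earlier compile on every platform. A usage sketch mirroring how Update/OpenWriterAt call it (the path and size are placeholders):

out, err := os.Create("/tmp/dest.bin") // hypothetical destination
if err != nil {
	return err
}
// best effort: a failure here only costs performance, never correctness
if err := preAllocate(1<<20, out); err != nil {
	fs.Debugf(nil, "Failed to pre-allocate: %v", err)
}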
@@ -1,6 +1,6 @@
//+build linux

package file
package local

import (
"os"
@@ -18,8 +18,8 @@ var (
fallocFlagsIndex int32
)

// PreAllocate the file for performance reasons
func PreAllocate(size int64, out *os.File) error {
// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
if size <= 0 {
return nil
}
@@ -44,8 +44,3 @@ again:
// }
return err
}

// SetSparse makes the file be a sparse file
func SetSparse(out *os.File) error {
return nil
}
@@ -1,6 +1,6 @@
//+build windows

package file
package local

import (
"os"
@@ -32,8 +32,8 @@ type ioStatusBlock struct {
Status, Information uintptr
}

// PreAllocate the file for performance reasons
func PreAllocate(size int64, out *os.File) error {
// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
if size <= 0 {
return nil
}
@@ -77,16 +77,3 @@ func PreAllocate(size int64, out *os.File) error {

return nil
}

const (
FSCTL_SET_SPARSE = 0x000900c4
)

// SetSparse makes the file be a sparse file
func SetSparse(out *os.File) error {
err := syscall.DeviceIoControl(syscall.Handle(out.Fd()), FSCTL_SET_SPARSE, nil, 0, nil, 0, nil, nil)
if err != nil {
return errors.Wrap(err, "DeviceIoControl FSCTL_SET_SPARSE")
}
return nil
}
77
backend/onedrive/onedrive.go
Executable file → Normal file
@@ -184,28 +184,6 @@ func init() {
log.Fatalf("Failed to query available drives: %v", err)
}

// Also call /me/drive as sometimes /me/drives doesn't return it #4068
if opts.Path == "/me/drives" {
opts.Path = "/me/drive"
meDrive := driveResource{}
_, err := srv.CallJSON(ctx, &opts, nil, &meDrive)
if err != nil {
log.Fatalf("Failed to query available drives: %v", err)
}
found := false
for _, drive := range drives.Drives {
if drive.DriveID == meDrive.DriveID {
found = true
break
}
}
// add the me drive if not found already
if !found {
fs.Debugf(nil, "Adding %v to drives list from /me/drive", meDrive)
drives.Drives = append(drives.Drives, meDrive)
}
}

if len(drives.Drives) == 0 {
log.Fatalf("No drives found")
} else {
@@ -248,9 +226,8 @@ func init() {
Name: "chunk_size",
Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).

Above this size files will be chunked - must be multiple of 320k (327,680 bytes) and
should not exceed 250M (262,144,000 bytes) else you may encounter \"Microsoft.SharePoint.Client.InvalidClientQueryException: The request message is too big.\"
Note that the chunks will be buffered into memory.`,
Above this size files will be chunked - must be multiple of 320k (327,680 bytes). Note
that the chunks will be buffered into memory.`,
Default: defaultChunkSize,
Advanced: true,
}, {
@@ -274,16 +251,6 @@ delete OneNote files or otherwise want them to show up in directory
listing, set this option.`,
Default: false,
Advanced: true,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server side operations (eg copy) to work across different onedrive configs.

This can be useful if you wish to do a server side copy between two
different Onedrives. Note that this isn't enabled by default
because it isn't easy to tell if it will work between any two
configurations.`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -336,12 +303,11 @@ configurations.`,

// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
Enc encoder.MultiEncoder `config:"encoding"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote one drive
@@ -436,8 +402,6 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
fs.Debugf(nil, "Too many requests. Trying again in %d seconds.", retryAfter)
}
}
case 507: // Insufficient Storage
return false, fserrors.FatalError(err)
}
}
return retry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
@@ -612,7 +576,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
CaseInsensitive: true,
ReadMimeType: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
}).Fill(f)
f.srv.SetErrorHandler(errorHandler)

@@ -1025,13 +988,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}

// Check we aren't overwriting a file on the same remote
if srcObj.fs == f {
srcPath := srcObj.rootPath()
dstPath := f.rootPath(remote)
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
srcPath := srcObj.rootPath()
dstPath := f.rootPath(remote)
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}

// Create temporary object
@@ -1612,7 +1572,7 @@ func (o *Object) getPosition(ctx context.Context, url string) (pos int64, err er
}

// uploadFragment uploads a part
func (o *Object) uploadFragment(ctx context.Context, url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64, options ...fs.OpenOption) (info *api.Item, err error) {
func (o *Object) uploadFragment(ctx context.Context, url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64) (info *api.Item, err error) {
// var response api.UploadFragmentResponse
var resp *http.Response
var body []byte
@@ -1625,7 +1585,6 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
ContentLength: &toSend,
ContentRange: fmt.Sprintf("bytes %d-%d/%d", start+skip, start+chunkSize-1, totalSize),
Body: chunk,
Options: options,
}
_, _ = chunk.Seek(skip, io.SeekStart)
resp, err = o.fs.srv.Call(ctx, &opts)
@@ -1683,7 +1642,7 @@ func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error
}

// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, modTime time.Time, options ...fs.OpenOption) (info *api.Item, err error) {
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
if size <= 0 {
return nil, errors.New("unknown-sized upload not supported")
}
@@ -1734,7 +1693,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
}
seg := readers.NewRepeatableReader(io.LimitReader(in, n))
fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n, options...)
info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n)
if err != nil {
return nil, err
}
@@ -1747,7 +1706,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,

// Update the content of a remote file within 4MB size in one single request
// This function will set modtime after uploading, which will create a new version for the remote file
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, modTime time.Time, options ...fs.OpenOption) (info *api.Item, err error) {
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
}
@@ -1764,7 +1723,6 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf)) + ":/content",
ContentLength: &size,
Body: in,
Options: options,
}
} else {
opts = rest.Opts{
@@ -1772,7 +1730,6 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
ContentLength: &size,
Body: in,
Options: options,
}
}

@@ -1814,9 +1771,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

var info *api.Item
if size > 0 {
info, err = o.uploadMultipart(ctx, in, size, modTime, options...)
info, err = o.uploadMultipart(ctx, in, size, modTime)
} else if size == 0 {
info, err = o.uploadSinglepart(ctx, in, size, modTime, options...)
info, err = o.uploadSinglepart(ctx, in, size, modTime)
} else {
return errors.New("unknown-sized upload not supported")
}
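OneDrive upload fragments must be a multiple of 320k (327,680 bytes), which is why uploadMultipart above slices the stream in fixed-size segments. A sketch of rounding an arbitrary chunk size down to that granularity (the requested size is a placeholder):

const fragment = 320 * 1024                    // 327,680 bytes
requested := int64(10 * 1024 * 1024)           // desired chunk size, illustrative
chunkSize := (requested / fragment) * fragment // largest 320k multiple <= requested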
@@ -687,9 +687,8 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
Name: leaf,
}
opts := rest.Opts{
Method: "POST",
Options: options,
Path: "/upload/create_file.json",
Method: "POST",
Path: "/upload/create_file.json",
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, &createFileData, &response)
return o.fs.shouldRetry(resp, err)
@@ -971,9 +970,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
openUploadData := openUpload{SessionID: o.fs.session.SessionID, FileID: o.id, Size: size}
// fs.Debugf(nil, "PreOpen: %#v", openUploadData)
opts := rest.Opts{
Method: "POST",
Options: options,
Path: "/upload/open_file_upload.json",
Method: "POST",
Path: "/upload/open_file_upload.json",
}
resp, err := o.fs.srv.CallJSON(ctx, &opts, &openUploadData, &openResponse)
return o.fs.shouldRetry(resp, err)

@@ -41,7 +41,8 @@ const (
rcloneEncryptedClientSecret = "ej1OIF39VOQQ0PXaSdK9ztkLw3tdLNscW2157TKNQdQKkICR4uU7aFg4eFM"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
decayConstant = 2 // bigger for slower decay, exponential
rootID = "d0" // ID of root folder is always this
rootURL = "https://api.pcloud.com"
)

@@ -88,19 +89,13 @@ func init() {
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}, {
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "d0",
Advanced: true,
}},
})
}

// Options defines the configuration for this backend
type Options struct {
Enc encoder.MultiEncoder `config:"encoding"`
RootFolderID string `config:"root_folder_id"`
Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote pcloud
@@ -270,8 +265,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return err
})

// Get rootFolderID
rootID := f.opt.RootFolderID
// Get rootID
f.dirCache = dircache.New(root, rootID, f)

// Find the current root
@@ -1080,7 +1074,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ContentLength: &size,
Parameters: url.Values{},
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
Options: options,
}
leaf = o.fs.opt.Enc.FromStandardName(leaf)
opts.Parameters.Set("filename", leaf)

@@ -517,7 +517,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
return f.PutUnchecked(ctx, in, src)
default:
return nil, err
}
@@ -1002,7 +1002,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Method: "POST",
Path: "/folder/uploadinfo",
Parameters: o.fs.baseParams(),
Options: options,
MultipartParams: url.Values{
"id": {directoryID},
},

@@ -18,7 +18,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
@@ -35,8 +34,7 @@ type Fs struct {
client *putio.Client // client for making API calls to Put.io
pacer *fs.Pacer // To pace the API calls
dirCache *dircache.DirCache // Map of directory path to directory id
httpClient *http.Client // base http client
oAuthClient *http.Client // http client with oauth Authorization
oAuthClient *http.Client
}

// ------------------------------------------------------------
@@ -61,12 +59,6 @@ func (f *Fs) Features() *fs.Features {
return f.features
}

// parsePath parses a putio 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
// defer log.Trace(name, "root=%v", root)("f=%+v, err=%v", &f, &err)
@@ -76,9 +68,7 @@ func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
if err != nil {
return nil, err
}
root = parsePath(root)
httpClient := fshttp.NewClient(fs.Config)
oAuthClient, _, err := oauthutil.NewClientWithBaseClient(name, m, putioConfig, httpClient)
oAuthClient, _, err := oauthutil.NewClient(name, m, putioConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure putio")
}
@@ -88,7 +78,6 @@ func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
opt: *opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
client: putio.NewClient(oAuthClient),
httpClient: httpClient,
oAuthClient: oAuthClient,
}
p.features = (&fs.Features{
@@ -264,7 +253,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
if err != nil {
return nil, err
}
loc, err := f.createUpload(ctx, leaf, size, directoryID, src.ModTime(ctx), options)
loc, err := f.createUpload(ctx, leaf, size, directoryID, src.ModTime(ctx))
if err != nil {
return nil, err
}
@@ -284,7 +273,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
return f.newObjectWithInfo(ctx, remote, entry)
}

func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID string, modTime time.Time, options []fs.OpenOption) (location string, err error) {
func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID string, modTime time.Time) (location string, err error) {
// defer log.Trace(f, "name=%v, size=%v, parentID=%v, modTime=%v", name, size, parentID, modTime.String())("location=%v, err=%v", location, &err)
err = f.pacer.Call(func() (bool, error) {
req, err := http.NewRequest("POST", "https://upload.put.io/files/", nil)
@@ -299,7 +288,6 @@ func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID
b64parentID := base64.StdEncoding.EncodeToString([]byte(parentID))
b64modifiedAt := base64.StdEncoding.EncodeToString([]byte(modTime.Format(time.RFC3339)))
req.Header.Set("upload-metadata", fmt.Sprintf("name %s,no-torrent %s,parent_id %s,updated-at %s", b64name, b64true, b64parentID, b64modifiedAt))
fs.OpenOptionAddHTTPHeaders(req.Header, options)
resp, err := f.oAuthClient.Do(req)
retry, err := shouldRetry(err)
if retry {
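createUpload above speaks a tus-style upload protocol against upload.put.io: each metadata value is base64-encoded and packed into a single upload-metadata header. A condensed sketch of building that header (the field values are illustrative):

b64 := func(s string) string { return base64.StdEncoding.EncodeToString([]byte(s)) }
req.Header.Set("upload-metadata", fmt.Sprintf(
	"name %s,no-torrent %s,parent_id %s,updated-at %s",
	b64("file.bin"), b64("true"), b64("0"), b64(time.Now().Format(time.RFC3339)),
))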
@@ -241,7 +241,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
req.Header.Set(header, value)
}
// fs.Debugf(o, "opening file: id=%d", o.file.ID)
resp, err = o.fs.httpClient.Do(req)
resp, err = http.DefaultClient.Do(req)
return shouldRetry(err)
})
if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode >= 400 && perr.Response.StatusCode <= 499 {

@@ -864,76 +864,6 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
})
}

// cleanUpBucket removes all pending multipart uploads for a given bucket
func (f *Fs) cleanUpBucket(ctx context.Context, bucket string) (err error) {
fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than 24 hours", bucket)
bucketInit, err := f.svc.Bucket(bucket, f.zone)
if err != nil {
return err
}
maxLimit := int(listLimitSize)
var marker *string
for {
req := qs.ListMultipartUploadsInput{
Limit: &maxLimit,
KeyMarker: marker,
}
var resp *qs.ListMultipartUploadsOutput
resp, err = bucketInit.ListMultipartUploads(&req)
if err != nil {
return errors.Wrap(err, "clean up bucket list multipart uploads")
}
for _, upload := range resp.Uploads {
if upload.Created != nil && upload.Key != nil && upload.UploadID != nil {
age := time.Since(*upload.Created)
if age > 24*time.Hour {
fs.Infof(f, "removing pending multipart upload for %q dated %v (%v ago)", *upload.Key, upload.Created, age)
req := qs.AbortMultipartUploadInput{
UploadID: upload.UploadID,
}
_, abortErr := bucketInit.AbortMultipartUpload(*upload.Key, &req)
if abortErr != nil {
err = errors.Wrapf(abortErr, "failed to remove multipart upload for %q", *upload.Key)
fs.Errorf(f, "%v", err)
}
} else {
fs.Debugf(f, "ignoring pending multipart upload for %q dated %v (%v ago)", *upload.Key, upload.Created, age)
}
}
}
if resp.HasMore != nil && !*resp.HasMore {
break
}
// Use NextMarker if set, otherwise use last Key
if resp.NextKeyMarker == nil || *resp.NextKeyMarker == "" {
fs.Errorf(f, "Expecting NextKeyMarker but didn't find one")
break
} else {
marker = resp.NextKeyMarker
}
}
return err
}

// CleanUp removes all pending multipart uploads
func (f *Fs) CleanUp(ctx context.Context) (err error) {
if f.rootBucket != "" {
return f.cleanUpBucket(ctx, f.rootBucket)
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
cleanErr := f.cleanUpBucket(ctx, f.opt.Enc.FromStandardName(entry.Remote()))
if err != nil {
fs.Errorf(f, "Failed to cleanup bucket: %q", cleanErr)
err = cleanErr
}
}
return err
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
@@ -1160,10 +1090,9 @@ func (o *Object) MimeType(ctx context.Context) string {

// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.Copier = &Fs{}
_ fs.Object = &Object{}
_ fs.ListRer = &Fs{}
_ fs.MimeTyper = &Object{}
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.Object = &Object{}
_ fs.ListRer = &Fs{}
_ fs.MimeTyper = &Object{}
)

@@ -341,17 +341,12 @@ func (mu *multiUploader) abort() error {
}

// multiPartUpload upload a multiple object into QingStor
func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) (err error) {
// Initiate an multi-part upload
func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) error {
var err error
//Initiate an multi-part upload
if err = mu.initiate(); err != nil {
return err
}
defer func() {
// Abort the transfer if returning an error
if err != nil {
_ = mu.abort()
}
}()

ch := make(chan chunk, mu.cfg.concurrency)
for i := 0; i < mu.cfg.concurrency; i++ {
@@ -405,5 +400,9 @@ func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) (err error) {
close(ch)
mu.wg.Wait()
// Complete Multipart Upload
return mu.complete()
err = mu.complete()
if mu.getErr() != nil || err != nil {
_ = mu.abort()
}
return err
}
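The qingstor refactor above swaps an explicit post-completion abort for a named return value plus a deferred cleanup, so every error path aborts the pending multipart upload. The pattern in isolation (initiate, uploadParts, complete and abort are placeholders):

func multiPartUpload() (err error) {
	if err = initiate(); err != nil {
		return err
	}
	defer func() {
		// the deferred func sees the final value of the named return err
		if err != nil {
			_ = abort() // best-effort cleanup of the pending upload
		}
	}()
	if err = uploadParts(); err != nil {
		return err
	}
	return complete()
}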
177
backend/s3/s3.go
@@ -641,7 +641,7 @@ isn't set then "acl" is used instead.`,
}, {
Name: "server_side_encryption",
Help: "The server-side encryption algorithm used when storing this object in S3.",
Provider: "AWS,Ceph,Minio",
Provider: "AWS",
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
@@ -652,22 +652,10 @@ isn't set then "acl" is used instead.`,
Value: "aws:kms",
Help: "aws:kms",
}},
}, {
Name: "sse_customer_algorithm",
Help: "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.",
Provider: "AWS,Ceph,Minio",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}, {
Value: "AES256",
Help: "AES256",
}},
}, {
Name: "sse_kms_key_id",
Help: "If using KMS ID you must provide the ARN of Key.",
Provider: "AWS,Ceph,Minio",
Provider: "AWS",
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
@@ -675,24 +663,6 @@ isn't set then "acl" is used instead.`,
Value: "arn:aws:kms:us-east-1:*",
Help: "arn:aws:kms:*",
}},
}, {
Name: "sse_customer_key",
Help: "If using SSE-C you must provide the secret encyption key used to encrypt/decrypt your data.",
Provider: "AWS,Ceph,Minio",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
}, {
Name: "sse_customer_key_md5",
Help: "If using SSE-C you must provide the secret encryption key MD5 checksum.",
Provider: "AWS,Ceph,Minio",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
}, {
Name: "storage_class",
Help: "The storage class to use when storing new objects in S3.",
@@ -784,13 +754,8 @@ The minimum is 0 and the maximum is 5GB.`,
Default: fs.SizeSuffix(maxSizeForCopy),
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Don't store MD5 checksum with object metadata

Normally rclone will calculate the MD5 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
Name: "disable_checksum",
Help: "Don't store MD5 checksum with object metadata",
Default: false,
Advanced: true,
}, {
@@ -924,9 +889,6 @@ type Options struct {
BucketACL string `config:"bucket_acl"`
ServerSideEncryption string `config:"server_side_encryption"`
SSEKMSKeyID string `config:"sse_kms_key_id"`
SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
SSECustomerKey string `config:"sse_customer_key"`
SSECustomerKeyMD5 string `config:"sse_customer_key_md5"`
StorageClass string `config:"storage_class"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
@@ -946,18 +908,19 @@ type Options struct {

// Fs represents a remote s3 server
type Fs struct {
name string // the name of the remote
root string // root of the bucket - ignore all objects above this
opt Options // parsed options
features *fs.Features // optional features
c *s3.S3 // the connection to the s3 server
ses *session.Session // the s3 session
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache for bucket creation status
pacer *fs.Pacer // To pace the API calls
srv *http.Client // a plain http client
pool *pool.Pool // memory pool
name string // the name of the remote
root string // root of the bucket - ignore all objects above this
opt Options // parsed options
features *fs.Features // optional features
c *s3.S3 // the connection to the s3 server
ses *session.Session // the s3 session
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache for bucket creation status
pacer *fs.Pacer // To pace the API calls
srv *http.Client // a plain http client
poolMu sync.Mutex // mutex protecting memory pools map
pools map[int64]*pool.Pool // memory pools
}

// Object describes a s3 object
@@ -1076,12 +1039,6 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
def := defaults.Get()
def.Config.HTTPClient = lowTimeoutClient

// start a new AWS session
awsSession, err := session.NewSession()
if err != nil {
return nil, nil, errors.Wrap(err, "NewSession")
}

// first provider to supply a credential set "wins"
providers := []credentials.Provider{
// use static credentials if they're present (checked by provider)
@@ -1101,7 +1058,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {

// Pick up IAM role in case we're on EC2
&ec2rolecreds.EC2RoleProvider{
Client: ec2metadata.New(awsSession, &aws.Config{
Client: ec2metadata.New(session.New(), &aws.Config{
HTTPClient: lowTimeoutClient,
}),
ExpiryWindow: 3 * time.Minute,
@@ -1136,7 +1093,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
opt.ForcePathStyle = false
}
awsConfig := aws.NewConfig().
WithMaxRetries(0). // Rely on rclone's retry logic
WithMaxRetries(fs.Config.LowLevelRetries).
WithCredentials(cred).
WithHTTPClient(fshttp.NewClient(fs.Config)).
WithS3ForcePathStyle(opt.ForcePathStyle).
@@ -1243,20 +1200,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}

pc := fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep)))
// Set pacer retries to 0 because we are relying on SDK retry mechanism.
// Setting it to 1 because in context of pacer it means 1 attempt.
pc.SetRetries(1)

f := &Fs{
name: name,
opt: *opt,
c: c,
ses: ses,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
pacer: pc,
cache: bucket.NewCache(),
srv: fshttp.NewClient(fs.Config),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
opt.UploadConcurrency*fs.Config.Transfers,
opt.MemoryPoolUseMmap,
),
pools: make(map[int64]*pool.Pool),
}

f.setRoot(root)
@@ -1506,7 +1463,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
continue
}
remote = remote[len(prefix):]
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
isDirectory := strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
@@ -1738,12 +1695,12 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
if err == nil {
fs.Infof(f, "Bucket %q created with ACL %q", bucket, f.opt.BucketACL)
}
if awsErr, ok := err.(awserr.Error); ok {
if code := awsErr.Code(); code == "BucketAlreadyOwnedByYou" || code == "BucketAlreadyExists" {
if err, ok := err.(awserr.Error); ok {
if err.Code() == "BucketAlreadyOwnedByYou" {
err = nil
}
}
return err
return nil
}, func() (bool, error) {
return f.bucketExists(ctx, bucket)
})
@@ -1947,16 +1904,19 @@ func (f *Fs) Hashes() hash.Set {
}

func (f *Fs) getMemoryPool(size int64) *pool.Pool {
if size == int64(f.opt.ChunkSize) {
return f.pool
}
f.poolMu.Lock()
defer f.poolMu.Unlock()

return pool.New(
time.Duration(f.opt.MemoryPoolFlushTime),
int(size),
f.opt.UploadConcurrency*fs.Config.Transfers,
f.opt.MemoryPoolUseMmap,
)
_, ok := f.pools[size]
if !ok {
f.pools[size] = pool.New(
time.Duration(f.opt.MemoryPoolFlushTime),
int(f.opt.ChunkSize),
f.opt.UploadConcurrency*fs.Config.Transfers,
f.opt.MemoryPoolUseMmap,
)
}
return f.pools[size]
}
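getMemoryPool above keeps one buffer pool per distinct chunk size behind a mutex, creating pools lazily. A usage sketch mirroring how the multipart uploader below consumes it (partSize is a placeholder):

memPool := f.getMemoryPool(partSize) // lazily creates or reuses a pool for this size
buf := memPool.Get()                 // a reusable []byte buffer from the pool
defer memPool.Put(buf)               // hand the buffer back for reuse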
// ------------------------------------------------------------
@@ -2123,35 +2083,22 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Bucket: &bucket,
Key: &bucketPath,
}
if o.fs.opt.SSECustomerAlgorithm != "" {
req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
}
if o.fs.opt.SSECustomerKey != "" {
req.SSECustomerKey = &o.fs.opt.SSECustomerKey
}
if o.fs.opt.SSECustomerKeyMD5 != "" {
req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5
}
httpReq, resp := o.fs.c.GetObjectRequest(&req)
fs.FixRangeOption(options, o.bytes)
for _, option := range options {
switch option.(type) {
case *fs.RangeOption, *fs.SeekOption:
_, value := option.Header()
req.Range = &value
case *fs.HTTPOption:
key, value := option.Header()
httpReq.HTTPRequest.Header.Add(key, value)
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
var resp *s3.GetObjectOutput
err = o.fs.pacer.Call(func() (bool, error) {
var err error
httpReq.HTTPRequest = httpReq.HTTPRequest.WithContext(ctx)
err = httpReq.Send()
resp, err = o.fs.c.GetObjectWithContext(ctx, &req)
return o.fs.shouldRetry(err)
})
if err, ok := err.(awserr.RequestFailure); ok {
@@ -2253,16 +2200,9 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
tokens.Get()
buf := memPool.Get()

free := func() {
// return the memory and token
memPool.Put(buf)
tokens.Put()
}

// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
free()
break
}

@@ -2271,12 +2211,10 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
n, err = readers.ReadFill(in, buf) // this can never return 0, nil
if err == io.EOF {
if n == 0 && partNum != 1 { // end if no data and if not first chunk
free()
break
}
finished = true
} else if err != nil {
free()
return errors.Wrap(err, "multipart upload failed to read source")
}
buf = buf[:n]
@@ -2285,7 +2223,6 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
fs.Debugf(o, "multipart upload starting chunk %d size %v offset %v/%v", partNum, fs.SizeSuffix(n), fs.SizeSuffix(off), fs.SizeSuffix(size))
off += int64(n)
g.Go(func() (err error) {
defer free()
partLength := int64(len(buf))

// create checksum of buffer for integrity checking
@@ -2323,6 +2260,11 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si

return false, nil
})

// return the memory and token
memPool.Put(buf[:partSize])
tokens.Put()

if err != nil {
return errors.Wrap(err, "multipart upload failed to upload part")
}
@@ -2408,15 +2350,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if o.fs.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
}
if o.fs.opt.SSECustomerAlgorithm != "" {
req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
}
if o.fs.opt.SSECustomerKey != "" {
req.SSECustomerKey = &o.fs.opt.SSECustomerKey
}
if o.fs.opt.SSECustomerKeyMD5 != "" {
req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5
}
if o.fs.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
}
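The removed lines show how SSE-C propagates: when the sse_customer_* options are set, the same three fields are copied onto every relevant S3 request (the AWS SDK exposes them on both GetObjectInput and PutObjectInput). The distilled pattern, with opt standing in for the removed option struct fields:

// opt holds the sse_customer_* values; names here are placeholders
if opt.SSECustomerAlgorithm != "" {
	req.SSECustomerAlgorithm = &opt.SSECustomerAlgorithm
}
if opt.SSECustomerKey != "" {
	req.SSECustomerKey = &opt.SSECustomerKey
}
if opt.SSECustomerKeyMD5 != "" {
	req.SSECustomerKeyMD5 = &opt.SSECustomerKeyMD5
}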
@@ -2463,18 +2396,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
httpReq.Header = headers
|
||||
httpReq.ContentLength = size
|
||||
|
||||
for _, option := range options {
|
||||
switch option.(type) {
|
||||
case *fs.HTTPOption:
|
||||
key, value := option.Header()
|
||||
httpReq.Header.Add(key, value)
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err := o.fs.srv.Do(httpReq)
|
||||
if err != nil {
|
||||
|
||||
@@ -1,153 +0,0 @@
package api

// Some API objects are duplicated with only small differences because
// the JSON objects returned by the various api calls are very inconsistent

// AuthenticationRequest contains user credentials
type AuthenticationRequest struct {
Username string `json:"username"`
Password string `json:"password"`
}

// AuthenticationResult is returned by a call to the authentication api
type AuthenticationResult struct {
Token string `json:"token"`
Errors []string `json:"non_field_errors"`
}

// AccountInfo contains simple user properties
type AccountInfo struct {
Usage int64 `json:"usage"`
Total int64 `json:"total"`
Email string `json:"email"`
Name string `json:"name"`
}

// ServerInfo contains server information
type ServerInfo struct {
Version string `json:"version"`
}

// DefaultLibrary when none specified
type DefaultLibrary struct {
ID string `json:"repo_id"`
Exists bool `json:"exists"`
}

// CreateLibraryRequest contains the information needed to create a library
type CreateLibraryRequest struct {
Name string `json:"name"`
Description string `json:"desc"`
Password string `json:"passwd"`
}

// Library properties. Please note that not all properties are useful for rclone
type Library struct {
Encrypted bool `json:"encrypted"`
Owner string `json:"owner"`
ID string `json:"id"`
Size int `json:"size"`
Name string `json:"name"`
Modified int64 `json:"mtime"`
}

// CreateLibrary properties. Seafile is not consistent and returns different types for different API calls
type CreateLibrary struct {
ID string `json:"repo_id"`
Name string `json:"repo_name"`
}

// FileType is either "dir" or "file"
type FileType string

// File types
var (
FileTypeDir FileType = "dir"
FileTypeFile FileType = "file"
)

// FileDetail contains file properties (for older api v2.0)
type FileDetail struct {
ID string `json:"id"`
Type FileType `json:"type"`
Name string `json:"name"`
Size int64 `json:"size"`
Parent string `json:"parent_dir"`
Modified string `json:"last_modified"`
}

// DirEntries contains a list of DirEntry
type DirEntries struct {
Entries []DirEntry `json:"dirent_list"`
}

// DirEntry contains a directory entry
type DirEntry struct {
ID string `json:"id"`
Type FileType `json:"type"`
Name string `json:"name"`
Size int64 `json:"size"`
Path string `json:"parent_dir"`
Modified int64 `json:"mtime"`
}

// Operation is move, copy or rename
type Operation string

// Operations
var (
CopyFileOperation Operation = "copy"
MoveFileOperation Operation = "move"
RenameFileOperation Operation = "rename"
)

// FileOperationRequest is sent to the api to copy, move or rename a file
type FileOperationRequest struct {
Operation Operation `json:"operation"`
DestinationLibraryID string `json:"dst_repo"` // For copy/move operation
DestinationPath string `json:"dst_dir"` // For copy/move operation
NewName string `json:"newname"` // Only to be used by the rename operation
}

// FileInfo is returned by a server file copy/move/rename (new api v2.1)
type FileInfo struct {
Type string `json:"type"`
LibraryID string `json:"repo_id"`
Path string `json:"parent_dir"`
Name string `json:"obj_name"`
ID string `json:"obj_id"`
Size int64 `json:"size"`
}

// CreateDirRequest only contains an operation field
type CreateDirRequest struct {
Operation string `json:"operation"`
}

// DirectoryDetail contains the directory details specific to the getDirectoryDetails call
type DirectoryDetail struct {
ID string `json:"repo_id"`
Name string `json:"name"`
Path string `json:"path"`
}

// ShareLinkRequest contains the information needed to create or list shared links
type ShareLinkRequest struct {
LibraryID string `json:"repo_id"`
Path string `json:"path"`
}

// SharedLink contains the information returned by a call to shared link creation
type SharedLink struct {
Link string `json:"link"`
IsExpired bool `json:"is_expired"`
}

// BatchSourceDestRequest contains JSON parameters for sending a batch copy or move operation
type BatchSourceDestRequest struct {
SrcLibraryID string `json:"src_repo_id"`
SrcParentDir string `json:"src_parent_dir"`
SrcItems []string `json:"src_dirents"`
DstLibraryID string `json:"dst_repo_id"`
DstParentDir string `json:"dst_parent_dir"`
}
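As a concrete illustration of the request types above, this is how a rename built from FileOperationRequest serializes. A minimal sketch: the JSON tags are copied from the definitions above, but omitempty is added here so the unused copy/move fields drop out of the example output, which the original type does not do.

package main

import (
	"encoding/json"
	"fmt"
)

// FileOperationRequest mirrors the seafile API type defined above.
type FileOperationRequest struct {
	Operation            string `json:"operation"`
	DestinationLibraryID string `json:"dst_repo,omitempty"` // copy/move only
	DestinationPath      string `json:"dst_dir,omitempty"`  // copy/move only
	NewName              string `json:"newname,omitempty"`  // rename only
}

func main() {
	req := FileOperationRequest{Operation: "rename", NewName: "report-final.txt"}
	out, _ := json.Marshal(req)
	fmt.Println(string(out)) // {"operation":"rename","newname":"report-final.txt"}
}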
@@ -1,127 +0,0 @@
package seafile

import (
"context"
"io"
"time"

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
)

// Object describes a seafile object (also commonly called a file)
type Object struct {
fs *Fs // what this object is part of
id string // internal ID of object
remote string // The remote path (full path containing library name if target at root)
pathInLibrary string // Path of the object without the library name
size int64 // size of the object
modTime time.Time // modification time of the object
libraryID string // Needed to download the file
}

// ==================== Interface fs.DirEntry ====================

// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}

// Remote returns the remote string
func (o *Object) Remote() string {
return o.remote
}

// ModTime returns last modified time
func (o *Object) ModTime(context.Context) time.Time {
return o.modTime
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
}

// ==================== Interface fs.ObjectInfo ====================

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
return "", hash.ErrUnsupported
}

// Storable says whether this object can be stored
func (o *Object) Storable() bool {
return true
}

// ==================== Interface fs.Object ====================

// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
return fs.ErrorCantSetModTime
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
downloadLink, err := o.fs.getDownloadLink(ctx, o.libraryID, o.pathInLibrary)
if err != nil {
return nil, err
}
reader, err := o.fs.download(ctx, downloadLink, o.Size(), options...)
if err != nil {
return nil, err
}
return reader, nil
}

// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
// The upload sometimes returns a temporary 500 error
// We cannot use the pacer to retry uploading the file as the upload link is single use only
for retry := 0; retry <= 3; retry++ {
uploadLink, err := o.fs.getUploadLink(ctx, o.libraryID)
if err != nil {
return err
}

uploaded, err := o.fs.upload(ctx, in, uploadLink, o.pathInLibrary)
if err == ErrorInternalDuringUpload {
// This is a temporary error, try again with a new upload link
continue
}
if err != nil {
return err
}
// Set the properties from the upload back to the object
o.size = uploaded.Size
o.id = uploaded.ID

return nil
}
return ErrorInternalDuringUpload
}

// Remove this object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.deleteFile(ctx, o.libraryID, o.pathInLibrary)
}

// ==================== Optional Interface fs.IDer ====================

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
return o.id
}
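The Update loop above is a retry with a fresh single-use resource: a seafile upload link can only be used once, so every attempt must request a new link instead of re-sending to the old one. Below is a generic sketch of that shape, with illustrative names rather than seafile's API.

package main

import (
	"errors"
	"fmt"
)

var errTemporary = errors.New("temporary server error")

// uploadWithRetries retries a temporary failure up to maxRetries times,
// acquiring a fresh single-use link for every attempt.
func uploadWithRetries(getLink func() (string, error), upload func(link string) error, maxRetries int) error {
	for retry := 0; retry <= maxRetries; retry++ {
		link, err := getLink() // single use: must be re-acquired each attempt
		if err != nil {
			return err
		}
		err = upload(link)
		if errors.Is(err, errTemporary) {
			continue // temporary error: try again with a new link
		}
		return err // nil on success, or a permanent error
	}
	return errTemporary
}

func main() {
	attempts := 0
	err := uploadWithRetries(
		func() (string, error) { return fmt.Sprintf("https://example.com/upload/%d", attempts), nil },
		func(link string) error {
			attempts++
			if attempts < 3 {
				return errTemporary // fail the first two attempts
			}
			return nil
		},
		3,
	)
	fmt.Println("attempts:", attempts, "err:", err)
}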
@@ -1,67 +0,0 @@
package seafile

import (
"fmt"
"net/url"
"sync"
"time"

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/pacer"
)

const (
minSleep = 100 * time.Millisecond
maxSleep = 10 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)

// Use only one pacer per server URL
var (
pacers map[string]*fs.Pacer
pacerMutex sync.Mutex
)

func init() {
pacers = make(map[string]*fs.Pacer)
}

// getPacer returns the unique pacer for that remote URL
func getPacer(remote string) *fs.Pacer {
pacerMutex.Lock()
defer pacerMutex.Unlock()

remote = parseRemote(remote)
if existing, found := pacers[remote]; found {
return existing
}

pacers[remote] = fs.NewPacer(
pacer.NewDefault(
pacer.MinSleep(minSleep),
pacer.MaxSleep(maxSleep),
pacer.DecayConstant(decayConstant),
),
)
return pacers[remote]
}

// parseRemote formats a remote url into "hostname:port"
func parseRemote(remote string) string {
remoteURL, err := url.Parse(remote)
if err != nil {
// Return a default value in the very unlikely event we cannot parse the remote
fs.Infof(nil, "Cannot parse remote %s", remote)
return "default"
}
host := remoteURL.Hostname()
port := remoteURL.Port()
if port == "" {
if remoteURL.Scheme == "https" {
port = "443"
} else {
port = "80"
}
}
return fmt.Sprintf("%s:%s", host, port)
}
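To make the pacer keying concrete, here is how the parseRemote logic above maps typical remote URLs onto "hostname:port" keys, so every path on the same server shares one pacer. A standalone sketch reproducing the function without the rclone imports:

package main

import (
	"fmt"
	"net/url"
)

// parseRemoteKey mirrors parseRemote above: it reduces a remote URL
// to "hostname:port" so every URL on the same server shares one pacer.
func parseRemoteKey(remote string) string {
	remoteURL, err := url.Parse(remote)
	if err != nil {
		return "default" // fall back when the remote cannot be parsed
	}
	host, port := remoteURL.Hostname(), remoteURL.Port()
	if port == "" {
		if remoteURL.Scheme == "https" {
			port = "443"
		} else {
			port = "80"
		}
	}
	return fmt.Sprintf("%s:%s", host, port)
}

func main() {
	for _, remote := range []string{
		"https://cloud.example.com/seafile", // -> cloud.example.com:443
		"http://cloud.example.com:8080",     // -> cloud.example.com:8080
		"http://cloud.example.com/a/b/c",    // -> cloud.example.com:80
	} {
		fmt.Printf("%-36s -> %s\n", remote, parseRemoteKey(remote))
	}
}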
File diff suppressed because it is too large
@@ -1,123 +0,0 @@
package seafile

import (
"path"
"testing"

"github.com/stretchr/testify/assert"
)

type pathData struct {
configLibrary string // Library specified in the config
configRoot string // Root directory specified in the config
argumentPath string // Path given as an argument in the command line
expectedLibrary string
expectedPath string
}

// Test the method to split a library name and a path
// from a mix of configuration data and the command line path argument
func TestSplitPath(t *testing.T) {
testData := []pathData{
pathData{
configLibrary: "",
configRoot: "",
argumentPath: "",
expectedLibrary: "",
expectedPath: "",
},
pathData{
configLibrary: "",
configRoot: "",
argumentPath: "Library",
expectedLibrary: "Library",
expectedPath: "",
},
pathData{
configLibrary: "",
configRoot: "",
argumentPath: path.Join("Library", "path", "to", "file"),
expectedLibrary: "Library",
expectedPath: path.Join("path", "to", "file"),
},
pathData{
configLibrary: "Library",
configRoot: "",
argumentPath: "",
expectedLibrary: "Library",
expectedPath: "",
},
pathData{
configLibrary: "Library",
configRoot: "",
argumentPath: "path",
expectedLibrary: "Library",
expectedPath: "path",
},
pathData{
configLibrary: "Library",
configRoot: "",
argumentPath: path.Join("path", "to", "file"),
expectedLibrary: "Library",
expectedPath: path.Join("path", "to", "file"),
},
pathData{
configLibrary: "Library",
configRoot: "root",
argumentPath: "",
expectedLibrary: "Library",
expectedPath: "root",
},
pathData{
configLibrary: "Library",
configRoot: path.Join("root", "path"),
argumentPath: "",
expectedLibrary: "Library",
expectedPath: path.Join("root", "path"),
},
pathData{
configLibrary: "Library",
configRoot: "root",
argumentPath: "path",
expectedLibrary: "Library",
expectedPath: path.Join("root", "path"),
},
pathData{
configLibrary: "Library",
configRoot: "root",
argumentPath: path.Join("path", "to", "file"),
expectedLibrary: "Library",
expectedPath: path.Join("root", "path", "to", "file"),
},
pathData{
configLibrary: "Library",
configRoot: path.Join("root", "path"),
argumentPath: path.Join("subpath", "to", "file"),
expectedLibrary: "Library",
expectedPath: path.Join("root", "path", "subpath", "to", "file"),
},
}
for _, test := range testData {
fs := &Fs{
libraryName: test.configLibrary,
rootDirectory: test.configRoot,
}
libraryName, path := fs.splitPath(test.argumentPath)

assert.Equal(t, test.expectedLibrary, libraryName)
assert.Equal(t, test.expectedPath, path)
}
}

func TestSplitPathIntoSlice(t *testing.T) {
testData := map[string][]string{
"1": {"1"},
"/1": {"1"},
"/1/": {"1"},
"1/2/3": {"1", "2", "3"},
}
for input, expected := range testData {
output := splitPath(input)
assert.Equal(t, expected, output)
}
}
@@ -1,17 +0,0 @@
// Test Seafile filesystem interface
package seafile_test

import (
"testing"

"github.com/rclone/rclone/backend/seafile"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestSeafile:",
NilObject: (*seafile.Object)(nil),
})
}
File diff suppressed because it is too large
@@ -1102,18 +1102,19 @@ func (o *Object) stat() error {
//
// it also updates the info field
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
if o.fs.opt.SetModTime {
c, err := o.fs.getSftpConnection()
if err != nil {
return errors.Wrap(err, "SetModTime")
}
err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
o.fs.putSftpConnection(&c, err)
if err != nil {
return errors.Wrap(err, "SetModTime failed")
}
if !o.fs.opt.SetModTime {
return nil
}
err := o.stat()
c, err := o.fs.getSftpConnection()
if err != nil {
return errors.Wrap(err, "SetModTime")
}
err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
o.fs.putSftpConnection(&c, err)
if err != nil {
return errors.Wrap(err, "SetModTime failed")
}
err = o.stat()
if err != nil {
return errors.Wrap(err, "SetModTime stat failed")
}

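Both the old and new versions of this hunk share the same connection-pool discipline: check a connection out, use it, and hand it straight back together with the error so the pool can drop broken connections. A minimal sketch of that shape; the pool and chtimes types here are illustrative stand-ins for rclone's sftp internals.

package main

import (
	"fmt"
	"time"
)

// conn is a stand-in for an established SFTP connection.
type conn struct{}

func (c *conn) chtimes(path string, t time.Time) error { return nil }

// pool hands out connections and drops any returned with an error.
type pool struct{ free []*conn }

func (p *pool) get() *conn {
	if len(p.free) == 0 {
		return &conn{} // dial a new connection
	}
	c := p.free[len(p.free)-1]
	p.free = p.free[:len(p.free)-1]
	return c
}

// put always takes the connection back, along with the error from its
// last use, so a possibly broken connection is dropped instead of reused.
func (p *pool) put(c *conn, err error) {
	if err != nil {
		return // possibly broken: let it be garbage collected
	}
	p.free = append(p.free, c)
}

func setModTime(p *pool, path string, t time.Time) error {
	c := p.get()
	err := c.chtimes(path, t)
	p.put(c, err) // hand the connection straight back with the error
	if err != nil {
		return fmt.Errorf("SetModTime failed: %w", err)
	}
	return nil
}

func main() {
	p := &pool{}
	fmt.Println(setModTime(p, "/tmp/file", time.Now()))
}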
@@ -1429,9 +1429,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var resp *http.Response
var info api.UploadSpecification
opts := rest.Opts{
Method: "POST",
Path: "/Items(" + directoryID + ")/Upload2",
Options: options,
Method: "POST",
Path: "/Items(" + directoryID + ")/Upload2",
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &req, &info)

@@ -733,7 +733,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
return f.PutUnchecked(ctx, in, src)
default:
return nil, err
}
@@ -1320,7 +1320,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
RootURL: o.id,
Path: "/data",
NoResponse: true,
Options: options,
Body: in,
}
if size >= 0 {

@@ -1285,7 +1285,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
m.SetModTime(modTime)
contentType := fs.MimeType(ctx, src)
headers := m.ObjectHeaders()
fs.OpenOptionAddHeaders(options, headers)
uniquePrefix := ""
if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
uniquePrefix, err = o.updateChunks(in, headers, size, contentType)

@@ -1,684 +0,0 @@
// +build go1.13,!plan9

// Package tardigrade provides an interface to Tardigrade decentralized object storage.
package tardigrade

import (
"context"
"fmt"
"io"
"log"
"path"
"strings"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/bucket"
"golang.org/x/text/unicode/norm"

"storj.io/uplink"
)

const (
existingProvider = "existing"
newProvider = "new"
)

var satMap = map[string]string{
"us-central-1.tardigrade.io": "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
"europe-west-1.tardigrade.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
"asia-east-1.tardigrade.io": "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
}

// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "tardigrade",
Description: "Tardigrade Decentralized Cloud Storage",
NewFs: NewFs,
Config: func(name string, configMapper configmap.Mapper) {
provider, _ := configMapper.Get(fs.ConfigProvider)

config.FileDeleteKey(name, fs.ConfigProvider)

if provider == newProvider {
satelliteString, _ := configMapper.Get("satellite_address")
apiKey, _ := configMapper.Get("api_key")
passphrase, _ := configMapper.Get("passphrase")

// satelliteString always contains a default value and passphrase can be empty
if apiKey == "" {
return
}

satellite, found := satMap[satelliteString]
if !found {
satellite = satelliteString
}

access, err := uplink.RequestAccessWithPassphrase(context.TODO(), satellite, apiKey, passphrase)
if err != nil {
log.Fatalf("Couldn't create access grant: %v", err)
}

serializedAccess, err := access.Serialize()
if err != nil {
log.Fatalf("Couldn't serialize access grant: %v", err)
}
configMapper.Set("satellite_address", satellite)
configMapper.Set("access_grant", serializedAccess)
} else if provider == existingProvider {
config.FileDeleteKey(name, "satellite_address")
config.FileDeleteKey(name, "api_key")
config.FileDeleteKey(name, "passphrase")
} else {
log.Fatalf("Invalid provider type: %s", provider)
}
},
Options: []fs.Option{
{
Name: fs.ConfigProvider,
Help: "Choose an authentication method.",
Required: true,
Default: existingProvider,
Examples: []fs.OptionExample{{
Value: "existing",
Help: "Use an existing access grant.",
}, {
Value: newProvider,
Help: "Create a new access grant from satellite address, API key, and passphrase.",
},
}},
{
Name: "access_grant",
Help: "Access Grant.",
Required: false,
Provider: "existing",
},
{
Name: "satellite_address",
Help: "Satellite Address. Custom satellite address should match the format: <nodeid>@<address>:<port>.",
Required: false,
Provider: newProvider,
Default: "us-central-1.tardigrade.io",
Examples: []fs.OptionExample{{
Value: "us-central-1.tardigrade.io",
Help: "US Central 1",
}, {
Value: "europe-west-1.tardigrade.io",
Help: "Europe West 1",
}, {
Value: "asia-east-1.tardigrade.io",
Help: "Asia East 1",
},
},
},
{
Name: "api_key",
Help: "API Key.",
Required: false,
Provider: newProvider,
},
{
Name: "passphrase",
Help: "Encryption Passphrase. To access existing objects enter passphrase used for uploading.",
Required: false,
Provider: newProvider,
},
},
})
}

// Options defines the configuration for this backend
type Options struct {
Access string `config:"access_grant"`

SatelliteAddress string `config:"satellite_address"`
APIKey string `config:"api_key"`
Passphrase string `config:"passphrase"`
}

// Fs represents a remote to Tardigrade
type Fs struct {
name string // the name of the remote
root string // root of the filesystem

opts Options // parsed options
features *fs.Features // optional features

access *uplink.Access // parsed scope

project *uplink.Project // project client
}

// Check the interfaces are satisfied.
var (
_ fs.Fs = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.PutStreamer = &Fs{}
)

// NewFs creates a filesystem backed by Tardigrade.
func NewFs(name, root string, m configmap.Mapper) (_ fs.Fs, err error) {
ctx := context.Background()

// Setup filesystem and connection to Tardigrade
root = norm.NFC.String(root)
root = strings.Trim(root, "/")

f := &Fs{
name: name,
root: root,
}

// Parse config into Options struct
err = configstruct.Set(m, &f.opts)
if err != nil {
return nil, err
}

// Parse access
var access *uplink.Access

if f.opts.Access != "" {
access, err = uplink.ParseAccess(f.opts.Access)
if err != nil {
return nil, errors.Wrap(err, "tardigrade: access")
}
}

if access == nil && f.opts.SatelliteAddress != "" && f.opts.APIKey != "" && f.opts.Passphrase != "" {
access, err = uplink.RequestAccessWithPassphrase(ctx, f.opts.SatelliteAddress, f.opts.APIKey, f.opts.Passphrase)
if err != nil {
return nil, errors.Wrap(err, "tardigrade: access")
}

serializedAccess, err := access.Serialize()
if err != nil {
return nil, errors.Wrap(err, "tardigrade: access")
}

err = config.SetValueAndSave(f.name, "access_grant", serializedAccess)
if err != nil {
return nil, errors.Wrap(err, "tardigrade: access")
}
}

if access == nil {
return nil, errors.New("access not found")
}

f.access = access

f.features = (&fs.Features{
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(f)

project, err := f.connect(ctx)
if err != nil {
return nil, err
}
f.project = project

// Root validation needs to check the following: If a bucket path is
// specified and exists, then the object must be a directory.
//
// NOTE: At this point this must return the filesystem object we've
// created so far even if there is an error.
if root != "" {
bucketName, bucketPath := bucket.Split(root)

if bucketName != "" && bucketPath != "" {
_, err = project.StatBucket(ctx, bucketName)
if err != nil {
return f, errors.Wrap(err, "tardigrade: bucket")
}

object, err := project.StatObject(ctx, bucketName, bucketPath)
if err == nil {
if !object.IsPrefix {
// If the root is actually a file we
// need to return the *parent*
// directory of the root instead and an
// error that the original root
// requested is a file.
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
}
f.root = newRoot

return f, fs.ErrorIsFile
}
}
}
}

return f, nil
}

// connect opens a connection to Tardigrade.
func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
fs.Debugf(f, "connecting...")
defer fs.Debugf(f, "connected: %+v", err)

cfg := uplink.Config{}

project, err = cfg.OpenProject(ctx, f.access)
if err != nil {
return nil, errors.Wrap(err, "tardigrade: project")
}

return
}

// absolute computes the absolute bucket name and path from the filesystem root
// and the relative path provided.
func (f *Fs) absolute(relative string) (bucketName, bucketPath string) {
bn, bp := bucket.Split(path.Join(f.root, relative))

// NOTE: Technically libuplink does not care about the encoding. It is
// happy to work with paths as opaque byte sequences. However, rclone
// has a test that requires two paths with the same normalized form
// (but different un-normalized forms) to point to the same file. This
// means we have to normalize before we interact with libuplink.
return norm.NFC.String(bn), norm.NFC.String(bp)
}

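To see why the normalization in absolute matters: the same visible name can arrive in precomposed (NFC) or decomposed (NFD) form, and only after norm.NFC.String do the two spellings compare equal. A small self-contained check:

package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	nfc := "caf\u00e9"  // "café" as one precomposed rune (NFC)
	nfd := "cafe\u0301" // "café" as 'e' plus a combining accent (NFD)

	fmt.Println(nfc == nfd)                                   // false: different byte sequences
	fmt.Println(norm.NFC.String(nfc) == norm.NFC.String(nfd)) // true: same normalized form
}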
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}

// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("FS sj://%s", f.root)
}

// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return time.Nanosecond
}

// Hashes returns the supported hash types of the filesystem.
func (f *Fs) Hashes() hash.Set {
return hash.NewHashSet()
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}

// List the objects and directories in relative into entries. The entries can
// be returned in any order but should be for a complete directory.
//
// relative should be "" to list the root, and should not have trailing
// slashes.
//
// This should return fs.ErrDirNotFound if the directory isn't found.
func (f *Fs) List(ctx context.Context, relative string) (entries fs.DirEntries, err error) {
fs.Debugf(f, "ls ./%s", relative)

bucketName, bucketPath := f.absolute(relative)

defer func() {
if errors.Is(err, uplink.ErrBucketNotFound) {
err = fs.ErrorDirNotFound
}
}()

if bucketName == "" {
if bucketPath != "" {
return nil, fs.ErrorListBucketRequired
}

return f.listBuckets(ctx)
}

return f.listObjects(ctx, relative, bucketName, bucketPath)
}

func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
fs.Debugf(f, "BKT ls")

buckets := f.project.ListBuckets(ctx, nil)

for buckets.Next() {
bucket := buckets.Item()

entries = append(entries, fs.NewDir(bucket.Name, bucket.Created))
}

return entries, buckets.Err()
}

// newDirEntry creates a directory entry from an uplink object.
//
// NOTE: Getting the exact behavior required by rclone is somewhat tricky. The
// path manipulation here is necessary to cover all the different ways the
// filesystem and object could be initialized and combined.
func (f *Fs) newDirEntry(relative, prefix string, object *uplink.Object) fs.DirEntry {
if object.IsPrefix {
// . The entry must include the relative path as its prefix. Depending on
// | what is being listed and how the filesystem root was initialized the
// | relative path may be empty (and so we use path joining here to ensure
// | we don't end up with an empty path segment).
// |
// | . Remove the prefix used during listing.
// | |
// | | . Remove the trailing slash.
// | | |
// v v v
return fs.NewDir(path.Join(relative, object.Key[len(prefix):len(object.Key)-1]), object.System.Created)
}

return newObjectFromUplink(f, relative, object)
}

func (f *Fs) listObjects(ctx context.Context, relative, bucketName, bucketPath string) (entries fs.DirEntries, err error) {
fs.Debugf(f, "OBJ ls ./%s (%q, %q)", relative, bucketName, bucketPath)

opts := &uplink.ListObjectsOptions{
Prefix: newPrefix(bucketPath),

System: true,
Custom: true,
}
fs.Debugf(f, "opts %+v", opts)

objects := f.project.ListObjects(ctx, bucketName, opts)

for objects.Next() {
entries = append(entries, f.newDirEntry(relative, opts.Prefix, objects.Item()))
}

err = objects.Err()
if err != nil {
return nil, err
}

return entries, nil
}

// ListR lists the objects and directories of the Fs starting from dir
// recursively into out.
//
// relative should be "" to start from the root, and should not have trailing
// slashes.
//
// This should return ErrDirNotFound if the directory isn't found.
//
// It should call callback for each tranche of entries read. These need not be
// returned in any particular order. If callback returns an error then the
// listing will stop immediately.
//
// Don't implement this unless you have a more efficient way of listing
// recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, relative string, callback fs.ListRCallback) (err error) {
fs.Debugf(f, "ls -R ./%s", relative)

bucketName, bucketPath := f.absolute(relative)

defer func() {
if errors.Is(err, uplink.ErrBucketNotFound) {
err = fs.ErrorDirNotFound
}
}()

if bucketName == "" {
if bucketPath != "" {
return fs.ErrorListBucketRequired
}

return f.listBucketsR(ctx, callback)
}

return f.listObjectsR(ctx, relative, bucketName, bucketPath, callback)
}

func (f *Fs) listBucketsR(ctx context.Context, callback fs.ListRCallback) (err error) {
fs.Debugf(f, "BKT ls -R")

buckets := f.project.ListBuckets(ctx, nil)

for buckets.Next() {
bucket := buckets.Item()

err = f.listObjectsR(ctx, bucket.Name, bucket.Name, "", callback)
if err != nil {
return err
}
}

return buckets.Err()
}

func (f *Fs) listObjectsR(ctx context.Context, relative, bucketName, bucketPath string, callback fs.ListRCallback) (err error) {
fs.Debugf(f, "OBJ ls -R ./%s (%q, %q)", relative, bucketName, bucketPath)

opts := &uplink.ListObjectsOptions{
Prefix: newPrefix(bucketPath),
Recursive: true,

System: true,
Custom: true,
}

objects := f.project.ListObjects(ctx, bucketName, opts)

for objects.Next() {
object := objects.Item()

err = callback(fs.DirEntries{f.newDirEntry(relative, opts.Prefix, object)})
if err != nil {
return err
}
}

err = objects.Err()
if err != nil {
return err
}

return nil
}

// NewObject finds the Object at relative. If it can't be found it returns the
// error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, relative string) (_ fs.Object, err error) {
fs.Debugf(f, "stat ./%s", relative)

bucketName, bucketPath := f.absolute(relative)

object, err := f.project.StatObject(ctx, bucketName, bucketPath)
if err != nil {
fs.Debugf(f, "err: %+v", err)

if errors.Is(err, uplink.ErrObjectNotFound) {
return nil, fs.ErrorObjectNotFound
}
return nil, err
}

return newObjectFromUplink(f, relative, object), nil
}

// Put in to the remote path with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should
// either return an error or upload it properly (rather than e.g. calling
// panic).
//
// May create the object even if it returns an error - if so will return the
// object and the error, otherwise will return nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (_ fs.Object, err error) {
fs.Debugf(f, "cp input ./%s # %+v %d", src.Remote(), options, src.Size())

// Reject options we don't support.
for _, option := range options {
if option.Mandatory() {
fs.Errorf(f, "Unsupported mandatory option: %v", option)

return nil, errors.New("unsupported mandatory option")
}
}

bucketName, bucketPath := f.absolute(src.Remote())

upload, err := f.project.UploadObject(ctx, bucketName, bucketPath, nil)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
aerr := upload.Abort()
if aerr != nil {
fs.Errorf(f, "cp input ./%s %+v: %+v", src.Remote(), options, aerr)
}
}
}()

err = upload.SetCustomMetadata(ctx, uplink.CustomMetadata{
"rclone:mtime": src.ModTime(ctx).Format(time.RFC3339Nano),
})
if err != nil {
return nil, err
}

_, err = io.Copy(upload, in)
if err != nil {
err = fserrors.RetryError(err)
fs.Errorf(f, "cp input ./%s %+v: %+v\n", src.Remote(), options, err)

return nil, err
}

err = upload.Commit()
if err != nil {
if errors.Is(err, uplink.ErrBucketNotFound) {
// Rclone assumes the backend will create the bucket if it does not exist yet.
// Here we create the bucket and return a retry error for rclone to retry the upload.
_, err = f.project.EnsureBucket(ctx, bucketName)
if err != nil {
return nil, err
}
err = fserrors.RetryError(errors.New("bucket was not available, now created, the upload must be retried"))
}
return nil, err
}

return newObjectFromUplink(f, "", upload.Info()), nil
}

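Put's Commit error handling encodes an rclone convention: backends are expected to create buckets on demand, so on ErrBucketNotFound the backend creates the bucket and returns a retryable error, letting the upper layers re-drive the upload against the new bucket. A schematic sketch of that control flow; the types and helpers here are illustrative, not rclone's fserrors API.

package main

import (
	"errors"
	"fmt"
)

var errBucketNotFound = errors.New("bucket not found")

// retryableError marks an error that the caller should retry.
type retryableError struct{ error }

func commitUpload(bucketExists *bool) error {
	if !*bucketExists {
		return errBucketNotFound
	}
	return nil
}

func putOnce(bucketExists *bool) error {
	err := commitUpload(bucketExists)
	if errors.Is(err, errBucketNotFound) {
		*bucketExists = true // ensure the bucket: create it now
		return retryableError{errors.New("bucket was not available, now created, the upload must be retried")}
	}
	return err
}

func main() {
	exists := false
	err := putOnce(&exists)
	if _, ok := err.(retryableError); ok {
		fmt.Println("retrying after:", err)
		err = putOnce(&exists) // the retry succeeds against the new bucket
	}
	fmt.Println("final:", err)
}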
// PutStream uploads to the remote path with the modTime given of indeterminate
// size.
//
// May create the object even if it returns an error - if so will return the
// object and the error, otherwise will return nil and the error.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (_ fs.Object, err error) {
return f.Put(ctx, in, src, options...)
}

// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, relative string) (err error) {
fs.Debugf(f, "mkdir -p ./%s", relative)

bucketName, _ := f.absolute(relative)

_, err = f.project.EnsureBucket(ctx, bucketName)

return err
}

// Rmdir removes the directory (container, bucket)
//
// NOTE: Despite code documentation to the contrary, this method should not
// return an error if the directory does not exist.
func (f *Fs) Rmdir(ctx context.Context, relative string) (err error) {
fs.Debugf(f, "rmdir ./%s", relative)

bucketName, bucketPath := f.absolute(relative)

if bucketPath != "" {
// If we can successfully stat it, then it is an object (and not a prefix).
_, err := f.project.StatObject(ctx, bucketName, bucketPath)
if err != nil {
if errors.Is(err, uplink.ErrObjectNotFound) {
// At this point we know it is not an object,
// but we don't know if it is a prefix for one.
//
// We check this by doing a listing and if we
// get any results back, then we know this is a
// valid prefix (which implies the directory is
// not empty).
opts := &uplink.ListObjectsOptions{
Prefix: newPrefix(bucketPath),

System: true,
Custom: true,
}

objects := f.project.ListObjects(ctx, bucketName, opts)

if objects.Next() {
return fs.ErrorDirectoryNotEmpty
}

return objects.Err()
}

return err
}

return fs.ErrorIsFile
}

_, err = f.project.DeleteBucket(ctx, bucketName)
if err != nil {
if errors.Is(err, uplink.ErrBucketNotFound) {
return fs.ErrorDirNotFound
}

if errors.Is(err, uplink.ErrBucketNotEmpty) {
return fs.ErrorDirectoryNotEmpty
}

return err
}

return nil
}

// newPrefix returns a new prefix for listing conforming to the libuplink
// requirements. In particular, libuplink requires a trailing slash for
// listings, but rclone does not always provide one. Further, depending on how
// the path was initially constructed, normalization may have removed it (e.g. a
// trailing slash from the CLI is removed before it ever gets to the backend
// code).
func newPrefix(prefix string) string {
if prefix == "" {
return prefix
}

if prefix[len(prefix)-1] == '/' {
return prefix
}

return prefix + "/"
}
@@ -1,204 +0,0 @@
// +build go1.13,!plan9

package tardigrade

import (
"context"
"io"
"path"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/bucket"
"golang.org/x/text/unicode/norm"

"storj.io/uplink"
)

// Object describes a Tardigrade object
type Object struct {
fs *Fs

absolute string

size int64
created time.Time
modified time.Time
}

// Check the interfaces are satisfied.
var _ fs.Object = &Object{}

// newObjectFromUplink creates a new object from a Tardigrade uplink object.
func newObjectFromUplink(f *Fs, relative string, object *uplink.Object) *Object {
// Attempt to use the modified time from the metadata. Otherwise
// fall back to the server time.
modified := object.System.Created

if modifiedStr, ok := object.Custom["rclone:mtime"]; ok {
var err error

modified, err = time.Parse(time.RFC3339Nano, modifiedStr)
if err != nil {
modified = object.System.Created
}
}

bucketName, _ := bucket.Split(path.Join(f.root, relative))

return &Object{
fs: f,

absolute: norm.NFC.String(bucketName + "/" + object.Key),

size: object.System.ContentLength,
created: object.System.Created,
modified: modified,
}
}

// String returns a description of the Object
func (o *Object) String() string {
if o == nil {
return "<nil>"
}

return o.Remote()
}

// Remote returns the remote path
func (o *Object) Remote() string {
// It is possible that we have an empty root (meaning the filesystem is
// rooted at the project level). In this case the relative path is just
// the full absolute path to the object (including the bucket name).
if o.fs.root == "" {
return o.absolute
}

// At this point we know that the filesystem itself is at least a
// bucket name (and possibly a prefix path).
//
// . This is necessary to remove the slash.
// |
// v
return o.absolute[len(o.fs.root)+1:]
}

// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modified
}

// Size returns the size of the file
func (o *Object) Size() int64 {
return o.size
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.fs
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ty hash.Type) (_ string, err error) {
fs.Debugf(o, "%s", ty)

return "", hash.ErrUnsupported
}

// Storable says whether this object can be stored
func (o *Object) Storable() bool {
return true
}

// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
fs.Debugf(o, "touch -d %q sj://%s", t, o.absolute)

return fs.ErrorCantSetModTime
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (_ io.ReadCloser, err error) {
fs.Debugf(o, "cat sj://%s # %+v", o.absolute, options)

bucketName, bucketPath := bucket.Split(o.absolute)

// Convert the semantics of HTTP range headers to an offset and length
// that libuplink can use.
var (
offset int64 = 0
length int64 = -1
)

for _, option := range options {
switch opt := option.(type) {
case *fs.RangeOption:
s := opt.Start >= 0
e := opt.End >= 0

switch {
case s && e:
offset = opt.Start
length = (opt.End + 1) - opt.Start
case s && !e:
offset = opt.Start
case !s && e:
object, err := o.fs.project.StatObject(ctx, bucketName, bucketPath)
if err != nil {
return nil, err
}

offset = object.System.ContentLength - opt.End
length = opt.End
}
case *fs.SeekOption:
offset = opt.Offset
default:
if option.Mandatory() {
fs.Errorf(o, "Unsupported mandatory option: %v", option)

return nil, errors.New("unsupported mandatory option")
}
}
}

fs.Debugf(o, "range %d + %d", offset, length)

return o.fs.project.DownloadObject(ctx, bucketName, bucketPath, &uplink.DownloadOptions{
Offset: offset,
Length: length,
})
}

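The range handling above maps rclone's three range shapes onto libuplink's (offset, length) pair: a bounded range start..end becomes offset=start and length=end-start+1, an open-ended range keeps length=-1 (read to the end), and a suffix range of the last N bytes first needs the object size from StatObject. A small sketch of just the arithmetic, with size standing in for the StatObject result:

package main

import "fmt"

// rangeToOffsetLength converts an HTTP-style byte range to the
// (offset, length) pair used by the download call above.
// start < 0 means "no start given", end < 0 means "no end given";
// size is only consulted for suffix ranges (the last `end` bytes).
func rangeToOffsetLength(start, end, size int64) (offset, length int64) {
	offset, length = 0, -1 // default: the whole object
	s, e := start >= 0, end >= 0
	switch {
	case s && e:
		offset = start
		length = (end + 1) - start
	case s && !e:
		offset = start
	case !s && e:
		offset = size - end // suffix: the last `end` bytes
		length = end
	}
	return offset, length
}

func main() {
	const size = 1000
	for _, r := range [][2]int64{{0, 499}, {500, -1}, {-1, 100}} {
		off, length := rangeToOffsetLength(r[0], r[1], size)
		fmt.Printf("range(%d,%d) -> offset=%d length=%d\n", r[0], r[1], off, length)
	}
}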
// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
fs.Debugf(o, "cp input ./%s %+v", src.Remote(), options)

oNew, err := o.fs.Put(ctx, in, src, options...)

if err == nil {
*o = *(oNew.(*Object))
}

return err
}

// Remove this object.
func (o *Object) Remove(ctx context.Context) (err error) {
fs.Debugf(o, "rm sj://%s", o.absolute)

bucketName, bucketPath := bucket.Split(o.absolute)

_, err = o.fs.project.DeleteObject(ctx, bucketName, bucketPath)

return err
}
@@ -1,19 +0,0 @@
// +build go1.13,!plan9

// Test Tardigrade filesystem interface
package tardigrade_test

import (
"testing"

"github.com/rclone/rclone/backend/tardigrade"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestTardigrade:",
NilObject: (*tardigrade.Object)(nil),
})
}
@@ -1,3 +0,0 @@
// +build !go1.13 plan9

package tardigrade
@@ -1,167 +0,0 @@
package union

import (
"bufio"
"context"
"io"
"sync"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)

// Object describes a union Object
//
// This is a wrapped object which returns the Union Fs as its parent
type Object struct {
*upstream.Object
fs *Fs // what this object is part of
co []upstream.Entry
}

// Directory describes a union Directory
//
// This is a wrapped object that contains all candidates
type Directory struct {
*upstream.Directory
cd []upstream.Entry
}

type entry interface {
upstream.Entry
candidates() []upstream.Entry
}

// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() *upstream.Object {
return o.Object
}

// Fs returns the union Fs as the parent
func (o *Object) Fs() fs.Info {
return o.fs
}

func (o *Object) candidates() []upstream.Entry {
return o.co
}

func (d *Directory) candidates() []upstream.Entry {
return d.cd
}

// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
entries, err := o.fs.actionEntries(o.candidates()...)
if err != nil {
return err
}
if len(entries) == 1 {
obj := entries[0].(*upstream.Object)
return obj.Update(ctx, in, src, options...)
}
// Get multiple readers
readers := make([]io.Reader, len(entries))
writers := make([]io.Writer, len(entries))
errs := Errors(make([]error, len(entries)+1))
for i := range entries {
r, w := io.Pipe()
bw := bufio.NewWriter(w)
readers[i], writers[i] = r, bw
defer func() {
err := w.Close()
if err != nil {
panic(err)
}
}()
}
go func() {
mw := io.MultiWriter(writers...)
es := make([]error, len(writers)+1)
_, es[len(es)-1] = io.Copy(mw, in)
for i, bw := range writers {
es[i] = bw.(*bufio.Writer).Flush()
}
errs[len(entries)] = Errors(es).Err()
}()
// Multi-threading
multithread(len(entries), func(i int) {
if o, ok := entries[i].(*upstream.Object); ok {
err := o.Update(ctx, readers[i], src, options...)
errs[i] = errors.Wrap(err, o.UpstreamFs().Name())
} else {
errs[i] = fs.ErrorNotAFile
}
})
return errs.Err()
}

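Update above fans a single input stream out to every writable upstream: each upstream gets its own io.Pipe, one producer goroutine copies the source into an io.MultiWriter over all the buffered pipe writers, and per-upstream consumers read the pipe ends in parallel. A standalone sketch of the same fan-out, writing to byte buffers instead of remote upstreams:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"strings"
	"sync"
)

// fanOut copies src into every sink concurrently via pipes.
func fanOut(src io.Reader, sinks []io.Writer) error {
	writers := make([]io.Writer, len(sinks))
	pipeWriters := make([]*io.PipeWriter, len(sinks))
	var wg sync.WaitGroup

	for i, sink := range sinks {
		r, w := io.Pipe()
		pipeWriters[i] = w
		writers[i] = bufio.NewWriter(w)

		wg.Add(1)
		go func(sink io.Writer, r io.Reader) { // one consumer per sink
			defer wg.Done()
			_, _ = io.Copy(sink, r)
		}(sink, r)
	}

	// Single producer: duplicate src across all pipes.
	mw := io.MultiWriter(writers...)
	_, err := io.Copy(mw, src)
	for i, bw := range writers {
		if ferr := bw.(*bufio.Writer).Flush(); err == nil {
			err = ferr
		}
		pipeWriters[i].Close() // EOF for the consumer
	}
	wg.Wait()
	return err
}

func main() {
	var a, b bytes.Buffer
	err := fanOut(strings.NewReader("replicate me"), []io.Writer{&a, &b})
	fmt.Println(a.String(), "|", b.String(), "|", err)
}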
// Remove candidate objects selected by ACTION policy
func (o *Object) Remove(ctx context.Context) error {
entries, err := o.fs.actionEntries(o.candidates()...)
if err != nil {
return err
}
errs := Errors(make([]error, len(entries)))
multithread(len(entries), func(i int) {
if o, ok := entries[i].(*upstream.Object); ok {
err := o.Remove(ctx)
errs[i] = errors.Wrap(err, o.UpstreamFs().Name())
} else {
errs[i] = fs.ErrorNotAFile
}
})
return errs.Err()
}

// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
entries, err := o.fs.actionEntries(o.candidates()...)
if err != nil {
return err
}
var wg sync.WaitGroup
errs := Errors(make([]error, len(entries)))
multithread(len(entries), func(i int) {
if o, ok := entries[i].(*upstream.Object); ok {
err := o.SetModTime(ctx, t)
errs[i] = errors.Wrap(err, o.UpstreamFs().Name())
} else {
errs[i] = fs.ErrorNotAFile
}
})
wg.Wait()
return errs.Err()
}

// ModTime returns the modification date of the directory
// It returns the latest ModTime of all candidates
func (d *Directory) ModTime(ctx context.Context) (t time.Time) {
entries := d.candidates()
times := make([]time.Time, len(entries))
multithread(len(entries), func(i int) {
times[i] = entries[i].ModTime(ctx)
})
for _, ti := range times {
if t.Before(ti) {
t = ti
}
}
return t
}

// Size returns the size of the directory
// It returns the sum of all candidates
func (d *Directory) Size() (s int64) {
for _, e := range d.candidates() {
s += e.Size()
}
return s
}
@@ -1,68 +0,0 @@
package union

import (
"bytes"
"fmt"
)

// The Errors type wraps a slice of errors
type Errors []error

// Map returns a copy of the error slice with all its errors modified
// according to the mapping function. If mapping returns nil,
// the error is dropped from the error slice with no replacement.
func (e Errors) Map(mapping func(error) error) Errors {
s := make([]error, len(e))
i := 0
for _, err := range e {
nerr := mapping(err)
if nerr == nil {
continue
}
s[i] = nerr
i++
}
return Errors(s[:i])
}

// FilterNil returns the Errors without nil
func (e Errors) FilterNil() Errors {
ne := e.Map(func(err error) error {
return err
})
return ne
}

// Err returns an error with the nil errors filtered out,
// or nil if no non-nil error is present.
func (e Errors) Err() error {
ne := e.FilterNil()
if len(ne) == 0 {
return nil
}
return ne
}

// Error returns a concatenated string of the contained errors
func (e Errors) Error() string {
var buf bytes.Buffer

if len(e) == 0 {
buf.WriteString("no error")
}
if len(e) == 1 {
buf.WriteString("1 error: ")
} else {
fmt.Fprintf(&buf, "%d errors: ", len(e))
}

for i, err := range e {
if i != 0 {
buf.WriteString("; ")
}

buf.WriteString(err.Error())
}

return buf.String()
}
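A quick usage sketch of the Errors aggregate above: per-upstream errors collapse to nil when every branch succeeded and to one combined error otherwise. The type is re-declared here in simplified form (FilterNil and the singular "1 error" wording are elided) so the snippet runs standalone:

package main

import (
	"errors"
	"fmt"
)

// Errors mirrors the union.Errors type above, simplified.
type Errors []error

func (e Errors) Err() error {
	var ne Errors
	for _, err := range e {
		if err != nil {
			ne = append(ne, err)
		}
	}
	if len(ne) == 0 {
		return nil // every branch succeeded
	}
	return ne
}

func (e Errors) Error() string {
	s := fmt.Sprintf("%d errors: ", len(e))
	for i, err := range e {
		if i != 0 {
			s += "; "
		}
		s += err.Error()
	}
	return s
}

func main() {
	ok := Errors{nil, nil}
	fmt.Println(ok.Err()) // <nil>: all upstreams succeeded

	mixed := Errors{nil, errors.New("remote-b: permission denied")}
	fmt.Println(mixed.Err()) // 1 errors: remote-b: permission denied
}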
@@ -1,44 +0,0 @@
package policy

import (
"context"

"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)

func init() {
registerPolicy("all", &All{})
}

// All policy behaves the same as EpAll except for the CREATE category
// Action category: same as epall.
// Create category: apply to all branches.
// Search category: same as epall.
type All struct {
EpAll
}

// Create category policy, governing the creation of files and directories
func (p *All) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterNC(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
return upstreams, nil
}

// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *All) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
entries = filterNCEntries(entries)
if len(entries) == 0 {
return nil, fs.ErrorPermissionDenied
}
return entries, nil
}
@@ -1,99 +0,0 @@
package policy

import (
"context"
"path"
"sync"

"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"
)

func init() {
registerPolicy("epall", &EpAll{})
}

// EpAll stands for existing path, all
// Action category: apply to all found.
// Create category: apply to all found.
// Search category: same as epff.
type EpAll struct {
EpFF
}

func (p *EpAll) epall(ctx context.Context, upstreams []*upstream.Fs, filePath string) ([]*upstream.Fs, error) {
var wg sync.WaitGroup
ufs := make([]*upstream.Fs, len(upstreams))
for i, u := range upstreams {
wg.Add(1)
i, u := i, u // Closure
go func() {
rfs := u.RootFs
remote := path.Join(u.RootPath, filePath)
if findEntry(ctx, rfs, remote) != nil {
ufs[i] = u
}
wg.Done()
}()
}
wg.Wait()
var results []*upstream.Fs
for _, f := range ufs {
if f != nil {
results = append(results, f)
}
}
if len(results) == 0 {
return nil, fs.ErrorObjectNotFound
}
return results, nil
}

// Action category policy, governing the modification of files and directories
func (p *EpAll) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterRO(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
return p.epall(ctx, upstreams, path)
}

// ActionEntries is ACTION category policy but receiving a set of candidate entries
func (p *EpAll) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
entries = filterROEntries(entries)
if len(entries) == 0 {
return nil, fs.ErrorPermissionDenied
}
return entries, nil
}

// Create category policy, governing the creation of files and directories
func (p *EpAll) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {
return nil, fs.ErrorObjectNotFound
}
upstreams = filterNC(upstreams)
if len(upstreams) == 0 {
return nil, fs.ErrorPermissionDenied
}
upstreams, err := p.epall(ctx, upstreams, path+"/..")
return upstreams, err
}

// CreateEntries is CREATE category policy but receiving a set of candidate entries
func (p *EpAll) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
entries = filterNCEntries(entries)
if len(entries) == 0 {
return nil, fs.ErrorPermissionDenied
}
return entries, nil
}
@@ -1,115 +0,0 @@
package policy

import (
	"context"
	"path"

	"github.com/rclone/rclone/backend/union/upstream"
	"github.com/rclone/rclone/fs"
)

func init() {
	registerPolicy("epff", &EpFF{})
}

// EpFF stands for existing path, first found
// Given the order of the candidates, act on the first one found where the relative path exists.
type EpFF struct{}

func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath string) (*upstream.Fs, error) {
	ch := make(chan *upstream.Fs)
	for _, u := range upstreams {
		u := u // Closure
		go func() {
			rfs := u.RootFs
			remote := path.Join(u.RootPath, filePath)
			if findEntry(ctx, rfs, remote) == nil {
				u = nil
			}
			ch <- u
		}()
	}
	var u *upstream.Fs
	for i := 0; i < len(upstreams); i++ {
		u = <-ch
		if u != nil {
			// drain and close the remaining goroutines
			go func(num int) {
				defer close(ch)
				for i := 0; i < num; i++ {
					<-ch
				}
			}(len(upstreams) - 1 - i)
			break // stop at the first upstream where the path was found
		}
	}
	if u == nil {
		return nil, fs.ErrorObjectNotFound
	}
	return u, nil
}

// Action category policy, governing the modification of files and directories
func (p *EpFF) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
	if len(upstreams) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	upstreams = filterRO(upstreams)
	if len(upstreams) == 0 {
		return nil, fs.ErrorPermissionDenied
	}
	u, err := p.epff(ctx, upstreams, path)
	return []*upstream.Fs{u}, err
}

// ActionEntries is the ACTION category policy, but receives a set of candidate entries
func (p *EpFF) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
	if len(entries) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	entries = filterROEntries(entries)
	if len(entries) == 0 {
		return nil, fs.ErrorPermissionDenied
	}
	return entries[:1], nil
}

// Create category policy, governing the creation of files and directories
func (p *EpFF) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
	if len(upstreams) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	upstreams = filterNC(upstreams)
	if len(upstreams) == 0 {
		return nil, fs.ErrorPermissionDenied
	}
	u, err := p.epff(ctx, upstreams, path+"/..")
	return []*upstream.Fs{u}, err
}

// CreateEntries is the CREATE category policy, but receives a set of candidate entries
func (p *EpFF) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
	if len(entries) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	entries = filterNCEntries(entries)
	if len(entries) == 0 {
		return nil, fs.ErrorPermissionDenied
	}
	return entries[:1], nil
}

// Search category policy, governing the access to files and directories
func (p *EpFF) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
	if len(upstreams) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	return p.epff(ctx, upstreams, path)
}

// SearchEntries is the SEARCH category policy, but receives a set of candidate entries
func (p *EpFF) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
	if len(entries) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	return entries[0], nil
}
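// Editor's note (illustrative sketch, not part of this diff): epff above
// races one lookup goroutine per upstream over an unbuffered channel, takes
// the first upstream that reports the path exists, and hands the remaining
// sends to a drain goroutine so no sender blocks forever. The same pattern
// in isolation, assuming a hypothetical exists() check:
//
//	func firstFound(names []string, exists func(string) bool) string {
//		ch := make(chan string)
//		for _, name := range names {
//			name := name
//			go func() {
//				if exists(name) {
//					ch <- name
//				} else {
//					ch <- ""
//				}
//			}()
//		}
//		for i := 0; i < len(names); i++ {
//			if found := <-ch; found != "" {
//				go func(rest int) { // drain the stragglers
//					for j := 0; j < rest; j++ {
//						<-ch
//					}
//				}(len(names) - 1 - i)
//				return found
//			}
//		}
//		return ""
//	}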
@@ -1,116 +0,0 @@
package policy

import (
	"context"
	"math"

	"github.com/rclone/rclone/backend/union/upstream"
	"github.com/rclone/rclone/fs"
)

func init() {
	registerPolicy("eplfs", &EpLfs{})
}

// EpLfs stands for existing path, least free space
// Of all the candidates on which the path exists choose the one with the least free space.
type EpLfs struct {
	EpAll
}

func (p *EpLfs) lfs(upstreams []*upstream.Fs) (*upstream.Fs, error) {
	var minFreeSpace int64 = math.MaxInt64
	var lfsupstream *upstream.Fs
	for _, u := range upstreams {
		space, err := u.GetFreeSpace()
		if err != nil {
			fs.LogPrintf(fs.LogLevelNotice, nil,
				"Free Space is not supported for upstream %s, treating as infinite", u.Name())
		}
		if space < minFreeSpace {
			minFreeSpace = space
			lfsupstream = u
		}
	}
	if lfsupstream == nil {
		return nil, fs.ErrorObjectNotFound
	}
	return lfsupstream, nil
}

func (p *EpLfs) lfsEntries(entries []upstream.Entry) (upstream.Entry, error) {
	var minFreeSpace int64 = math.MaxInt64 // start at MaxInt64 so the first candidate can be selected
	var lfsEntry upstream.Entry
	for _, e := range entries {
		space, err := e.UpstreamFs().GetFreeSpace()
		if err != nil {
			fs.LogPrintf(fs.LogLevelNotice, nil,
				"Free Space is not supported for upstream %s, treating as infinite", e.UpstreamFs().Name())
		}
		if space < minFreeSpace {
			minFreeSpace = space
			lfsEntry = e
		}
	}
	return lfsEntry, nil
}

// Action category policy, governing the modification of files and directories
func (p *EpLfs) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
	upstreams, err := p.EpAll.Action(ctx, upstreams, path)
	if err != nil {
		return nil, err
	}
	u, err := p.lfs(upstreams)
	return []*upstream.Fs{u}, err
}

// ActionEntries is the ACTION category policy, but receives a set of candidate entries
func (p *EpLfs) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
	entries, err := p.EpAll.ActionEntries(entries...)
	if err != nil {
		return nil, err
	}
	e, err := p.lfsEntries(entries)
	return []upstream.Entry{e}, err
}

// Create category policy, governing the creation of files and directories
func (p *EpLfs) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
	upstreams, err := p.EpAll.Create(ctx, upstreams, path)
	if err != nil {
		return nil, err
	}
	u, err := p.lfs(upstreams)
	return []*upstream.Fs{u}, err
}

// CreateEntries is the CREATE category policy, but receives a set of candidate entries
func (p *EpLfs) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
	entries, err := p.EpAll.CreateEntries(entries...)
	if err != nil {
		return nil, err
	}
	e, err := p.lfsEntries(entries)
	return []upstream.Entry{e}, err
}

// Search category policy, governing the access to files and directories
func (p *EpLfs) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
	if len(upstreams) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	upstreams, err := p.epall(ctx, upstreams, path)
	if err != nil {
		return nil, err
	}
	return p.lfs(upstreams)
}

// SearchEntries is the SEARCH category policy, but receives a set of candidate entries
func (p *EpLfs) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
	if len(entries) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	return p.lfsEntries(entries)
}
@@ -1,116 +0,0 @@
package policy

import (
	"context"
	"math"

	"github.com/rclone/rclone/backend/union/upstream"
	"github.com/rclone/rclone/fs"
)

func init() {
	registerPolicy("eplno", &EpLno{})
}

// EpLno stands for existing path, least number of objects
// Of all the candidates on which the path exists choose the one with the least number of objects
type EpLno struct {
	EpAll
}

func (p *EpLno) lno(upstreams []*upstream.Fs) (*upstream.Fs, error) {
	var minNumObj int64 = math.MaxInt64
	var lnoUpstream *upstream.Fs
	for _, u := range upstreams {
		numObj, err := u.GetNumObjects()
		if err != nil {
			fs.LogPrintf(fs.LogLevelNotice, nil,
				"Number of Objects is not supported for upstream %s, treating as 0", u.Name())
		}
		if minNumObj > numObj {
			minNumObj = numObj
			lnoUpstream = u
		}
	}
	if lnoUpstream == nil {
		return nil, fs.ErrorObjectNotFound
	}
	return lnoUpstream, nil
}

func (p *EpLno) lnoEntries(entries []upstream.Entry) (upstream.Entry, error) {
	var minNumObj int64 = math.MaxInt64
	var lnoEntry upstream.Entry
	for _, e := range entries {
		numObj, err := e.UpstreamFs().GetNumObjects()
		if err != nil {
			fs.LogPrintf(fs.LogLevelNotice, nil,
				"Number of Objects is not supported for upstream %s, treating as 0", e.UpstreamFs().Name())
		}
		if minNumObj > numObj {
			minNumObj = numObj
			lnoEntry = e
		}
	}
	return lnoEntry, nil
}

// Action category policy, governing the modification of files and directories
func (p *EpLno) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
	upstreams, err := p.EpAll.Action(ctx, upstreams, path)
	if err != nil {
		return nil, err
	}
	u, err := p.lno(upstreams)
	return []*upstream.Fs{u}, err
}

// ActionEntries is the ACTION category policy, but receives a set of candidate entries
func (p *EpLno) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
	entries, err := p.EpAll.ActionEntries(entries...)
	if err != nil {
		return nil, err
	}
	e, err := p.lnoEntries(entries)
	return []upstream.Entry{e}, err
}

// Create category policy, governing the creation of files and directories
func (p *EpLno) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
	upstreams, err := p.EpAll.Create(ctx, upstreams, path)
	if err != nil {
		return nil, err
	}
	u, err := p.lno(upstreams)
	return []*upstream.Fs{u}, err
}

// CreateEntries is the CREATE category policy, but receives a set of candidate entries
func (p *EpLno) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
	entries, err := p.EpAll.CreateEntries(entries...)
	if err != nil {
		return nil, err
	}
	e, err := p.lnoEntries(entries)
	return []upstream.Entry{e}, err
}

// Search category policy, governing the access to files and directories
func (p *EpLno) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
	if len(upstreams) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	upstreams, err := p.epall(ctx, upstreams, path)
	if err != nil {
		return nil, err
	}
	return p.lno(upstreams)
}

// SearchEntries is the SEARCH category policy, but receives a set of candidate entries
func (p *EpLno) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
	if len(entries) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	return p.lnoEntries(entries)
}
@@ -1,116 +0,0 @@
package policy

import (
	"context"
	"math"

	"github.com/rclone/rclone/backend/union/upstream"
	"github.com/rclone/rclone/fs"
)

func init() {
	registerPolicy("eplus", &EpLus{})
}

// EpLus stands for existing path, least used space
// Of all the candidates on which the path exists choose the one with the least used space.
type EpLus struct {
	EpAll
}

func (p *EpLus) lus(upstreams []*upstream.Fs) (*upstream.Fs, error) {
	var minUsedSpace int64 = math.MaxInt64
	var lusupstream *upstream.Fs
	for _, u := range upstreams {
		space, err := u.GetUsedSpace()
		if err != nil {
			fs.LogPrintf(fs.LogLevelNotice, nil,
				"Used Space is not supported for upstream %s, treating as 0", u.Name())
		}
		if space < minUsedSpace {
			minUsedSpace = space
			lusupstream = u
		}
	}
	if lusupstream == nil {
		return nil, fs.ErrorObjectNotFound
	}
	return lusupstream, nil
}

func (p *EpLus) lusEntries(entries []upstream.Entry) (upstream.Entry, error) {
	var minUsedSpace int64 = math.MaxInt64 // start at MaxInt64 so the first candidate can be selected
	var lusEntry upstream.Entry
	for _, e := range entries {
		space, err := e.UpstreamFs().GetUsedSpace() // used space, not free space, for a least-used-space policy
		if err != nil {
			fs.LogPrintf(fs.LogLevelNotice, nil,
				"Used Space is not supported for upstream %s, treating as 0", e.UpstreamFs().Name())
		}
		if space < minUsedSpace {
			minUsedSpace = space
			lusEntry = e
		}
	}
	return lusEntry, nil
}

// Action category policy, governing the modification of files and directories
func (p *EpLus) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
	upstreams, err := p.EpAll.Action(ctx, upstreams, path)
	if err != nil {
		return nil, err
	}
	u, err := p.lus(upstreams)
	return []*upstream.Fs{u}, err
}

// ActionEntries is the ACTION category policy, but receives a set of candidate entries
func (p *EpLus) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
	entries, err := p.EpAll.ActionEntries(entries...)
	if err != nil {
		return nil, err
	}
	e, err := p.lusEntries(entries)
	return []upstream.Entry{e}, err
}

// Create category policy, governing the creation of files and directories
func (p *EpLus) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
	upstreams, err := p.EpAll.Create(ctx, upstreams, path)
	if err != nil {
		return nil, err
	}
	u, err := p.lus(upstreams)
	return []*upstream.Fs{u}, err
}

// CreateEntries is the CREATE category policy, but receives a set of candidate entries
func (p *EpLus) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
	entries, err := p.EpAll.CreateEntries(entries...)
	if err != nil {
		return nil, err
	}
	e, err := p.lusEntries(entries)
	return []upstream.Entry{e}, err
}

// Search category policy, governing the access to files and directories
func (p *EpLus) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
	if len(upstreams) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	upstreams, err := p.epall(ctx, upstreams, path)
	if err != nil {
		return nil, err
	}
	return p.lus(upstreams)
}

// SearchEntries is the SEARCH category policy, but receives a set of candidate entries
func (p *EpLus) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
	if len(entries) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	return p.lusEntries(entries)
}
@@ -1,115 +0,0 @@
package policy

import (
	"context"

	"github.com/rclone/rclone/backend/union/upstream"
	"github.com/rclone/rclone/fs"
)

func init() {
	registerPolicy("epmfs", &EpMfs{})
}

// EpMfs stands for existing path, most free space
// Of all the candidates on which the path exists choose the one with the most free space.
type EpMfs struct {
	EpAll
}

func (p *EpMfs) mfs(upstreams []*upstream.Fs) (*upstream.Fs, error) {
	var maxFreeSpace int64
	var mfsupstream *upstream.Fs
	for _, u := range upstreams {
		space, err := u.GetFreeSpace()
		if err != nil {
			fs.LogPrintf(fs.LogLevelNotice, nil,
				"Free Space is not supported for upstream %s, treating as infinite", u.Name())
		}
		if maxFreeSpace < space {
			maxFreeSpace = space
			mfsupstream = u
		}
	}
	if mfsupstream == nil {
		return nil, fs.ErrorObjectNotFound
	}
	return mfsupstream, nil
}

func (p *EpMfs) mfsEntries(entries []upstream.Entry) (upstream.Entry, error) {
	var maxFreeSpace int64
	var mfsEntry upstream.Entry
	for _, e := range entries {
		space, err := e.UpstreamFs().GetFreeSpace()
		if err != nil {
			fs.LogPrintf(fs.LogLevelNotice, nil,
				"Free Space is not supported for upstream %s, treating as infinite", e.UpstreamFs().Name())
		}
		if maxFreeSpace < space {
			maxFreeSpace = space
			mfsEntry = e
		}
	}
	return mfsEntry, nil
}

// Action category policy, governing the modification of files and directories
func (p *EpMfs) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
	upstreams, err := p.EpAll.Action(ctx, upstreams, path)
	if err != nil {
		return nil, err
	}
	u, err := p.mfs(upstreams)
	return []*upstream.Fs{u}, err
}

// ActionEntries is the ACTION category policy, but receives a set of candidate entries
func (p *EpMfs) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
	entries, err := p.EpAll.ActionEntries(entries...)
	if err != nil {
		return nil, err
	}
	e, err := p.mfsEntries(entries)
	return []upstream.Entry{e}, err
}

// Create category policy, governing the creation of files and directories
func (p *EpMfs) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
	upstreams, err := p.EpAll.Create(ctx, upstreams, path)
	if err != nil {
		return nil, err
	}
	u, err := p.mfs(upstreams)
	return []*upstream.Fs{u}, err
}

// CreateEntries is the CREATE category policy, but receives a set of candidate entries
func (p *EpMfs) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
	entries, err := p.EpAll.CreateEntries(entries...)
	if err != nil {
		return nil, err
	}
	e, err := p.mfsEntries(entries)
	return []upstream.Entry{e}, err
}

// Search category policy, governing the access to files and directories
func (p *EpMfs) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
	if len(upstreams) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	upstreams, err := p.epall(ctx, upstreams, path)
	if err != nil {
		return nil, err
	}
	return p.mfs(upstreams)
}

// SearchEntries is the SEARCH category policy, but receives a set of candidate entries
func (p *EpMfs) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
	if len(entries) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	return p.mfsEntries(entries)
}
@@ -1,86 +0,0 @@
package policy

import (
	"context"
	"math/rand"
	"time"

	"github.com/rclone/rclone/backend/union/upstream"
	"github.com/rclone/rclone/fs"
)

func init() {
	registerPolicy("eprand", &EpRand{})
}

// EpRand stands for existing path, random
// Calls epall and then randomizes. Returns one candidate.
type EpRand struct {
	EpAll
}

func (p *EpRand) rand(upstreams []*upstream.Fs) *upstream.Fs {
	rand.Seed(time.Now().Unix())
	return upstreams[rand.Intn(len(upstreams))]
}

func (p *EpRand) randEntries(entries []upstream.Entry) upstream.Entry {
	rand.Seed(time.Now().Unix())
	return entries[rand.Intn(len(entries))]
}

// Action category policy, governing the modification of files and directories
func (p *EpRand) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
	upstreams, err := p.EpAll.Action(ctx, upstreams, path)
	if err != nil {
		return nil, err
	}
	return []*upstream.Fs{p.rand(upstreams)}, nil
}

// ActionEntries is the ACTION category policy, but receives a set of candidate entries
func (p *EpRand) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
	entries, err := p.EpAll.ActionEntries(entries...)
	if err != nil {
		return nil, err
	}
	return []upstream.Entry{p.randEntries(entries)}, nil
}

// Create category policy, governing the creation of files and directories
func (p *EpRand) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
	upstreams, err := p.EpAll.Create(ctx, upstreams, path)
	if err != nil {
		return nil, err
	}
	return []*upstream.Fs{p.rand(upstreams)}, nil
}

// CreateEntries is the CREATE category policy, but receives a set of candidate entries
func (p *EpRand) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
	entries, err := p.EpAll.CreateEntries(entries...)
	if err != nil {
		return nil, err
	}
	return []upstream.Entry{p.randEntries(entries)}, nil
}

// Search category policy, governing the access to files and directories
func (p *EpRand) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
	if len(upstreams) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	upstreams, err := p.epall(ctx, upstreams, path)
	if err != nil {
		return nil, err
	}
	return p.rand(upstreams), nil
}

// SearchEntries is the SEARCH category policy, but receives a set of candidate entries
func (p *EpRand) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
	if len(entries) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	return p.randEntries(entries), nil
}
@@ -1,32 +0,0 @@
package policy

import (
	"context"

	"github.com/rclone/rclone/backend/union/upstream"
	"github.com/rclone/rclone/fs"
)

func init() {
	registerPolicy("ff", &FF{})
}

// FF stands for first found
// Search category: same as epff.
// Action category: same as epff.
// Create category: Given the order of the candidates, act on the first one found.
type FF struct {
	EpFF
}

// Create category policy, governing the creation of files and directories
func (p *FF) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
	if len(upstreams) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	upstreams = filterNC(upstreams)
	if len(upstreams) == 0 {
		return upstreams, fs.ErrorPermissionDenied
	}
	return upstreams[:1], nil
}
@@ -1,33 +0,0 @@
package policy

import (
	"context"

	"github.com/rclone/rclone/backend/union/upstream"
	"github.com/rclone/rclone/fs"
)

func init() {
	registerPolicy("lfs", &Lfs{})
}

// Lfs stands for least free space
// Search category: same as eplfs.
// Action category: same as eplfs.
// Create category: Pick the drive with the least free space.
type Lfs struct {
	EpLfs
}

// Create category policy, governing the creation of files and directories
func (p *Lfs) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
	if len(upstreams) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	upstreams = filterNC(upstreams)
	if len(upstreams) == 0 {
		return nil, fs.ErrorPermissionDenied
	}
	u, err := p.lfs(upstreams)
	return []*upstream.Fs{u}, err
}
@@ -1,33 +0,0 @@
package policy

import (
	"context"

	"github.com/rclone/rclone/backend/union/upstream"
	"github.com/rclone/rclone/fs"
)

func init() {
	registerPolicy("lno", &Lno{})
}

// Lno stands for least number of objects
// Search category: same as eplno.
// Action category: same as eplno.
// Create category: Pick the drive with the least number of objects.
type Lno struct {
	EpLno
}

// Create category policy, governing the creation of files and directories
func (p *Lno) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
	if len(upstreams) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	upstreams = filterNC(upstreams)
	if len(upstreams) == 0 {
		return nil, fs.ErrorPermissionDenied
	}
	u, err := p.lno(upstreams)
	return []*upstream.Fs{u}, err
}
@@ -1,33 +0,0 @@
package policy

import (
	"context"

	"github.com/rclone/rclone/backend/union/upstream"
	"github.com/rclone/rclone/fs"
)

func init() {
	registerPolicy("lus", &Lus{})
}

// Lus stands for least used space
// Search category: same as eplus.
// Action category: same as eplus.
// Create category: Pick the drive with the least used space.
type Lus struct {
	EpLus
}

// Create category policy, governing the creation of files and directories
func (p *Lus) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
	if len(upstreams) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	upstreams = filterNC(upstreams)
	if len(upstreams) == 0 {
		return nil, fs.ErrorPermissionDenied
	}
	u, err := p.lus(upstreams)
	return []*upstream.Fs{u}, err
}
@@ -1,33 +0,0 @@
package policy

import (
	"context"

	"github.com/rclone/rclone/backend/union/upstream"
	"github.com/rclone/rclone/fs"
)

func init() {
	registerPolicy("mfs", &Mfs{})
}

// Mfs stands for most free space
// Search category: same as epmfs.
// Action category: same as epmfs.
// Create category: Pick the drive with the most free space.
type Mfs struct {
	EpMfs
}

// Create category policy, governing the creation of files and directories
func (p *Mfs) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
	if len(upstreams) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	upstreams = filterNC(upstreams)
	if len(upstreams) == 0 {
		return nil, fs.ErrorPermissionDenied
	}
	u, err := p.mfs(upstreams)
	return []*upstream.Fs{u}, err
}
@@ -1,149 +0,0 @@
package policy

import (
	"context"
	"path"
	"sync"
	"time"

	"github.com/rclone/rclone/backend/union/upstream"
	"github.com/rclone/rclone/fs"
)

func init() {
	registerPolicy("newest", &Newest{})
}

// Newest policy picks the file / directory with the largest mtime
// It implies the existence of a path
type Newest struct {
	EpAll
}

func (p *Newest) newest(ctx context.Context, upstreams []*upstream.Fs, filePath string) (*upstream.Fs, error) {
	var wg sync.WaitGroup
	ufs := make([]*upstream.Fs, len(upstreams))
	mtimes := make([]time.Time, len(upstreams))
	for i, u := range upstreams {
		wg.Add(1)
		i, u := i, u // Closure
		go func() {
			defer wg.Done()
			rfs := u.RootFs
			remote := path.Join(u.RootPath, filePath)
			if e := findEntry(ctx, rfs, remote); e != nil {
				ufs[i] = u
				mtimes[i] = e.ModTime(ctx)
			}
		}()
	}
	wg.Wait()
	maxMtime := time.Time{}
	var newestFs *upstream.Fs
	for i, u := range ufs {
		if u != nil && mtimes[i].After(maxMtime) {
			maxMtime = mtimes[i]
			newestFs = u
		}
	}
	if newestFs == nil {
		return nil, fs.ErrorObjectNotFound
	}
	return newestFs, nil
}

func (p *Newest) newestEntries(entries []upstream.Entry) (upstream.Entry, error) {
	var wg sync.WaitGroup
	mtimes := make([]time.Time, len(entries))
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	for i, e := range entries {
		wg.Add(1)
		i, e := i, e // Closure
		go func() {
			defer wg.Done()
			mtimes[i] = e.ModTime(ctx)
		}()
	}
	wg.Wait()
	maxMtime := time.Time{}
	var newestEntry upstream.Entry
	for i, t := range mtimes {
		if t.After(maxMtime) {
			maxMtime = t
			newestEntry = entries[i]
		}
	}
	if newestEntry == nil {
		return nil, fs.ErrorObjectNotFound
	}
	return newestEntry, nil
}

// Action category policy, governing the modification of files and directories
func (p *Newest) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
	if len(upstreams) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	upstreams = filterRO(upstreams)
	if len(upstreams) == 0 {
		return nil, fs.ErrorPermissionDenied
	}
	u, err := p.newest(ctx, upstreams, path)
	return []*upstream.Fs{u}, err
}

// ActionEntries is the ACTION category policy, but receives a set of candidate entries
func (p *Newest) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
	if len(entries) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	entries = filterROEntries(entries)
	if len(entries) == 0 {
		return nil, fs.ErrorPermissionDenied
	}
	e, err := p.newestEntries(entries)
	return []upstream.Entry{e}, err
}

// Create category policy, governing the creation of files and directories
func (p *Newest) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
	if len(upstreams) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	upstreams = filterNC(upstreams)
	if len(upstreams) == 0 {
		return nil, fs.ErrorPermissionDenied
	}
	u, err := p.newest(ctx, upstreams, path+"/..")
	return []*upstream.Fs{u}, err
}

// CreateEntries is the CREATE category policy, but receives a set of candidate entries
func (p *Newest) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
	if len(entries) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	entries = filterNCEntries(entries)
	if len(entries) == 0 {
		return nil, fs.ErrorPermissionDenied
	}
	e, err := p.newestEntries(entries)
	return []upstream.Entry{e}, err
}

// Search category policy, governing the access to files and directories
func (p *Newest) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
	if len(upstreams) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	return p.newest(ctx, upstreams, path)
}

// SearchEntries is the SEARCH category policy, but receives a set of candidate entries
func (p *Newest) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
	if len(entries) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	return p.newestEntries(entries)
}
@@ -1,129 +0,0 @@
package policy

import (
	"context"
	"math/rand"
	"path"
	"strings"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/union/upstream"
	"github.com/rclone/rclone/fs"
)

var policies = make(map[string]Policy)

// Policy is the interface for a set of defined behaviors that choose
// the upstream Fs to operate on
type Policy interface {
	// Action category policy, governing the modification of files and directories
	Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error)

	// Create category policy, governing the creation of files and directories
	Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error)

	// Search category policy, governing the access to files and directories
	Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error)

	// ActionEntries is the ACTION category policy, but receives a set of candidate entries
	ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error)

	// CreateEntries is the CREATE category policy, but receives a set of candidate entries
	CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error)

	// SearchEntries is the SEARCH category policy, but receives a set of candidate entries
	SearchEntries(entries ...upstream.Entry) (upstream.Entry, error)
}

func registerPolicy(name string, p Policy) {
	policies[strings.ToLower(name)] = p
}

// Get a Policy from the list
func Get(name string) (Policy, error) {
	p, ok := policies[strings.ToLower(name)]
	if !ok {
		return nil, errors.Errorf("didn't find policy called %q", name)
	}
	return p, nil
}

func filterRO(ufs []*upstream.Fs) (wufs []*upstream.Fs) {
	for _, u := range ufs {
		if u.IsWritable() {
			wufs = append(wufs, u)
		}
	}
	return wufs
}

func filterROEntries(ue []upstream.Entry) (wue []upstream.Entry) {
	for _, e := range ue {
		if e.UpstreamFs().IsWritable() {
			wue = append(wue, e)
		}
	}
	return wue
}

func filterNC(ufs []*upstream.Fs) (wufs []*upstream.Fs) {
	for _, u := range ufs {
		if u.IsCreatable() {
			wufs = append(wufs, u)
		}
	}
	return wufs
}

func filterNCEntries(ue []upstream.Entry) (wue []upstream.Entry) {
	for _, e := range ue {
		if e.UpstreamFs().IsCreatable() {
			wue = append(wue, e)
		}
	}
	return wue
}

func parentDir(absPath string) string {
	parent := path.Dir(strings.TrimRight(absPath, "/"))
	if parent == "." {
		parent = ""
	}
	return parent
}

func clean(absPath string) string {
	cleanPath := path.Clean(absPath)
	if cleanPath == "." {
		cleanPath = ""
	}
	return cleanPath
}

func findEntry(ctx context.Context, f fs.Fs, remote string) fs.DirEntry {
	remote = clean(remote)
	dir := parentDir(remote)
	entries, err := f.List(ctx, dir)
	if remote == dir {
		if err != nil {
			return nil
		}
		// random modtime for root
		randomNow := time.Unix(time.Now().Unix()-rand.Int63n(10000), 0)
		return fs.NewDir("", randomNow)
	}
	found := false
	for _, e := range entries {
		eRemote := e.Remote()
		if f.Features().CaseInsensitive {
			found = strings.EqualFold(remote, eRemote)
		} else {
			found = (remote == eRemote)
		}
		if found {
			return e
		}
	}
	return nil
}
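// Editor's note (illustrative sketch, not part of this diff): policies
// register themselves by name in init() and are looked up case-insensitively
// through Get, so a caller resolves and applies a policy roughly like this
// (ctx and upstreams are assumed to be in scope):
//
//	p, err := policy.Get("EpMfs") // same as "epmfs"
//	if err != nil {
//		return err
//	}
//	chosen, err := p.Create(ctx, upstreams, "dir/file.txt")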
@@ -1,83 +0,0 @@
package policy

import (
	"context"
	"math/rand"

	"github.com/rclone/rclone/backend/union/upstream"
	"github.com/rclone/rclone/fs"
)

func init() {
	registerPolicy("rand", &Rand{})
}

// Rand stands for random
// Calls all and then randomizes. Returns one candidate.
type Rand struct {
	All
}

func (p *Rand) rand(upstreams []*upstream.Fs) *upstream.Fs {
	return upstreams[rand.Intn(len(upstreams))]
}

func (p *Rand) randEntries(entries []upstream.Entry) upstream.Entry {
	return entries[rand.Intn(len(entries))]
}

// Action category policy, governing the modification of files and directories
func (p *Rand) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
	upstreams, err := p.All.Action(ctx, upstreams, path)
	if err != nil {
		return nil, err
	}
	return []*upstream.Fs{p.rand(upstreams)}, nil
}

// ActionEntries is the ACTION category policy, but receives a set of candidate entries
func (p *Rand) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
	entries, err := p.All.ActionEntries(entries...)
	if err != nil {
		return nil, err
	}
	return []upstream.Entry{p.randEntries(entries)}, nil
}

// Create category policy, governing the creation of files and directories
func (p *Rand) Create(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
	upstreams, err := p.All.Create(ctx, upstreams, path)
	if err != nil {
		return nil, err
	}
	return []*upstream.Fs{p.rand(upstreams)}, nil
}

// CreateEntries is the CREATE category policy, but receives a set of candidate entries
func (p *Rand) CreateEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
	entries, err := p.All.CreateEntries(entries...)
	if err != nil {
		return nil, err
	}
	return []upstream.Entry{p.randEntries(entries)}, nil
}

// Search category policy, governing the access to files and directories
func (p *Rand) Search(ctx context.Context, upstreams []*upstream.Fs, path string) (*upstream.Fs, error) {
	if len(upstreams) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	upstreams, err := p.epall(ctx, upstreams, path)
	if err != nil {
		return nil, err
	}
	return p.rand(upstreams), nil
}

// SearchEntries is the SEARCH category policy, but receives a set of candidate entries
func (p *Rand) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
	if len(entries) == 0 {
		return nil, fs.ErrorObjectNotFound
	}
	return p.randEntries(entries), nil
}
@@ -1,57 +1,32 @@
package union

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"path"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/union/policy"
	"github.com/rclone/rclone/backend/union/upstream"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/walk"
)

// Register with Fs
func init() {
	fsi := &fs.RegInfo{
		Name:        "union",
		Description: "Union merges the contents of several upstream fs",
		Description: "Union merges the contents of several remotes",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:     "upstreams",
			Help:     "List of space separated upstreams.\nCan be 'upstreama:test/dir upstreamb:', '\"upstreama:test/space:ro dir\" upstreamb:', etc.\n",
			Name:     "remotes",
			Help:     "List of space separated remotes.\nCan be 'remotea:test/dir remoteb:', '\"remotea:test/space dir\" remoteb:', etc.\nThe last remote is used to write to.",
			Required: true,
		}, {
			Name:     "action_policy",
			Help:     "Policy to choose upstream on ACTION category.",
			Required: true,
			Default:  "epall",
		}, {
			Name:     "create_policy",
			Help:     "Policy to choose upstream on CREATE category.",
			Required: true,
			Default:  "epmfs",
		}, {
			Name:     "search_policy",
			Help:     "Policy to choose upstream on SEARCH category.",
			Required: true,
			Default:  "ff",
		}, {
			Name:     "cache_time",
			Help:     "Cache time of usage and free space (in seconds). This option is only useful when a path preserving policy is used.",
			Required: true,
			Default:  120,
		}},
	}
	fs.Register(fsi)
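// Editor's note (illustrative sketch, not part of this diff): with the
// options registered above, a union remote would be configured roughly like
// this in rclone.conf (the remote names are hypothetical; ':ro' marks a
// read-only upstream as described in the upstreams help text):
//
//	[myunion]
//	type = union
//	upstreams = remotea:data "remoteb:other dir" remotec:backup:ro
//	action_policy = epall
//	create_policy = epmfs
//	search_policy = ff
//	cache_time = 120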
@@ -59,50 +34,41 @@ func init() {
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Upstreams fs.SpaceSepList `config:"upstreams"`
|
||||
Remotes fs.SpaceSepList `config:"remotes"` // Depreated
|
||||
ActionPolicy string `config:"action_policy"`
|
||||
CreatePolicy string `config:"create_policy"`
|
||||
SearchPolicy string `config:"search_policy"`
|
||||
CacheTime int `config:"cache_time"`
|
||||
Remotes fs.SpaceSepList `config:"remotes"`
|
||||
}
|
||||
|
||||
// Fs represents a union of upstreams
|
||||
// Fs represents a union of remotes
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
features *fs.Features // optional features
|
||||
opt Options // options for this Fs
|
||||
root string // the path we are working on
|
||||
upstreams []*upstream.Fs // slice of upstreams
|
||||
hashSet hash.Set // intersection of hash types
|
||||
actionPolicy policy.Policy // policy for ACTION
|
||||
createPolicy policy.Policy // policy for CREATE
|
||||
searchPolicy policy.Policy // policy for SEARCH
|
||||
name string // name of this remote
|
||||
features *fs.Features // optional features
|
||||
opt Options // options for this Fs
|
||||
root string // the path we are working on
|
||||
remotes []fs.Fs // slice of remotes
|
||||
wr fs.Fs // writable remote
|
||||
hashSet hash.Set // intersection of hash types
|
||||
}
|
||||
|
||||
// Wrap candidate objects in to an union Object
|
||||
func (f *Fs) wrapEntries(entries ...upstream.Entry) (entry, error) {
|
||||
e, err := f.searchEntries(entries...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch e.(type) {
|
||||
case *upstream.Object:
|
||||
return &Object{
|
||||
Object: e.(*upstream.Object),
|
||||
fs: f,
|
||||
co: entries,
|
||||
}, nil
|
||||
case *upstream.Directory:
|
||||
return &Directory{
|
||||
Directory: e.(*upstream.Directory),
|
||||
cd: entries,
|
||||
}, nil
|
||||
default:
|
||||
return nil, errors.Errorf("unknown object type %T", e)
|
||||
// Object describes a union Object
|
||||
//
|
||||
// This is a wrapped object which returns the Union Fs as its parent
|
||||
type Object struct {
|
||||
fs.Object
|
||||
fs *Fs // what this object is part of
|
||||
}
|
||||
|
||||
// Wrap an existing object in the union Object
|
||||
func (f *Fs) wrapObject(o fs.Object) *Object {
|
||||
return &Object{
|
||||
Object: o,
|
||||
fs: f,
|
||||
}
|
||||
}
|
||||
|
||||
// Fs returns the union Fs as the parent
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
@@ -125,16 +91,7 @@ func (f *Fs) Features() *fs.Features {
|
||||
|
||||
// Rmdir removes the root directory of the Fs object
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
upstreams, err := f.action(ctx, dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
errs := Errors(make([]error, len(upstreams)))
|
||||
multithread(len(upstreams), func(i int) {
|
||||
err := upstreams[i].Rmdir(ctx, dir)
|
||||
errs[i] = errors.Wrap(err, upstreams[i].Name())
|
||||
})
|
||||
return errs.Err()
|
||||
return f.wr.Rmdir(ctx, dir)
|
||||
}
|
||||
|
||||
// Hashes returns hash.HashNone to indicate remote hashing is unavailable
|
||||
@@ -144,22 +101,7 @@ func (f *Fs) Hashes() hash.Set {
|
||||
|
||||
// Mkdir makes the root directory of the Fs object
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
upstreams, err := f.create(ctx, dir)
|
||||
if err == fs.ErrorObjectNotFound && dir != parentDir(dir) {
|
||||
if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
|
||||
return err
|
||||
}
|
||||
upstreams, err = f.create(ctx, dir)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
errs := Errors(make([]error, len(upstreams)))
|
||||
multithread(len(upstreams), func(i int) {
|
||||
err := upstreams[i].Mkdir(ctx, dir)
|
||||
errs[i] = errors.Wrap(err, upstreams[i].Name())
|
||||
})
|
||||
return errs.Err()
|
||||
return f.wr.Mkdir(ctx, dir)
|
||||
}
|
||||
|
||||
// Purge all files in the root and the root directory
|
||||
@@ -169,21 +111,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
//
|
||||
// Return an error if it doesn't exist
|
||||
func (f *Fs) Purge(ctx context.Context) error {
|
||||
for _, r := range f.upstreams {
|
||||
if r.Features().Purge == nil {
|
||||
return fs.ErrorCantPurge
|
||||
}
|
||||
}
|
||||
upstreams, err := f.action(ctx, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
errs := Errors(make([]error, len(upstreams)))
|
||||
multithread(len(upstreams), func(i int) {
|
||||
err := upstreams[i].Features().Purge(ctx)
|
||||
errs[i] = errors.Wrap(err, upstreams[i].Name())
|
||||
})
|
||||
return errs.Err()
|
||||
return f.wr.Features().Purge(ctx)
|
||||
}
|
||||
|
||||
// Copy src to this remote using server side copy operations.
|
||||
@@ -196,34 +124,15 @@ func (f *Fs) Purge(ctx context.Context) error {
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
if src.Fs() != f.wr {
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
o := srcObj.UnWrap()
|
||||
su := o.UpstreamFs()
|
||||
if su.Features().Copy == nil {
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
var du *upstream.Fs
|
||||
for _, u := range f.upstreams {
|
||||
if operations.Same(u.RootFs, su.RootFs) {
|
||||
du = u
|
||||
}
|
||||
}
|
||||
if du == nil {
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
if !du.IsCreatable() {
|
||||
return nil, fs.ErrorPermissionDenied
|
||||
}
|
||||
co, err := du.Features().Copy(ctx, o, remote)
|
||||
if err != nil || co == nil {
|
||||
o, err := f.wr.Features().Copy(ctx, src, remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
wo, err := f.wrapEntries(du.WrapObject(co))
|
||||
return wo.(*Object), err
|
||||
return f.wrapObject(o), nil
|
||||
}
|
||||
|
||||
// Move src to this remote using server side move operations.
|
||||
@@ -236,57 +145,15 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
o, ok := src.(*Object)
|
||||
if !ok {
|
||||
if src.Fs() != f.wr {
|
||||
fs.Debugf(src, "Can't move - not same remote type")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
entries, err := f.actionEntries(o.candidates()...)
|
||||
o, err := f.wr.Features().Move(ctx, src, remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, e := range entries {
|
||||
if e.UpstreamFs().Features().Move == nil {
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
}
|
||||
objs := make([]*upstream.Object, len(entries))
|
||||
errs := Errors(make([]error, len(entries)))
|
||||
multithread(len(entries), func(i int) {
|
||||
su := entries[i].UpstreamFs()
|
||||
o, ok := entries[i].(*upstream.Object)
|
||||
if !ok {
|
||||
errs[i] = errors.Wrap(fs.ErrorNotAFile, su.Name())
|
||||
return
|
||||
}
|
||||
var du *upstream.Fs
|
||||
for _, u := range f.upstreams {
|
||||
if operations.Same(u.RootFs, su.RootFs) {
|
||||
du = u
|
||||
}
|
||||
}
|
||||
if du == nil {
|
||||
errs[i] = errors.Wrap(fs.ErrorCantMove, su.Name()+":"+remote)
|
||||
return
|
||||
}
|
||||
mo, err := du.Features().Move(ctx, o.UnWrap(), remote)
|
||||
if err != nil || mo == nil {
|
||||
errs[i] = errors.Wrap(err, su.Name())
|
||||
return
|
||||
}
|
||||
objs[i] = du.WrapObject(mo)
|
||||
})
|
||||
var en []upstream.Entry
|
||||
for _, o := range objs {
|
||||
if o != nil {
|
||||
en = append(en, o)
|
||||
}
|
||||
}
|
||||
e, err := f.wrapEntries(en...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return e.(*Object), errs.Err()
|
||||
return f.wrapObject(o), err
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
@@ -298,46 +165,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists
|
||||
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
|
||||
sfs, ok := src.(*Fs)
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't move directory - not same remote type")
|
||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
upstreams, err := sfs.action(ctx, srcRemote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, u := range upstreams {
|
||||
if u.Features().DirMove == nil {
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
}
|
||||
errs := Errors(make([]error, len(upstreams)))
|
||||
multithread(len(upstreams), func(i int) {
|
||||
su := upstreams[i]
|
||||
var du *upstream.Fs
|
||||
for _, u := range f.upstreams {
|
||||
if operations.Same(u.RootFs, su.RootFs) {
|
||||
du = u
|
||||
}
|
||||
}
|
||||
if du == nil {
|
||||
errs[i] = errors.Wrap(fs.ErrorCantDirMove, su.Name()+":"+su.Root())
|
||||
return
|
||||
}
|
||||
err := du.Features().DirMove(ctx, su.Fs, srcRemote, dstRemote)
|
||||
errs[i] = errors.Wrap(err, du.Name()+":"+du.Root())
|
||||
})
|
||||
errs = errs.FilterNil()
|
||||
if len(errs) == 0 {
|
||||
return nil
|
||||
}
|
||||
for _, e := range errs {
|
||||
if errors.Cause(e) != fs.ErrorDirExists {
|
||||
return errs
|
||||
}
|
||||
}
|
||||
return fs.ErrorDirExists
|
||||
return f.wr.Features().DirMove(ctx, srcFs.wr, srcRemote, dstRemote)
|
||||
}
|
||||
|
||||
// ChangeNotify calls the passed function with a path
|
||||
@@ -350,23 +183,23 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
// regularly. When the channel gets closed, the implementation
|
||||
// should stop polling and release resources.
|
||||
func (f *Fs) ChangeNotify(ctx context.Context, fn func(string, fs.EntryType), ch <-chan time.Duration) {
|
||||
var uChans []chan time.Duration
|
||||
var remoteChans []chan time.Duration
|
||||
|
||||
for _, u := range f.upstreams {
|
||||
if ChangeNotify := u.Features().ChangeNotify; ChangeNotify != nil {
|
||||
for _, remote := range f.remotes {
|
||||
if ChangeNotify := remote.Features().ChangeNotify; ChangeNotify != nil {
|
||||
ch := make(chan time.Duration)
|
||||
uChans = append(uChans, ch)
|
||||
remoteChans = append(remoteChans, ch)
|
||||
ChangeNotify(ctx, fn, ch)
|
||||
}
|
||||
}
|
||||
|
||||
go func() {
|
||||
for i := range ch {
|
||||
for _, c := range uChans {
|
||||
for _, c := range remoteChans {
|
||||
c <- i
|
||||
}
|
||||
}
|
||||
for _, c := range uChans {
|
||||
for _, c := range remoteChans {
|
||||
close(c)
|
||||
}
|
||||
}()
@@ -375,103 +208,10 @@ func (f *Fs) ChangeNotify(ctx context.Context, fn func(string, fs.EntryType), ch
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
multithread(len(f.upstreams), func(i int) {
if do := f.upstreams[i].Features().DirCacheFlush; do != nil {
do()
for _, remote := range f.remotes {
if DirCacheFlush := remote.Features().DirCacheFlush; DirCacheFlush != nil {
DirCacheFlush()
}
})
}

func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
srcPath := src.Remote()
upstreams, err := f.create(ctx, srcPath)
if err == fs.ErrorObjectNotFound {
if err := f.Mkdir(ctx, parentDir(srcPath)); err != nil {
return nil, err
}
upstreams, err = f.create(ctx, srcPath)
}
if err != nil {
return nil, err
}
if len(upstreams) == 1 {
u := upstreams[0]
var o fs.Object
var err error
if stream {
o, err = u.Features().PutStream(ctx, in, src, options...)
} else {
o, err = u.Put(ctx, in, src, options...)
}
if err != nil {
return nil, err
}
e, err := f.wrapEntries(u.WrapObject(o))
return e.(*Object), err
}
errs := Errors(make([]error, len(upstreams)+1))
// Get multiple readers
readers := make([]io.Reader, len(upstreams))
writers := make([]io.Writer, len(upstreams))
for i := range writers {
r, w := io.Pipe()
bw := bufio.NewWriter(w)
readers[i], writers[i] = r, bw
defer func() {
err := w.Close()
if err != nil {
panic(err)
}
}()
}
go func() {
mw := io.MultiWriter(writers...)
es := make([]error, len(writers)+1)
_, es[len(es)-1] = io.Copy(mw, in)
for i, bw := range writers {
es[i] = bw.(*bufio.Writer).Flush()
}
errs[len(upstreams)] = Errors(es).Err()
}()
// Multi-threading
objs := make([]upstream.Entry, len(upstreams))
multithread(len(upstreams), func(i int) {
u := upstreams[i]
var o fs.Object
var err error
if stream {
o, err = u.Features().PutStream(ctx, readers[i], src, options...)
} else {
o, err = u.Put(ctx, readers[i], src, options...)
}
if err != nil {
errs[i] = errors.Wrap(err, u.Name())
return
}
objs[i] = u.WrapObject(o)
})
err = errs.Err()
if err != nil {
return nil, err
}
e, err := f.wrapEntries(objs...)
return e.(*Object), err
}
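`put` reads the source exactly once even when writing to several upstreams: each upstream gets an `io.Pipe`, the source is copied into an `io.MultiWriter` over all the pipe writers, and every upstream uploads from its own pipe reader concurrently. A self-contained sketch of the same tee pattern, under the assumption that all consumers drain their readers (the helper names are mine, not rclone's):

```go
package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

// teeToAll streams src once, duplicating the bytes to n concurrent
// consumers via io.Pipe, the way the union backend uploads to
// multiple upstreams from a single reader.
func teeToAll(src io.Reader, n int, consume func(i int, r io.Reader) error) error {
	readers := make([]io.Reader, n)
	writers := make([]io.Writer, n)
	closers := make([]*io.PipeWriter, n)
	for i := range writers {
		r, w := io.Pipe()
		readers[i], writers[i], closers[i] = r, w, w
	}
	go func() {
		_, err := io.Copy(io.MultiWriter(writers...), src)
		for _, w := range closers {
			_ = w.CloseWithError(err) // propagate EOF or the copy error
		}
	}()
	errs := make([]error, n)
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			errs[i] = consume(i, readers[i])
		}(i)
	}
	wg.Wait()
	for _, err := range errs {
		if err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := teeToAll(strings.NewReader("hello union"), 2, func(i int, r io.Reader) error {
		b, err := io.ReadAll(r)
		fmt.Printf("upstream %d got %q\n", i, b)
		return err
	})
	fmt.Println("err:", err)
}
```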

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, false, options...)
default:
return nil, err
}
}

@@ -481,64 +221,29 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, true, options...)
default:
o, err := f.wr.Features().PutStream(ctx, in, src, options...)
if err != nil {
return nil, err
}
return f.wrapObject(o), err
}

// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
usage := &fs.Usage{
Total: new(int64),
Used: new(int64),
Trashed: new(int64),
Other: new(int64),
Free: new(int64),
Objects: new(int64),
return f.wr.Features().About(ctx)
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.wr.Put(ctx, in, src, options...)
if err != nil {
return nil, err
}
for _, u := range f.upstreams {
usg, err := u.About(ctx)
if err != nil {
return nil, err
}
if usg.Total != nil && usage.Total != nil {
*usage.Total += *usg.Total
} else {
usage.Total = nil
}
if usg.Used != nil && usage.Used != nil {
*usage.Used += *usg.Used
} else {
usage.Used = nil
}
if usg.Trashed != nil && usage.Trashed != nil {
*usage.Trashed += *usg.Trashed
} else {
usage.Trashed = nil
}
if usg.Other != nil && usage.Other != nil {
*usage.Other += *usg.Other
} else {
usage.Other = nil
}
if usg.Free != nil && usage.Free != nil {
*usage.Free += *usg.Free
} else {
usage.Free = nil
}
if usg.Objects != nil && usage.Objects != nil {
*usage.Objects += *usg.Objects
} else {
usage.Objects = nil
}
}
return usage, nil
return f.wrapObject(o), err
}
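`About` above only keeps a usage counter for as long as every upstream reports it: the per-remote values are summed field by field, and a field collapses to nil as soon as one backend leaves it unset. A small sketch of that nullable-counter aggregation (the helper names are illustrative):

```go
package main

import "fmt"

// addField accumulates src into dst, but collapses dst to nil as soon
// as any remote fails to report the field - a nil pointer means "unknown".
func addField(dst **int64, src *int64) {
	if src != nil && *dst != nil {
		**dst += *src
		return
	}
	*dst = nil
}

func int64p(v int64) *int64 { return &v }

func main() {
	total := new(int64)
	for _, remoteTotal := range []*int64{int64p(100), int64p(50)} {
		addField(&total, remoteTotal)
	}
	if total != nil {
		fmt.Println("total:", *total) // total: 150
	}
}
```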

// List the objects and directories in dir into entries. The
@@ -551,188 +256,60 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
entriess := make([][]upstream.Entry, len(f.upstreams))
errs := Errors(make([]error, len(f.upstreams)))
multithread(len(f.upstreams), func(i int) {
u := f.upstreams[i]
entries, err := u.List(ctx, dir)
if err != nil {
errs[i] = errors.Wrap(err, u.Name())
return
}
uEntries := make([]upstream.Entry, len(entries))
for j, e := range entries {
uEntries[j], _ = u.WrapEntry(e)
}
entriess[i] = uEntries
})
if len(errs) == len(errs.FilterNil()) {
errs = errs.Map(func(e error) error {
if errors.Cause(e) == fs.ErrorDirNotFound {
return nil
}
return e
})
if len(errs) == 0 {
return nil, fs.ErrorDirNotFound
}
return nil, errs.Err()
}
return f.mergeDirEntries(entriess)
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
var entriess [][]upstream.Entry
errs := Errors(make([]error, len(f.upstreams)))
var mutex sync.Mutex
multithread(len(f.upstreams), func(i int) {
u := f.upstreams[i]
var err error
callback := func(entries fs.DirEntries) error {
uEntries := make([]upstream.Entry, len(entries))
for j, e := range entries {
uEntries[j], _ = u.WrapEntry(e)
}
mutex.Lock()
entriess = append(entriess, uEntries)
mutex.Unlock()
return nil
}
do := u.Features().ListR
if do != nil {
err = do(ctx, dir, callback)
} else {
err = walk.ListR(ctx, u, dir, true, -1, walk.ListAll, callback)
set := make(map[string]fs.DirEntry)
found := false
for _, remote := range f.remotes {
var remoteEntries, err = remote.List(ctx, dir)
if err == fs.ErrorDirNotFound {
continue
}
if err != nil {
errs[i] = errors.Wrap(err, u.Name())
return
return nil, errors.Wrapf(err, "List failed on %v", remote)
}
})
if len(errs) == len(errs.FilterNil()) {
errs = errs.Map(func(e error) error {
if errors.Cause(e) == fs.ErrorDirNotFound {
return nil
}
return e
})
if len(errs) == 0 {
return fs.ErrorDirNotFound
found = true
for _, remoteEntry := range remoteEntries {
set[remoteEntry.Remote()] = remoteEntry
}
return errs.Err()
}
entries, err := f.mergeDirEntries(entriess)
if err != nil {
return err
if !found {
return nil, fs.ErrorDirNotFound
}
return callback(entries)
for _, entry := range set {
if o, ok := entry.(fs.Object); ok {
entry = f.wrapObject(o)
}
entries = append(entries, entry)
}
return entries, nil
}

// NewObject creates a new remote union file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
objs := make([]*upstream.Object, len(f.upstreams))
errs := Errors(make([]error, len(f.upstreams)))
multithread(len(f.upstreams), func(i int) {
u := f.upstreams[i]
o, err := u.NewObject(ctx, remote)
if err != nil && err != fs.ErrorObjectNotFound {
errs[i] = errors.Wrap(err, u.Name())
return
// NewObject creates a new remote union file object based on the first Object it finds (reverse remote order)
func (f *Fs) NewObject(ctx context.Context, path string) (fs.Object, error) {
for i := range f.remotes {
var remote = f.remotes[len(f.remotes)-i-1]
var obj, err = remote.NewObject(ctx, path)
if err == fs.ErrorObjectNotFound {
continue
}
objs[i] = u.WrapObject(o)
})
var entries []upstream.Entry
for _, o := range objs {
if o != nil {
entries = append(entries, o)
if err != nil {
return nil, errors.Wrapf(err, "NewObject failed on %v", remote)
}
return f.wrapObject(obj), nil
}
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound
}
e, err := f.wrapEntries(entries...)
if err != nil {
return nil, err
}
return e.(*Object), errs.Err()
return nil, fs.ErrorObjectNotFound
}

// Precision is the greatest Precision of all upstreams
// Precision is the greatest Precision of all remotes
func (f *Fs) Precision() time.Duration {
var greatestPrecision time.Duration
for _, u := range f.upstreams {
if u.Precision() > greatestPrecision {
greatestPrecision = u.Precision()
for _, remote := range f.remotes {
if remote.Precision() > greatestPrecision {
greatestPrecision = remote.Precision()
}
}
return greatestPrecision
}

func (f *Fs) action(ctx context.Context, path string) ([]*upstream.Fs, error) {
return f.actionPolicy.Action(ctx, f.upstreams, path)
}

func (f *Fs) actionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
return f.actionPolicy.ActionEntries(entries...)
}

func (f *Fs) create(ctx context.Context, path string) ([]*upstream.Fs, error) {
return f.createPolicy.Create(ctx, f.upstreams, path)
}

func (f *Fs) createEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
return f.createPolicy.CreateEntries(entries...)
}

func (f *Fs) search(ctx context.Context, path string) (*upstream.Fs, error) {
return f.searchPolicy.Search(ctx, f.upstreams, path)
}

func (f *Fs) searchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
return f.searchPolicy.SearchEntries(entries...)
}

func (f *Fs) mergeDirEntries(entriess [][]upstream.Entry) (fs.DirEntries, error) {
entryMap := make(map[string]([]upstream.Entry))
for _, en := range entriess {
if en == nil {
continue
}
for _, entry := range en {
remote := entry.Remote()
if f.Features().CaseInsensitive {
remote = strings.ToLower(remote)
}
entryMap[remote] = append(entryMap[remote], entry)
}
}
var entries fs.DirEntries
for path := range entryMap {
e, err := f.wrapEntries(entryMap[path]...)
if err != nil {
return nil, err
}
entries = append(entries, e)
}
return entries, nil
}
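`mergeDirEntries` buckets the per-upstream listings by remote path (lower-cased when the union is case-insensitive) so an entry seen on several upstreams is wrapped once. The grouping step in isolation, with plain strings standing in for `upstream.Entry`:

```go
package main

import (
	"fmt"
	"strings"
)

// groupByName buckets entries from several listings under a normalised
// key, mirroring how the union merges per-upstream directory listings.
func groupByName(listings [][]string, caseInsensitive bool) map[string][]string {
	grouped := make(map[string][]string)
	for _, listing := range listings {
		for _, name := range listing {
			key := name
			if caseInsensitive {
				key = strings.ToLower(key)
			}
			grouped[key] = append(grouped[key], name)
		}
	}
	return grouped
}

func main() {
	listings := [][]string{{"A.txt", "b.txt"}, {"a.txt"}}
	fmt.Println(groupByName(listings, true)) // map[a.txt:[A.txt a.txt] b.txt:[b.txt]]
}
```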

// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
@@ -743,64 +320,51 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, err
}
// Backward compatible to old config
if len(opt.Upstreams) == 0 && len(opt.Remotes) > 0 {
for i := 0; i < len(opt.Remotes)-1; i++ {
opt.Remotes[i] = opt.Remotes[i] + ":ro"
}
opt.Upstreams = opt.Remotes
if len(opt.Remotes) == 0 {
return nil, errors.New("union can't point to an empty remote - check the value of the remotes setting")
}
if len(opt.Upstreams) == 0 {
return nil, errors.New("union can't point to an empty upstream - check the value of the upstreams setting")
if len(opt.Remotes) == 1 {
return nil, errors.New("union can't point to a single remote - check the value of the remotes setting")
}
if len(opt.Upstreams) == 1 {
return nil, errors.New("union can't point to a single upstream - check the value of the upstreams setting")
}
for _, u := range opt.Upstreams {
if strings.HasPrefix(u, name+":") {
return nil, errors.New("can't point union remote at itself - check the value of the upstreams setting")
for _, remote := range opt.Remotes {
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point union remote at itself - check the value of the remote setting")
}
}

upstreams := make([]*upstream.Fs, len(opt.Upstreams))
errs := Errors(make([]error, len(opt.Upstreams)))
multithread(len(opt.Upstreams), func(i int) {
u := opt.Upstreams[i]
upstreams[i], errs[i] = upstream.New(u, root, time.Duration(opt.CacheTime)*time.Second)
})
var usedUpstreams []*upstream.Fs
var fserr error
for i, err := range errs {
if err != nil && err != fs.ErrorIsFile {
var remotes []fs.Fs
for i := range opt.Remotes {
// Last remote first so we return the correct (last) matching fs in case of fs.ErrorIsFile
var remote = opt.Remotes[len(opt.Remotes)-i-1]
_, configName, fsPath, err := fs.ParseRemote(remote)
if err != nil {
return nil, err
}
// Only the upstreams that return ErrorIsFile would be used, if any
if err == fs.ErrorIsFile {
usedUpstreams = append(usedUpstreams, upstreams[i])
fserr = fs.ErrorIsFile
var rootString = path.Join(fsPath, filepath.ToSlash(root))
if configName != "local" {
rootString = configName + ":" + rootString
}
myFs, err := cache.Get(rootString)
if err != nil {
if err == fs.ErrorIsFile {
return myFs, err
}
return nil, err
}
remotes = append(remotes, myFs)
}
if fserr == nil {
usedUpstreams = upstreams

// Reverse the remotes again so they are in the order as before
for i, j := 0, len(remotes)-1; i < j; i, j = i+1, j-1 {
remotes[i], remotes[j] = remotes[j], remotes[i]
}

f := &Fs{
name: name,
root: root,
opt: *opt,
upstreams: usedUpstreams,
}
f.actionPolicy, err = policy.Get(opt.ActionPolicy)
if err != nil {
return nil, err
}
f.createPolicy, err = policy.Get(opt.CreatePolicy)
if err != nil {
return nil, err
}
f.searchPolicy, err = policy.Get(opt.SearchPolicy)
if err != nil {
return nil, err
name: name,
root: root,
opt: *opt,
remotes: remotes,
wr: remotes[len(remotes)-1],
}
var features = (&fs.Features{
CaseInsensitive: true,
@@ -812,54 +376,44 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
SetTier: true,
GetTier: true,
}).Fill(f)
for _, f := range upstreams {
features = features.Mask(f) // Mask all upstream fs
}
features = features.Mask(f.wr) // mask the features just on the writable fs

// Enable ListR when upstreams either support ListR or are local
// But not when all upstreams are local
if features.ListR == nil {
for _, u := range upstreams {
if u.Features().ListR != nil {
features.ListR = f.ListR
} else if !u.Features().IsLocal {
features.ListR = nil
break
}
// Really need the union of all remotes for these, so
// re-instate and calculate separately.
features.ChangeNotify = f.ChangeNotify
features.DirCacheFlush = f.DirCacheFlush

// FIXME maybe should be masking the bools here?

// Clear ChangeNotify and DirCacheFlush if all are nil
clearChangeNotify := true
clearDirCacheFlush := true
for _, remote := range f.remotes {
remoteFeatures := remote.Features()
if remoteFeatures.ChangeNotify != nil {
clearChangeNotify = false
}
if remoteFeatures.DirCacheFlush != nil {
clearDirCacheFlush = false
}
}
if clearChangeNotify {
features.ChangeNotify = nil
}
if clearDirCacheFlush {
features.DirCacheFlush = nil
}

f.features = features

// Get common intersection of hashes
hashSet := f.upstreams[0].Hashes()
for _, u := range f.upstreams[1:] {
hashSet = hashSet.Overlap(u.Hashes())
hashSet := f.remotes[0].Hashes()
for _, remote := range f.remotes[1:] {
hashSet = hashSet.Overlap(remote.Hashes())
}
f.hashSet = hashSet

return f, fserr
}

func parentDir(absPath string) string {
parent := path.Dir(strings.TrimRight(filepath.ToSlash(absPath), "/"))
if parent == "." {
parent = ""
}
return parent
}

func multithread(num int, fn func(int)) {
var wg sync.WaitGroup
for i := 0; i < num; i++ {
wg.Add(1)
i := i
go func() {
defer wg.Done()
fn(i)
}()
}
wg.Wait()
return f, nil
}
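`multithread` is the concurrency helper used throughout this file: run `fn(i)` for every index in its own goroutine and block until all of them finish. Callers give each goroutine its own slot in a pre-sized slice, so no mutex is needed for the results. A runnable usage sketch:

```go
package main

import (
	"fmt"
	"sync"
)

// multithread runs fn(0..num-1) concurrently and waits for all to finish.
func multithread(num int, fn func(int)) {
	var wg sync.WaitGroup
	for i := 0; i < num; i++ {
		wg.Add(1)
		i := i // capture the loop variable per goroutine
		go func() {
			defer wg.Done()
			fn(i)
		}()
	}
	wg.Wait()
}

func main() {
	errs := make([]error, 3)
	multithread(len(errs), func(i int) {
		// each goroutine owns errs[i], so no locking is needed
		errs[i] = fmt.Errorf("result %d", i)
	})
	fmt.Println(errs)
}
```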

// Check the interfaces are satisfied
@@ -873,5 +427,4 @@ var (
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
)

@@ -2,154 +2,17 @@
package union_test

import (
"os"
"path/filepath"
"testing"

_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/require"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}

func TestStandard(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-standard1")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-standard2")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-standard3")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
name := "TestUnion"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "union"},
{Name: name, Key: "upstreams", Value: upstreams},
{Name: name, Key: "action_policy", Value: "epall"},
{Name: name, Key: "create_policy", Value: "epmfs"},
{Name: name, Key: "search_policy", Value: "ff"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}

func TestRO(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-ro1")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-ro2")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-ro3")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + ":ro " + tempdir3 + ":ro"
name := "TestUnionRO"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "union"},
{Name: name, Key: "upstreams", Value: upstreams},
{Name: name, Key: "action_policy", Value: "epall"},
{Name: name, Key: "create_policy", Value: "epmfs"},
{Name: name, Key: "search_policy", Value: "ff"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}

func TestNC(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-nc1")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-nc2")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-nc3")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + ":nc " + tempdir3 + ":nc"
name := "TestUnionNC"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "union"},
{Name: name, Key: "upstreams", Value: upstreams},
{Name: name, Key: "action_policy", Value: "epall"},
{Name: name, Key: "create_policy", Value: "epmfs"},
{Name: name, Key: "search_policy", Value: "ff"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}

func TestPolicy1(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-policy11")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-policy12")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-policy13")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
name := "TestUnionPolicy1"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "union"},
{Name: name, Key: "upstreams", Value: upstreams},
{Name: name, Key: "action_policy", Value: "all"},
{Name: name, Key: "create_policy", Value: "lus"},
{Name: name, Key: "search_policy", Value: "all"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}

func TestPolicy2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir1 := filepath.Join(os.TempDir(), "rclone-union-test-policy21")
tempdir2 := filepath.Join(os.TempDir(), "rclone-union-test-policy22")
tempdir3 := filepath.Join(os.TempDir(), "rclone-union-test-policy23")
require.NoError(t, os.MkdirAll(tempdir1, 0744))
require.NoError(t, os.MkdirAll(tempdir2, 0744))
require.NoError(t, os.MkdirAll(tempdir3, 0744))
upstreams := tempdir1 + " " + tempdir2 + " " + tempdir3
name := "TestUnionPolicy2"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "union"},
{Name: name, Key: "upstreams", Value: upstreams},
{Name: name, Key: "action_policy", Value: "all"},
{Name: name, Key: "create_policy", Value: "rand"},
{Name: name, Key: "search_policy", Value: "ff"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
RemoteName: "TestUnion:",
NilObject: nil,
SkipFsMatch: true,
})
}

@@ -1,348 +0,0 @@
package upstream

import (
"context"
"io"
"math"
"path"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"time"

"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
)

var (
// ErrUsageFieldNotSupported states that the usage field is not supported by the backend
ErrUsageFieldNotSupported = errors.New("this usage field is not supported")
)

// Fs is a wrapper around any fs and its configs
type Fs struct {
fs.Fs
RootFs fs.Fs
RootPath string
writable bool
creatable bool
usage *fs.Usage // Cache the usage
cacheTime time.Duration // cache duration
cacheExpiry int64 // usage cache expiry time
cacheMutex sync.RWMutex
cacheOnce sync.Once
cacheUpdate bool // if the cache is updating
}

// Directory describes a wrapped Directory
//
// This is a wrapped Directory which contains the upstream Fs
type Directory struct {
fs.Directory
f *Fs
}

// Object describes a wrapped Object
//
// This is a wrapped Object which contains the upstream Fs
type Object struct {
fs.Object
f *Fs
}

// Entry describes a wrapped fs.DirEntry interface with the
// information of upstream Fs
type Entry interface {
fs.DirEntry
UpstreamFs() *Fs
}

// New creates a new Fs based on the
// string formatted `type:root_path(:ro/:nc)`
func New(remote, root string, cacheTime time.Duration) (*Fs, error) {
_, configName, fsPath, err := fs.ParseRemote(remote)
if err != nil {
return nil, err
}
f := &Fs{
RootPath: root,
writable: true,
creatable: true,
cacheExpiry: time.Now().Unix(),
cacheTime: cacheTime,
usage: &fs.Usage{},
}
if strings.HasSuffix(fsPath, ":ro") {
f.writable = false
f.creatable = false
fsPath = fsPath[0 : len(fsPath)-3]
} else if strings.HasSuffix(fsPath, ":nc") {
f.writable = true
f.creatable = false
fsPath = fsPath[0 : len(fsPath)-3]
}
if configName != "local" {
fsPath = configName + ":" + fsPath
}
rFs, err := cache.Get(fsPath)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}
f.RootFs = rFs
rootString := path.Join(fsPath, filepath.ToSlash(root))
myFs, err := cache.Get(rootString)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}
f.Fs = myFs
return f, err
}
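`New` recognises the optional `:ro` and `:nc` markers on the end of an upstream path: `:ro` forbids both writes and creates, `:nc` allows writes to existing objects but not creates. The suffix handling in isolation (a sketch; the function name is mine, not rclone's):

```go
package main

import (
	"fmt"
	"strings"
)

// parseSuffix strips an optional :ro or :nc marker from an upstream
// path and reports the resulting write/create permissions.
func parseSuffix(fsPath string) (path string, writable, creatable bool) {
	switch {
	case strings.HasSuffix(fsPath, ":ro"):
		return strings.TrimSuffix(fsPath, ":ro"), false, false
	case strings.HasSuffix(fsPath, ":nc"):
		return strings.TrimSuffix(fsPath, ":nc"), true, false
	default:
		return fsPath, true, true
	}
}

func main() {
	for _, p := range []string{"/mnt/a", "/mnt/b:ro", "remote:dir:nc"} {
		path, w, c := parseSuffix(p)
		fmt.Printf("%-14s writable=%v creatable=%v\n", path, w, c)
	}
}
```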

// WrapDirectory wraps a fs.Directory to include the info
// of the upstream Fs
func (f *Fs) WrapDirectory(e fs.Directory) *Directory {
if e == nil {
return nil
}
return &Directory{
Directory: e,
f: f,
}
}

// WrapObject wraps a fs.Object to include the info
// of the upstream Fs
func (f *Fs) WrapObject(o fs.Object) *Object {
if o == nil {
return nil
}
return &Object{
Object: o,
f: f,
}
}

// WrapEntry wraps a fs.DirEntry to include the info
// of the upstream Fs
func (f *Fs) WrapEntry(e fs.DirEntry) (Entry, error) {
switch e.(type) {
case fs.Object:
return f.WrapObject(e.(fs.Object)), nil
case fs.Directory:
return f.WrapDirectory(e.(fs.Directory)), nil
default:
return nil, errors.Errorf("unknown object type %T", e)
}
}

// UpstreamFs gets the upstream Fs the entry is stored in
func (e *Directory) UpstreamFs() *Fs {
return e.f
}

// UpstreamFs gets the upstream Fs the entry is stored in
func (o *Object) UpstreamFs() *Fs {
return o.f
}

// UnWrap returns the Object that this Object is wrapping or
// nil if it isn't wrapping anything
func (o *Object) UnWrap() fs.Object {
return o.Object
}

// IsCreatable returns whether the fs is allowed to create new objects
func (f *Fs) IsCreatable() bool {
return f.creatable
}

// IsWritable returns whether the fs is allowed to write
func (f *Fs) IsWritable() bool {
return f.writable
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.Fs.Put(ctx, in, src, options...)
if err != nil {
return o, err
}
f.cacheMutex.Lock()
defer f.cacheMutex.Unlock()
size := src.Size()
if f.usage.Used != nil {
*f.usage.Used += size
}
if f.usage.Free != nil {
*f.usage.Free -= size
}
if f.usage.Objects != nil {
*f.usage.Objects++
}
return o, nil
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.Features().PutStream
if do == nil {
return nil, fs.ErrorNotImplemented
}
o, err := do(ctx, in, src, options...)
if err != nil {
return o, err
}
f.cacheMutex.Lock()
defer f.cacheMutex.Unlock()
size := o.Size()
if f.usage.Used != nil {
*f.usage.Used += size
}
if f.usage.Free != nil {
*f.usage.Free -= size
}
if f.usage.Objects != nil {
*f.usage.Objects++
}
return o, nil
}

// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
size := o.Size()
err := o.Object.Update(ctx, in, src, options...)
if err != nil {
return err
}
o.f.cacheMutex.Lock()
defer o.f.cacheMutex.Unlock()
delta := o.Size() - size
if delta <= 0 {
return nil
}
if o.f.usage.Used != nil {
*o.f.usage.Used += size
}
if o.f.usage.Free != nil {
*o.f.usage.Free -= size
}
return nil
}

// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
err := f.updateUsage()
if err != nil {
return nil, ErrUsageFieldNotSupported
}
}
f.cacheMutex.RLock()
defer f.cacheMutex.RUnlock()
return f.usage, nil
}

// GetFreeSpace gets the free space of the fs
func (f *Fs) GetFreeSpace() (int64, error) {
if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
err := f.updateUsage()
if err != nil {
return math.MaxInt64, ErrUsageFieldNotSupported
}
}
f.cacheMutex.RLock()
defer f.cacheMutex.RUnlock()
if f.usage.Free == nil {
return math.MaxInt64, ErrUsageFieldNotSupported
}
return *f.usage.Free, nil
}

// GetUsedSpace gets the used space of the fs
func (f *Fs) GetUsedSpace() (int64, error) {
if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
err := f.updateUsage()
if err != nil {
return 0, ErrUsageFieldNotSupported
}
}
f.cacheMutex.RLock()
defer f.cacheMutex.RUnlock()
if f.usage.Used == nil {
return 0, ErrUsageFieldNotSupported
}
return *f.usage.Used, nil
}

// GetNumObjects gets the number of objects of the fs
func (f *Fs) GetNumObjects() (int64, error) {
if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
err := f.updateUsage()
if err != nil {
return 0, ErrUsageFieldNotSupported
}
}
f.cacheMutex.RLock()
defer f.cacheMutex.RUnlock()
if f.usage.Objects == nil {
return 0, ErrUsageFieldNotSupported
}
return *f.usage.Objects, nil
}

func (f *Fs) updateUsage() (err error) {
if do := f.RootFs.Features().About; do == nil {
return ErrUsageFieldNotSupported
}
done := false
f.cacheOnce.Do(func() {
f.cacheMutex.Lock()
err = f.updateUsageCore(false)
f.cacheMutex.Unlock()
done = true
})
if done {
return err
}
if !f.cacheUpdate {
f.cacheUpdate = true
go func() {
_ = f.updateUsageCore(true)
f.cacheUpdate = false
}()
}
return nil
}
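`updateUsage` fills the cache synchronously exactly once via `sync.Once`; after that, an expired cache is served stale while a single background goroutine refreshes it. A compact sketch of that pattern (the `cachedValue` type is illustrative, not rclone's):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// cachedValue refreshes synchronously on first use (sync.Once), then
// serves the stale value while one background goroutine refreshes.
type cachedValue struct {
	mu       sync.RWMutex
	once     sync.Once
	expiry   int64 // unix seconds, read/written atomically
	updating int32
	value    int
	fetch    func() int // possibly slow, like an About() call
	ttl      time.Duration
}

func (c *cachedValue) get() int {
	if atomic.LoadInt64(&c.expiry) <= time.Now().Unix() {
		first := false
		c.once.Do(func() { c.refresh(); first = true }) // first caller blocks
		if !first && atomic.CompareAndSwapInt32(&c.updating, 0, 1) {
			go func() { // later callers trigger one async refresh
				c.refresh()
				atomic.StoreInt32(&c.updating, 0)
			}()
		}
	}
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.value
}

func (c *cachedValue) refresh() {
	v := c.fetch()
	c.mu.Lock()
	c.value = v
	c.mu.Unlock()
	atomic.StoreInt64(&c.expiry, time.Now().Add(c.ttl).Unix())
}

func main() {
	c := &cachedValue{fetch: func() int { return 42 }, ttl: time.Minute}
	fmt.Println(c.get())
}
```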

func (f *Fs) updateUsageCore(lock bool) error {
// Run in background, should not be cancelled by user
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
usage, err := f.RootFs.Features().About(ctx)
if err != nil {
f.cacheUpdate = false
return err
}
if lock {
f.cacheMutex.Lock()
defer f.cacheMutex.Unlock()
}
// Store usage
atomic.StoreInt64(&f.cacheExpiry, time.Now().Add(f.cacheTime).Unix())
f.usage = usage
return nil
}
@@ -989,14 +989,13 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return nil, errors.Wrap(err, "about call failed")
}
usage := &fs.Usage{}
if q.Used >= 0 {
usage.Used = fs.NewUsageValue(q.Used)
}
if q.Available >= 0 {
usage.Free = fs.NewUsageValue(q.Available)
}
if q.Available >= 0 && q.Used >= 0 {
usage.Total = fs.NewUsageValue(q.Available + q.Used)
if q.Available != 0 || q.Used != 0 {
if q.Available >= 0 && q.Used >= 0 {
usage.Total = fs.NewUsageValue(q.Available + q.Used)
}
if q.Used >= 0 {
usage.Used = fs.NewUsageValue(q.Used)
}
}
return usage, nil
}
@@ -1135,7 +1134,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
NoResponse: true,
ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
ContentType: fs.MimeType(ctx, src),
Options: options,
}
if o.fs.useOCMtime || o.fs.hasMD5 || o.fs.hasSHA1 {
opts.ExtraHeaders = map[string]string{}

@@ -1065,7 +1065,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return resp.Body, err
}

func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeType string, options ...fs.OpenOption) (err error) {
func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeType string) (err error) {
// prepare upload
var resp *http.Response
var ur api.AsyncInfo
@@ -1073,7 +1073,6 @@ func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeT
Method: "GET",
Path: "/resources/upload",
Parameters: url.Values{},
Options: options,
}

opts.Parameters.Set("path", o.fs.opt.Enc.FromStandardPath(o.filePath()))
@@ -1122,7 +1121,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}

//upload file
err = o.upload(ctx, in1, true, fs.MimeType(ctx, src), options...)
err = o.upload(ctx, in1, true, fs.MimeType(ctx, src))
if err != nil {
return err
}

@@ -1,3 +0,0 @@
# Email addresses to ignore in the git log when making the authors.md file
<nick@raig-wood.com>
<anaghk.dos@gmail.com>
@@ -1,136 +0,0 @@
// +build ignore

// Attempt to work out if branches have already been merged
package main

import (
"bufio"
"errors"
"flag"
"fmt"
"log"
"os"
"os/exec"
"regexp"
)

var (
// Flags
master = flag.String("master", "master", "Branch to work out if merged into")
version = "development version" // overridden by goreleaser
)

func usage() {
fmt.Fprintf(os.Stderr, `Usage: %s [options]
Version: %s

Attempt to work out if branches in the current git repo have been
merged into master.

Example usage:

%s

Full options:
`, os.Args[0], version, os.Args[0])
flag.PrintDefaults()
}

var (
printedSep = false
)

const (
sep1 = "============================================================"
sep2 = "------------------------------------------------------------"
)

// Show the diff between two git revisions
func gitDiffDiff(rev1, rev2 string) {
fmt.Printf("Diff of diffs of %q and %q\n", rev1, rev2)
cmd := exec.Command("bash", "-c", fmt.Sprintf(`diff <(git show "%s") <(git show "%s")`, rev1, rev2))
out, err := cmd.Output()
if err != nil {
var exitErr *exec.ExitError
if errors.As(err, &exitErr) && exitErr.ExitCode() == 1 {
// OK just different
} else {
log.Fatalf("git diff diff failed: %#v", err)
}
}
_, _ = os.Stdout.Write(out)
}

var reCommit = regexp.MustCompile(`commit ([0-9a-f]{32,})`)

// Grep the git log for logLine
func gitLogGrep(branch, rev, logLine string) {
cmd := exec.Command("git", "log", "--grep", regexp.QuoteMeta(logLine), *master)
out, err := cmd.Output()
if err != nil {
log.Fatalf("git log grep failed: %v", err)
}
if len(out) > 0 {
if !printedSep {
fmt.Println(sep1)
printedSep = true
}
fmt.Printf("Branch: %s - MAY BE MERGED to %s\nLog: %s\n\n", branch, *master, logLine)
_, _ = os.Stdout.Write(out)
match := reCommit.FindSubmatch(out)
if len(match) != 0 {
commit := string(match[1])
fmt.Println(sep2)
gitDiffDiff(rev, commit)
}
fmt.Println(sep1)
}
}

// * b2-fix-download-url 4209c768a [gone] b2: fix transfers when using download_url
var reLine = regexp.MustCompile(`^[ *] (\S+)\s+([0-9a-f]+)\s+(?:\[[^]]+\] )?(.*)$`)

// Run git branch -v, parse the output and check it in the log
func gitBranch() {
cmd := exec.Command("git", "branch", "-v")
cmd.Stderr = os.Stderr
out, err := cmd.StdoutPipe()
if err != nil {
log.Fatalf("git branch pipe failed: %v", err)
}
if err := cmd.Start(); err != nil {
log.Fatalf("git branch failed: %v", err)
}
scanner := bufio.NewScanner(out)
for scanner.Scan() {
line := scanner.Text()
match := reLine.FindStringSubmatch(line)
if len(match) != 4 {
log.Printf("Invalid line %q", line)
continue
}
branch, rev, logLine := match[1], match[2], match[3]
if branch == *master {
continue
}
//fmt.Printf("branch = %q, rev = %q, logLine = %q\n", branch, rev, logLine)
gitLogGrep(branch, rev, logLine)
}
if err := scanner.Err(); err != nil {
log.Fatalf("failed reading git branch: %v", err)
}
if err := cmd.Wait(); err != nil {
log.Fatalf("git branch wait failed: %v", err)
}
}

func main() {
flag.Usage = usage
flag.Parse()
args := flag.Args()
if len(args) != 0 {
usage()
log.Fatal("Wrong number of arguments")
}
gitBranch()
}
@@ -182,7 +182,6 @@ func compileArch(version, goos, goarch, dir string) bool {
args := []string{
"go", "build",
"--ldflags", "-s -X github.com/rclone/rclone/fs.Version=" + version,
"-trimpath",
"-i",
"-o", output,
"-tags", *tags,

@@ -4,7 +4,6 @@ Make backend documentation
"""

import os
import io
import subprocess

marker = "<!--- autogenerated options"
@@ -20,11 +19,6 @@ def output_docs(backend, out):
out.flush()
subprocess.check_call(["rclone", "help", "backend", backend], stdout=out)

def output_backend_tool_docs(backend, out):
"""Output documentation for backend tool to out"""
out.flush()
subprocess.call(["rclone", "backend", "help", backend], stdout=out, stderr=subprocess.DEVNULL)

def alter_doc(backend):
"""Alter the documentation for backend"""
doc_file = "docs/content/"+backend+".md"
@@ -41,7 +35,6 @@ def alter_doc(backend):
start_full = start + " - DO NOT EDIT, instead edit fs.RegInfo in backend/%s/%s.go then run make backenddocs -->\n" % (backend, backend)
out_file.write(start_full)
output_docs(backend, out_file)
output_backend_tool_docs(backend, out_file)
out_file.write(stop+" -->\n")
altered = True
if not in_docs:

@@ -54,10 +54,8 @@ docs = [
"pcloud.md",
"premiumizeme.md",
"putio.md",
"seafile.md",
"sftp.md",
"sugarsync.md",
"tardigrade.md",
"union.md",
"webdav.md",
"yandex.md",
@@ -112,7 +110,7 @@ def read_doc(doc):
# Remove icons
contents = re.sub(r'<i class="fa.*?</i>\s*', "", contents)
# Make [...](/links/) absolute
contents = re.sub(r'\]\((\/.*?\/(#.*)?)\)', r"](https://rclone.org\1)", contents)
contents = re.sub(r'\((\/.*?\/)\)', r"(https://rclone.org\1)", contents)
# Interpret provider shortcode
# {{< provider name="Amazon S3" home="https://aws.amazon.com/s3/" config="/s3/" >}}
contents = re.sub(r'\{\{<\s+provider.*?name="(.*?)".*?>\}\}', r"\1", contents)

@@ -7,15 +7,17 @@ import re
import subprocess

AUTHORS = "docs/content/authors.md"
IGNORE = "bin/.ignore-emails"
IGNORE = [ "nick@raig-wood.com" ]

def load(filename):
def load():
"""
returns a set of emails already in the file
returns a set of emails already in authors.md
"""
with open(filename) as fd:
with open(AUTHORS) as fd:
authors = fd.read()
return set(re.findall(r"<(.*?)>", authors))
emails = set(re.findall(r"<(.*?)>", authors))
emails.update(IGNORE)
return emails

def add_email(name, email):
"""
@@ -30,9 +32,7 @@ def main():
out = subprocess.check_output(["git", "log", '--reverse', '--format=%an|%ae', "master"])
out = out.decode("utf-8")

ignored = load(IGNORE)
previous = load(AUTHORS)
previous.update(ignored)
previous = load()
for line in out.split("\n"):
line = line.strip()
if line == "":

@@ -6,7 +6,6 @@ import (
_ "github.com/rclone/rclone/cmd"
_ "github.com/rclone/rclone/cmd/about"
_ "github.com/rclone/rclone/cmd/authorize"
_ "github.com/rclone/rclone/cmd/backend"
_ "github.com/rclone/rclone/cmd/cachestats"
_ "github.com/rclone/rclone/cmd/cat"
_ "github.com/rclone/rclone/cmd/check"

@@ -1,179 +0,0 @@
package backend

import (
"context"
"encoding/json"
"fmt"
"os"
"sort"

"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/rc"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)

var (
options []string
useJSON bool
)

func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.StringArrayVarP(cmdFlags, &options, "option", "o", options, "Option in the form name=value or name.")
flags.BoolVarP(cmdFlags, &useJSON, "json", "", useJSON, "Always output in JSON format.")
}

var commandDefinition = &cobra.Command{
Use: "backend <command> remote:path [opts] <args>",
Short: `Run a backend specific command.`,
Long: `
This runs a backend specific command. The commands themselves (except
for "help" and "features") are defined by the backends and you should
see the backend docs for definitions.

You can discover what commands a backend implements by using

rclone backend help remote:
rclone backend help <backendname>

You can also discover information about the backend using (see
[operations/fsinfo](/rc/#operations/fsinfo) in the remote control docs
for more info).

rclone backend features remote:

Pass options to the backend command with -o. This should be key=value or key, eg:

rclone backend stats remote:path stats -o format=json -o long

Pass arguments to the backend by placing them on the end of the line

rclone backend cleanup remote:path file1 file2 file3

Note: to run these commands on a running backend, see
[backend/command](/rc/#backend/command) in the rc docs.
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(2, 1E6, command, args)
name, remote := args[0], args[1]
cmd.Run(false, false, command, func() error {
// show help if remote is a backend name
if name == "help" {
fsInfo, err := fs.Find(remote)
if err == nil {
return showHelp(fsInfo)
}
}
// Create remote
fsInfo, configName, fsPath, config, err := fs.ConfigFs(remote)
if err != nil {
return err
}
f, err := fsInfo.NewFs(configName, fsPath, config)
if err != nil {
return err
}
// Run the command
var out interface{}
switch name {
case "help":
return showHelp(fsInfo)
case "features":
out = operations.GetFsInfo(f)
default:
doCommand := f.Features().Command
if doCommand == nil {
return errors.Errorf("%v: doesn't support backend commands", f)
}
arg := args[2:]
opt := rc.ParseOptions(options)
out, err = doCommand(context.Background(), name, arg, opt)
}
if err != nil {
return errors.Wrapf(err, "command %q failed", name)

}
// Output the result
writeJSON := false
if useJSON {
writeJSON = true
} else {
switch x := out.(type) {
case nil:
case string:
fmt.Println(out)
case []string:
for _, line := range x {
fmt.Println(line)
}
default:
writeJSON = true
}
}
if writeJSON {
// Write indented JSON to the output
enc := json.NewEncoder(os.Stdout)
enc.SetIndent("", "\t")
err = enc.Encode(out)
if err != nil {
return errors.Wrap(err, "failed to write JSON")
}
}
return nil
})
return nil
},
}
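The output logic above prints strings and string slices directly and falls back to indented JSON for any other type (or whenever `--json` forces it). The type switch in isolation:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// printResult writes strings plainly and everything else as indented JSON.
func printResult(out interface{}) error {
	switch x := out.(type) {
	case nil:
		return nil
	case string:
		fmt.Println(x)
		return nil
	case []string:
		for _, line := range x {
			fmt.Println(line)
		}
		return nil
	default:
		enc := json.NewEncoder(os.Stdout)
		enc.SetIndent("", "\t")
		return enc.Encode(x)
	}
}

func main() {
	_ = printResult([]string{"a", "b"})
	_ = printResult(map[string]int{"files": 3}) // falls back to JSON
}
```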

// show help for a backend
func showHelp(fsInfo *fs.RegInfo) error {
cmds := fsInfo.CommandHelp
name := fsInfo.Name
if len(cmds) == 0 {
return errors.Errorf("%s backend has no commands", name)
}
fmt.Printf("### Backend commands\n\n")
fmt.Printf(`Here are the commands specific to the %s backend.

Run them with

    rclone backend COMMAND remote:

The help below will explain what arguments each command takes.

See [the "rclone backend" command](/commands/rclone_backend/) for more
info on how to pass options and arguments.

These can be run on a running backend using the rc command
[backend/command](/rc/#backend/command).

`, name)
for _, cmd := range cmds {
fmt.Printf("#### %s\n\n", cmd.Name)
fmt.Printf("%s\n\n", cmd.Short)
fmt.Printf("    rclone backend %s remote: [options] [<arguments>+]\n\n", cmd.Name)
if cmd.Long != "" {
fmt.Printf("%s\n\n", cmd.Long)
}
if len(cmd.Opts) != 0 {
fmt.Printf("Options:\n\n")

ks := []string{}
for k := range cmd.Opts {
ks = append(ks, k)
}
sort.Strings(ks)
for _, k := range ks {
v := cmd.Opts[k]
fmt.Printf("- %q: %s\n", k, v)
}
fmt.Printf("\n")
}
}
return nil
}
@@ -9,7 +9,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/cache"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/spf13/cobra"
)

@@ -23,10 +22,8 @@ var commandDefinition = &cobra.Command{
Long: `
Print cache stats for a remote in JSON format
`,
Hidden: true,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fs.Logf(nil, `"rclone cachestats" is deprecated, use "rclone backend stats %s" instead`, args[0])

fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {

@@ -66,7 +66,6 @@ const (
exitCodeNoRetryError
exitCodeFatalError
exitCodeTransferExceeded
exitCodeNoFilesTransferred
)

// ShowVersion prints the version to stdout
@@ -313,7 +312,6 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
}
}
resolveExitCode(cmdErr)

}

// CheckArgs checks there are enough arguments and prints a message if not
@@ -432,11 +430,6 @@ func initConfig() {
func resolveExitCode(err error) {
atexit.Run()
if err == nil {
if fs.Config.ErrorOnNoTransfer {
if accounting.GlobalStats().GetTransfers() == 0 {
os.Exit(exitCodeNoFilesTransferred)
}
}
os.Exit(exitCodeSuccess)
}


@@ -251,14 +251,7 @@ func (fsys *FS) Readdir(dirPath string,
fs.Errorf(dirPath, "Name too long (%d bytes) for FUSE, skipping: %s", len(name), name)
continue
}
if usingReaddirPlus {
// We have called host.SetCapReaddirPlus() so supply the stat information
var stat fuse.Stat_t
_ = fsys.stat(node, &stat) // not capable of returning an error
fill(name, &stat, 0)
} else {
fill(name, nil, 0)
}
fill(name, nil, 0)
}
}
itemsRead = len(items)
@@ -275,15 +268,25 @@ func (fsys *FS) Releasedir(path string, fh uint64) (errc int) {
func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
defer log.Trace(path, "")("stat=%+v, errc=%d", stat, &errc)
const blockSize = 4096
total, _, free := fsys.VFS.Statfs()
stat.Blocks = uint64(total) / blockSize // Total data blocks in file system.
stat.Bfree = uint64(free) / blockSize // Free blocks in file system.
stat.Bavail = stat.Bfree // Free blocks in file system if you're not root.
stat.Files = 1e9 // Total files in file system.
stat.Ffree = 1e9 // Free files in file system.
stat.Bsize = blockSize // Block size
stat.Namemax = 255 // Maximum file name length?
stat.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
const fsBlocks = (1 << 50) / blockSize
stat.Blocks = fsBlocks // Total data blocks in file system.
stat.Bfree = fsBlocks // Free blocks in file system.
stat.Bavail = fsBlocks // Free blocks in file system if you're not root.
stat.Files = 1e9 // Total files in file system.
stat.Ffree = 1e9 // Free files in file system.
stat.Bsize = blockSize // Block size
stat.Namemax = 255 // Maximum file name length?
stat.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
total, used, free := fsys.VFS.Statfs()
if total >= 0 {
stat.Blocks = uint64(total) / blockSize
}
if used >= 0 {
stat.Bfree = stat.Blocks - uint64(used)/blockSize
}
if free >= 0 {
stat.Bavail = uint64(free) / blockSize
}
mountlib.ClipBlocks(&stat.Blocks)
mountlib.ClipBlocks(&stat.Bfree)
mountlib.ClipBlocks(&stat.Bavail)
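The replacement `Statfs` advertises a large fixed filesystem (1 PiB in 4 KiB blocks) and only overwrites the block counts when the VFS actually reports a size (values `>= 0`), clipping the results so smaller FUSE clients don't overflow. A sketch of the byte-to-block conversion; `clipBlocks` here is a stand-in for `mountlib.ClipBlocks`, whose exact limit I'm assuming:

```go
package main

import "fmt"

const blockSize = 4096

// toBlocks converts a byte count to filesystem blocks, leaving the
// fallback untouched when the size is unknown (negative).
func toBlocks(bytes int64, fallback uint64) uint64 {
	if bytes < 0 {
		return fallback
	}
	return uint64(bytes) / blockSize
}

// clipBlocks caps a block count so 32-bit clients don't overflow
// (assumption: mountlib.ClipBlocks has this intent).
func clipBlocks(b *uint64) {
	const max = (1 << 32) - 1
	if *b > max {
		*b = max
	}
}

func main() {
	const fsBlocks = (1 << 50) / blockSize // optimistic default: 1 PiB
	total := toBlocks(-1, fsBlocks)        // unknown total keeps the default
	free := toBlocks(10*1024*1024*1024, fsBlocks)
	clipBlocks(&total)
	clipBlocks(&free)
	fmt.Println(total, free)
}
```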
@@ -545,11 +548,11 @@ func translateError(err error) (errc int) {
switch errors.Cause(err) {
case vfs.OK:
return 0
case vfs.ENOENT, fs.ErrorDirNotFound, fs.ErrorObjectNotFound:
case vfs.ENOENT:
return -fuse.ENOENT
case vfs.EEXIST, fs.ErrorDirExists:
case vfs.EEXIST:
return -fuse.EEXIST
case vfs.EPERM, fs.ErrorPermissionDenied:
case vfs.EPERM:
return -fuse.EPERM
case vfs.ECLOSED:
return -fuse.EBADF
@@ -561,7 +564,7 @@ func translateError(err error) (errc int) {
return -fuse.EBADF
case vfs.EROFS:
return -fuse.EROFS
case vfs.ENOSYS, fs.ErrorNotImplemented:
case vfs.ENOSYS:
return -fuse.ENOSYS
case vfs.EINVAL:
return -fuse.EINVAL
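`translateError` switches on `errors.Cause(err)` so sentinel errors still match after being wrapped with github.com/pkg/errors. A minimal demonstration of why that unwrap matters (the sentinel and errno values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errNotFound = errors.New("not found")

// toErrno maps a possibly-wrapped sentinel error to an errno-style code.
func toErrno(err error) int {
	switch errors.Cause(err) { // unwrap pkg/errors wrapping first
	case nil:
		return 0
	case errNotFound:
		return -2 // ENOENT
	default:
		return -5 // EIO
	}
}

func main() {
	wrapped := errors.Wrap(errNotFound, "stat failed")
	fmt.Println(toErrno(wrapped)) // -2, despite the wrapping
}
```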
|
||||
|
||||
@@ -26,13 +26,6 @@ import (
|
||||
"github.com/rclone/rclone/vfs/vfsflags"
|
||||
)
|
||||
|
||||
const (
|
||||
// SetCapReaddirPlus informs the host that the hosted file system has the readdir-plus
|
||||
// capability [Windows only]. A file system that has the readdir-plus capability can send
|
||||
// full stat information during Readdir, thus avoiding extraneous Getattr calls.
|
||||
usingReaddirPlus = runtime.GOOS == "windows"
|
||||
)
|
||||
|
||||
func init() {
|
||||
name := "cmount"
|
||||
if runtime.GOOS == "windows" {
|
||||
@@ -149,10 +142,6 @@ func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, er
|
||||
// Create underlying FS
|
||||
fsys := NewFS(f)
|
||||
host := fuse.NewFileSystemHost(fsys)
|
||||
if usingReaddirPlus {
|
||||
host.SetCapReaddirPlus(true)
|
||||
}
|
||||
host.SetCapCaseInsensitive(f.Features().CaseInsensitive)
|
||||
|
||||
// Create options
|
||||
options := mountOptions(f.Name()+":"+f.Root(), mountpoint)

@@ -11,9 +11,9 @@ package cmount
import (
"testing"

"github.com/rclone/rclone/vfs/vfstest"
"github.com/rclone/rclone/cmd/mountlib/mounttest"
)

func TestMount(t *testing.T) {
vfstest.RunTests(t, false, mount)
mounttest.RunTests(t, mount)
}

@@ -66,8 +66,8 @@ option when copying a small number of files into a large destination
can speed transfers up greatly.

For example, if you have many files in /path/to/src but only a few of
them change every day, you can copy all the files which have changed
recently very efficiently like this:
them change every day, you can to copy all the files which have
changed recently very efficiently like this:

    rclone copy --max-age 24h --no-traverse /path/to/src remote:

@@ -15,14 +15,12 @@ import (
var (
autoFilename = false
stdout = false
noClobber = false
)

func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &autoFilename, "auto-filename", "a", autoFilename, "Get the file name from the URL and use it for destination file path")
flags.BoolVarP(cmdFlags, &noClobber, "no-clobber", "", noClobber, "Prevent overwriting file with same name")
flags.BoolVarP(cmdFlags, &stdout, "stdout", "", stdout, "Write the output to stdout rather than a file")
}

@@ -37,9 +35,6 @@ Setting --auto-filename will cause the file name to be retreived from
the from URL (after any redirections) and used in the destination
path.

Setting --no-clobber will prevent overwriting file on the
destination if there is one with the same name.

Setting --stdout or making the output file name "-" will cause the
output to be written to standard output.
`,
@@ -64,7 +59,7 @@ output to be written to standard output.
if stdout {
err = operations.CopyURLToWriter(context.Background(), args[0], os.Stdout)
} else {
_, err = operations.CopyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename, noClobber)
_, err = operations.CopyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename)
}
return err
})
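
Taken together, the flags registered above support invocations like these (URL and remote names are illustrative):

    rclone copyurl https://example.com/file.zip remote:path/file.zip
    rclone copyurl --auto-filename https://example.com/file.zip remote:path
    rclone copyurl --stdout https://example.com/file.zip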

@@ -110,6 +110,7 @@ func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
fs.Errorf(src, err.Error())
return true, false
}
fs.Debugf(src, "OK")
return false, false
}

@@ -60,7 +60,7 @@ use it like this
}

// cryptDecode returns the unencrypted file name
func cryptDecode(cipher *crypt.Cipher, args []string) error {
func cryptDecode(cipher crypt.Cipher, args []string) error {
output := ""

for _, encryptedFileName := range args {
@@ -78,7 +78,7 @@ func cryptDecode(cipher *crypt.Cipher, args []string) error {
}

// cryptEncode returns the encrypted file name
func cryptEncode(cipher *crypt.Cipher, args []string) error {
func cryptEncode(cipher crypt.Cipher, args []string) error {
output := ""

for _, fileName := range args {
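
The hunks above only change how the cipher is passed (pointer versus value); the loop bodies are unchanged. A sketch of what such a decode loop does — the surrounding helper is invented for illustration, and DecryptFileName is assumed to be the relevant Cipher method:

    // decodeNames decrypts each name and accumulates the output,
    // in the style of cryptDecode above (illustrative, not from the diff).
    func decodeNames(cipher crypt.Cipher, names []string) (string, error) {
    	output := ""
    	for _, encryptedFileName := range names {
    		fileName, err := cipher.DecryptFileName(encryptedFileName)
    		if err != nil {
    			return "", err
    		}
    		output += fileName + "\n"
    	}
    	return output, nil
    }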

@@ -6,7 +6,6 @@ import (

"github.com/rclone/rclone/backend/dropbox"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -24,11 +23,9 @@ hashes are calculated according to [Dropbox content hash
rules](https://www.dropbox.com/developers/reference/content-hash).
The output is in the same format as md5sum and sha1sum.
`,
Hidden: true,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
fs.Logf(nil, `"rclone dbhashsum" is deprecated, use "rclone hashsum %v %s" instead`, dropbox.DbHashType, args[0])
cmd.Run(false, false, command, func() error {
return operations.HashLister(context.Background(), dropbox.DbHashType, fsrc, os.Stdout)
})
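
On the side of the diff that carries the deprecation, the suggested replacement expands to something like the following — this assumes dropbox.DbHashType prints as "DropboxHash" via the %v verb in the log line, which is not confirmed by this diff:

    rclone hashsum DropboxHash remote:path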

@@ -4,19 +4,12 @@ import (
"context"

"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)

var (
rmdirs = false
)

func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &rmdirs, "rmdirs", "", rmdirs, "rmdirs removes empty directories but leaves root intact")
}

var commandDefinition = &cobra.Command{
@@ -30,8 +23,6 @@ filters so can be used to selectively delete files.
alone. If you want to delete a directory and all of its contents use
` + "`" + `rclone purge` + "`" + `

If you supply the --rmdirs flag, it will remove all empty directories along with it.

Eg delete all files bigger than 100MBytes

Check what would be deleted first (use either)
@@ -50,14 +41,7 @@ delete all files bigger than 100MBytes.
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(true, false, command, func() error {
if err := operations.Delete(context.Background(), fsrc); err != nil {
return err
}
if rmdirs {
fdst := cmd.NewFsDir(args)
return operations.Rmdirs(context.Background(), fdst, "", true)
}
return nil
return operations.Delete(context.Background(), fsrc)
})
},
}
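
On the side of the diff that has the --rmdirs flag, deleting files and then pruning the directories they leave empty is a single invocation (remote name illustrative):

    rclone delete --rmdirs remote:path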

Some files were not shown because too many files have changed in this diff