mirror of https://github.com/rclone/rclone.git synced 2026-02-08 12:39:57 +00:00

Compare commits


23 Commits

Author SHA1 Message Date
Nick Craig-Wood
98ad80bee3 union: fix slash behaviour on Windows 2020-03-10 15:59:42 +00:00
Max Sum
7da83346bf union: Implement policy by least number of objects 2020-03-09 16:16:30 +00:00
Max Sum
c4545465e7 union: make quota relevant policies resilient to unsupported fields 2020-03-09 16:16:30 +00:00
Max Sum
67b38a457b union: add tests 2020-03-09 16:16:30 +00:00
Max Sum
c9374fbe5a union: fix issues when using space-relevant and path-preserving policies
A path-preserving policy needs to look up the parent directory of the operating path. Therefore, if the operating path is
the same as the root passed in during NewFs, there is no parent left to look up. About() can also fail if the
folder does not exist. RootFs is added to solve these problems.
2020-03-09 16:16:30 +00:00
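To make the parent-directory lookup described in this commit concrete, here is a minimal, hypothetical Go sketch. Plain local directories stand in for upstream remotes, and epffCreate and the example paths are invented for illustration, not rclone's real API; note that when the operating path is the Fs root itself, filepath.Dir gives nothing higher to check, which is the gap RootFs closes.

package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// epffCreate picks the first upstream whose copy of the parent directory
// of relPath already exists ("existing path, first found" style policy).
func epffCreate(upstreams []string, relPath string) (string, error) {
	parent := filepath.Dir(relPath) // "." when relPath is the root itself
	for _, u := range upstreams {
		if fi, err := os.Stat(filepath.Join(u, parent)); err == nil && fi.IsDir() {
			return u, nil
		}
	}
	return "", errors.New("no upstream has the parent directory")
}

func main() {
	upstreams := []string{"/mnt/a", "/mnt/b"} // hypothetical upstream roots
	u, err := epffCreate(upstreams, "photos/2020/img.jpg")
	if err != nil {
		fmt.Println("create would fail:", err)
		return
	}
	fmt.Println("create on upstream:", u)
}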
Max Sum
0081971ade union: update document 2020-03-09 16:16:30 +00:00
Max Sum
6898a0cccd union: backward compatible to old config 2020-03-09 16:16:30 +00:00
Max Sum
f0c17a72db union: refine implementation 2020-03-09 16:16:30 +00:00
Max Sum
266c200f8c union: fix mkdir when using path-preserving policy 2020-03-09 16:16:30 +00:00
Max Sum
3b4cafddad union: fix code quality issue 2020-03-09 16:16:30 +00:00
Max Sum
d7bb2d1d89 union: Add multiple error handler 2020-03-09 16:16:30 +00:00
Max Sum
540bd61305 union: fix goimports 2020-03-09 16:16:30 +00:00
Max Sum
3cd1b20236 union: add cancel to ctx 2020-03-09 16:16:30 +00:00
Max Sum
998169fc02 union: goimports fix 2020-03-09 16:16:30 +00:00
Max Sum
05666e6e51 union: fix indent 2020-03-09 16:16:30 +00:00
Max Sum
5720501b19 union: move entries to new file 2020-03-09 16:16:30 +00:00
Max Sum
a124ce1fb3 union: fix wrong behavior of NewFs, List and Purge 2020-03-09 16:16:30 +00:00
Max Sum
1b1e156908 union: Add fast path for single upstream upload 2020-03-09 16:16:30 +00:00
Max Sum
cd26142705 union: fix crash when upstream returns error 2020-03-09 16:16:30 +00:00
Max Sum
37e21f767c union: implement new policies
Implement eplfs, eplus, eprand, lfs, lus, newest and rand.
2020-03-09 16:16:30 +00:00
Max Sum
d3807c5a0d union: fix epall and all policy 2020-03-09 16:16:30 +00:00
Max Sum
36e184266f union: fix description and variable names of epff, epmfs, mfs policies 2020-03-09 16:16:30 +00:00
Max Sum
da9a44ea5e union: implement write on multiple remotes
Introduce policy from mergerfs.
2020-03-09 16:16:30 +00:00
799 changed files with 2074 additions and 100192 deletions

View File

@@ -5,7 +5,7 @@ WORKDIR /go/src/github.com/rclone/rclone/
RUN make quicktest
RUN \
CGO_ENABLED=0 \
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
make
RUN ./rclone version

View File

@@ -2,19 +2,17 @@
Current active maintainers of rclone are:
| Name | GitHub ID | Specific Responsibilities |
| :--------------- | :---------------- | :-------------------------- |
| Nick Craig-Wood | @ncw | overall project health |
| Stefan Breunig | @breunigs | |
| Ishuah Kariuki | @ishuah | |
| Remus Bunduc | @remusb | cache backend |
| Fabian Möller | @B4dM4n | |
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud & yandex backends |
| Ivan Andreev | @ivandeex | chunker & mailru backends |
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Name | GitHub ID | Specific Responsibilities |
| :--------------- | :---------- | :-------------------------- |
| Nick Craig-Wood | @ncw | overall project health |
| Stefan Breunig | @breunigs | |
| Ishuah Kariuki | @ishuah | |
| Remus Bunduc | @remusb | cache backend |
| Fabian Möller | @B4dM4n | |
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud & yandex backends |
| Ivan Andreev | @ivandeex | chunker & mailru backends |
**This is a work in progress Draft**

View File

@@ -59,11 +59,9 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)

View File

@@ -31,12 +31,10 @@ import (
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/tardigrade"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/webdav"
_ "github.com/rclone/rclone/backend/yandex"

View File

@@ -35,7 +35,6 @@ import (
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
)
const (
@@ -60,8 +59,6 @@ const (
emulatorAccount = "devstoreaccount1"
emulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
)
// Register with Fs
@@ -128,28 +125,6 @@ If blobs are in "archive tier" at remote, trying to perform data transfer
operations from remote will not be allowed. User should first restore by
tiering blob to "Hot" or "Cool".`,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Don't store MD5 checksum with object metadata.
Normally rclone will calculate the MD5 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
Default: false,
Advanced: true,
}, {
Name: "memory_pool_flush_time",
Default: memoryPoolFlushTime,
Advanced: true,
Help: `How often internal memory buffer pools will be flushed.
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
}, {
Name: "memory_pool_use_mmap",
Default: memoryPoolUseMmap,
Advanced: true,
Help: `Whether to use mmap buffers in internal memory pool.`,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -166,19 +141,16 @@ This option controls how often unused buffers will be removed from the pool.`,
// Options defines the configuration for this backend
type Options struct {
Account string `config:"account"`
Key string `config:"key"`
Endpoint string `config:"endpoint"`
SASURL string `config:"sas_url"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
ListChunkSize uint `config:"list_chunk"`
AccessTier string `config:"access_tier"`
UseEmulator bool `config:"use_emulator"`
DisableCheckSum bool `config:"disable_checksum"`
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
Account string `config:"account"`
Key string `config:"key"`
Endpoint string `config:"endpoint"`
SASURL string `config:"sas_url"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
ListChunkSize uint `config:"list_chunk"`
AccessTier string `config:"access_tier"`
UseEmulator bool `config:"use_emulator"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote azure server
@@ -197,7 +169,6 @@ type Fs struct {
cache *bucket.Cache // cache for container creation status
pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency
pool *pool.Pool // memory pool
}
// Object describes a azure object
@@ -286,12 +257,6 @@ var retryErrorCodes = []int{
func (f *Fs) shouldRetry(err error) (bool, error) {
// FIXME interpret special errors - more to do here
if storageErr, ok := err.(azblob.StorageError); ok {
switch storageErr.ServiceCode() {
case "InvalidBlobOrBlock":
// These errors happen sometimes in multipart uploads
// because of block concurrency issues
return true, err
}
statusCode := storageErr.Response().StatusCode
for _, e := range retryErrorCodes {
if statusCode == e {
@@ -417,12 +382,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
client: fshttp.NewClient(fs.Config),
cache: bucket.NewCache(),
cntURLcache: make(map[string]*azblob.ContainerURL, 1),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
fs.Config.Transfers,
opt.MemoryPoolUseMmap,
),
}
f.setRoot(root)
f.features = (&fs.Features{
@@ -866,10 +825,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// makeContainer creates the container if it doesn't exist
func (f *Fs) makeContainer(ctx context.Context, container string) error {
return f.cache.Create(container, func() error {
// If this is a SAS URL limited to a container then assume it is already created
if f.isLimited {
return nil
}
// now try to create the container
return f.pacer.Call(func() (bool, error) {
_, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
@@ -1023,19 +978,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return f.NewObject(ctx, remote)
}
func (f *Fs) getMemoryPool(size int64) *pool.Pool {
if size == int64(f.opt.ChunkSize) {
return f.pool
}
return pool.New(
time.Duration(f.opt.MemoryPoolFlushTime),
int(size),
fs.Config.Transfers,
f.opt.MemoryPoolUseMmap,
)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -1344,7 +1286,6 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, blob *azblob.BlobURL,
position := int64(0)
errs := make(chan error, 1)
var wg sync.WaitGroup
memPool := o.fs.getMemoryPool(chunkSize)
outer:
for part := 0; part < int(totalParts); part++ {
// Check any errors
@@ -1359,33 +1300,29 @@ outer:
reqSize = chunkSize
}
// Get a block of memory from the pool and a token which limits concurrency
o.fs.uploadToken.Get()
buf := memPool.Get()
buf = buf[:reqSize]
// Make a block of memory
buf := make([]byte, reqSize)
// Read the chunk
_, err = io.ReadFull(in, buf)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to read source")
memPool.Put(buf) // return the buf
o.fs.uploadToken.Put() // return the token
break outer
}
// Transfer the chunk
nextID()
wg.Add(1)
o.fs.uploadToken.Get()
go func(part int, position int64, blockID string) {
defer wg.Done()
defer o.fs.uploadToken.Put()
defer memPool.Put(buf)
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, totalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
// Upload the block, with MD5 for check
md5sum := md5.Sum(buf)
transactionalMD5 := md5sum[:]
err := o.fs.pacer.Call(func() (bool, error) {
err = o.fs.pacer.Call(func() (bool, error) {
bufferReader := bytes.NewReader(buf)
wrappedReader := wrap(bufferReader)
rs := readSeeker{wrappedReader, bufferReader}
@@ -1452,14 +1389,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
// Note: If multipart, a MD5 checksum will also be computed for each uploaded block
// in order to validate its integrity during transport
if !o.fs.opt.DisableCheckSum {
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
if err == nil {
httpHeaders.ContentMD5 = sourceMD5bytes
} else {
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
}
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
if err == nil {
httpHeaders.ContentMD5 = sourceMD5bytes
} else {
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
}
}
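As context for the memory_pool_* options and the per-block MD5 shown in the hunks above, here is a minimal sketch of the buffer-reuse pattern, assuming only the standard library's sync.Pool (so no timed flush or mmap, unlike rclone's lib/pool); chunkSize and uploadBlock are invented names for the example.

package main

import (
	"crypto/md5"
	"fmt"
	"sync"
)

const chunkSize = 4 * 1024 * 1024 // one pooled buffer per in-flight chunk

var bufPool = sync.Pool{
	New: func() interface{} { return make([]byte, chunkSize) },
}

// uploadBlock borrows a buffer, slices it to the actual request size,
// hashes the block for transport integrity and returns the buffer.
func uploadBlock(reqSize int) {
	buf := bufPool.Get().([]byte)
	defer bufPool.Put(buf) // return the buffer for the next chunk

	block := buf[:reqSize]
	// ... fill block from the source reader here ...
	sum := md5.Sum(block) // per-block MD5, checked on upload
	fmt.Printf("would upload %d bytes, md5 %x\n", len(block), sum)
}

func main() {
	var wg sync.WaitGroup
	for part := 0; part < 4; part++ {
		wg.Add(1)
		go func() { defer wg.Done(); uploadBlock(chunkSize) }()
	}
	wg.Wait()
}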

View File

@@ -124,13 +124,8 @@ minimum size.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files
Normally rclone will calculate the SHA1 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files`,
Default: false,
Advanced: true,
}, {
@@ -673,7 +668,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
remote := file.Name[len(prefix):]
// Check for directory
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
isDirectory := strings.HasSuffix(remote, "/")
if isDirectory {
remote = remote[:len(remote)-1]
}
@@ -1375,21 +1370,6 @@ func (o *Object) Size() int64 {
return o.size
}
// Clean the SHA1
//
// Make sure it is lower case
//
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (eg Cyberduck) use this
func cleanSHA1(sha1 string) (out string) {
out = strings.ToLower(sha1)
const unverified = "unverified:"
if strings.HasPrefix(out, unverified) {
out = out[len(unverified):]
}
return out
}
// decodeMetaDataRaw sets the metadata from the data passed in
//
// Sets
@@ -1405,7 +1385,12 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
if o.sha1 == "" || o.sha1 == "none" {
o.sha1 = Info[sha1Key]
}
o.sha1 = cleanSHA1(o.sha1)
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (eg Cyberduck) use this
const unverified = "unverified:"
if strings.HasPrefix(o.sha1, unverified) {
o.sha1 = o.sha1[len(unverified):]
}
o.size = Size
// Use the UploadTimestamp if can't get file info
o.modTime = time.Time(UploadTimestamp)
@@ -1663,7 +1648,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
o.sha1 = resp.Header.Get(sha1InfoHeader)
fs.Debugf(o, "Reading sha1 from info - %q", o.sha1)
}
o.sha1 = cleanSHA1(o.sha1)
}
// Don't check length or hash on partial content
if resp.StatusCode == http.StatusPartialContent {
@@ -1832,7 +1816,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Method: "POST",
RootURL: upload.UploadURL,
Body: in,
Options: options,
ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken,
"X-Bz-File-Name": urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath)),

View File

@@ -184,6 +184,13 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
up.uploadMu.Unlock()
}
// clearUploadURL clears the current UploadURL and the AuthorizationToken
func (up *largeUpload) clearUploadURL() {
up.uploadMu.Lock()
up.uploads = nil
up.uploadMu.Unlock()
}
// Transfer a chunk
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
err := up.f.pacer.Call(func() (bool, error) {

View File

@@ -53,7 +53,8 @@ const (
rcloneEncryptedClientSecret = "sYbJYm99WB8jzeaLPU0OPDMJKIkZvD2qOn3SyEMfiJr03RdtDt3xcZEIudRhbIDL"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
decayConstant = 2 // bigger for slower decay, exponential
rootID = "0" // ID of root folder is always this
rootURL = "https://api.box.com/2.0"
uploadURL = "https://upload.box.com/api/2.0"
listChunks = 1000 // chunk size to read directory listings
@@ -88,7 +89,22 @@ func init() {
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
var err error
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
err = refreshJWTToken(jsonFile, boxSubType, name, m)
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
privateKey, err := getDecryptedPrivateKey(boxConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
claims, err := getClaims(boxConfig, boxSubType)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(fs.Config)
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
if err != nil {
log.Fatalf("Failed to configure token with jwt authentication: %v", err)
}
@@ -105,11 +121,6 @@ func init() {
}, {
Name: config.ConfigClientSecret,
Help: "Box App Client Secret\nLeave blank normally.",
}, {
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "0",
Advanced: true,
}, {
Name: "box_config_file",
Help: "Box App config.json location\nLeave blank normally.",
@@ -152,26 +163,6 @@ func init() {
})
}
func refreshJWTToken(jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
privateKey, err := getDecryptedPrivateKey(boxConfig)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
claims, err := getClaims(boxConfig, boxSubType)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
client := fshttp.NewClient(fs.Config)
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
return err
}
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := ioutil.ReadFile(configFile)
if err != nil {
@@ -194,6 +185,7 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimS
Iss: boxConfig.BoxAppSettings.ClientID,
Sub: boxConfig.EnterpriseID,
Aud: tokenURL,
Iat: time.Now().Unix(),
Exp: time.Now().Add(time.Second * 45).Unix(),
PrivateClaims: map[string]interface{}{
"box_sub_type": boxSubType,
@@ -244,7 +236,6 @@ type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CommitRetries int `config:"commit_retries"`
Enc encoder.MultiEncoder `config:"encoding"`
RootFolderID string `config:"root_folder_id"`
}
// Fs represents a remote box
@@ -402,27 +393,13 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}).Fill(f)
f.srv.SetErrorHandler(errorHandler)
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
return err
})
// If using box config.json and JWT, renewing should just refresh the token and
// should do so whether there are uploads pending or not.
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
err := refreshJWTToken(jsonFile, boxSubType, name, m)
return err
})
f.tokenRenewer.Start()
} else {
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
return err
})
}
// Get rootFolderID
rootID := f.opt.RootFolderID
// Get rootID
f.dirCache = dircache.New(root, rootID, f)
// Find the current root
@@ -1190,7 +1167,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// upload does a single non-multipart upload
//
// This is recommended for less than 50 MB of content
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
upload := api.UploadFile{
Name: o.fs.opt.Enc.FromStandardName(leaf),
ContentModifiedAt: api.Time(modTime),
@@ -1209,7 +1186,6 @@ func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID str
MultipartContentName: "contents",
MultipartFileName: upload.Name,
RootURL: uploadURL,
Options: options,
}
// If object has an ID then it is existing so create a new version
if o.id != "" {
@@ -1251,9 +1227,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Upload with simple or multipart
if size <= int64(o.fs.opt.UploadCutoff) {
err = o.upload(ctx, in, leaf, directoryID, modTime, options...)
err = o.upload(ctx, in, leaf, directoryID, modTime)
} else {
err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime, options...)
err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime)
}
return err
}

View File

@@ -54,7 +54,7 @@ func sha1Digest(digest []byte) string {
}
// uploadPart uploads a part in an upload session
func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn, options ...fs.OpenOption) (response *api.UploadPartResponse, err error) {
func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
chunkSize := int64(len(chunk))
sha1sum := sha1.Sum(chunk)
opts := rest.Opts{
@@ -64,7 +64,6 @@ func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, total
ContentType: "application/octet-stream",
ContentLength: &chunkSize,
ContentRange: fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, totalSize),
Options: options,
ExtraHeaders: map[string]string{
"Digest": sha1Digest(sha1sum[:]),
},
@@ -172,7 +171,7 @@ func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error)
}
// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time, options ...fs.OpenOption) (err error) {
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
// Create upload session
session, err := o.createUploadSession(ctx, leaf, directoryID, size)
if err != nil {
@@ -237,7 +236,7 @@ outer:
defer wg.Done()
defer o.fs.uploadToken.Put()
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap, options...)
partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to upload part")
select {

View File

@@ -65,7 +65,6 @@ func init() {
Name: "cache",
Description: "Cache a remote",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
@@ -1883,31 +1882,6 @@ func (f *Fs) Disconnect(ctx context.Context) error {
return do(ctx)
}
var commandHelp = []fs.CommandHelp{
{
Name: "stats",
Short: "Print stats on the cache backend in JSON format.",
},
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
switch name {
case "stats":
return f.Stats()
default:
return nil, fs.ErrorCommandNotFound
}
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -1925,5 +1899,4 @@ var (
_ fs.Abouter = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
)
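The Command method and comment block removed above describe the backend-command contract: dispatch on a command name and return a result that can be JSON encoded. A standalone sketch of that contract, with invented stand-ins (Fs, stats, errCommandNotFound) rather than the real cache backend types:

package main

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
)

var errCommandNotFound = errors.New("command not found")

type Fs struct{}

// stats mimics a backend command returning a JSON-encodable map.
func (f *Fs) stats() (interface{}, error) {
	return map[string]interface{}{"chunks": 42, "files": 7}, nil
}

// Command dispatches on the command name, in the style of the hunk above.
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
	switch name {
	case "stats":
		return f.stats()
	default:
		return nil, errCommandNotFound
	}
}

func main() {
	f := &Fs{}
	out, err := f.Command(context.Background(), "stats", nil, nil)
	if err != nil {
		fmt.Println(err)
		return
	}
	b, _ := json.Marshal(out) // string/[]string results go to the user as-is, others as JSON
	fmt.Println(string(b))
}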

View File

@@ -980,6 +980,15 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
})
}
// SetPendingUploadToStarted is a way to mark an entry as started (even if it's not already)
// TO BE USED IN TESTING ONLY
func (b *Persistent) SetPendingUploadToStarted(remote string) error {
return b.updatePendingUpload(remote, func(item *tempUploadInfo) error {
item.Started = true
return nil
})
}
// ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue
func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error {
return b.db.Update(func(tx *bolt.Tx) error {
@@ -1027,6 +1036,19 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
})
}
// PurgeTempUploads will remove all the pending uploads from the queue
// TO BE USED IN TESTING ONLY
func (b *Persistent) PurgeTempUploads() {
b.tempQueueMux.Lock()
defer b.tempQueueMux.Unlock()
_ = b.db.Update(func(tx *bolt.Tx) error {
_ = tx.DeleteBucket([]byte(tempBucket))
_, _ = tx.CreateBucketIfNotExists([]byte(tempBucket))
return nil
})
}
// Close should be called when the program ends gracefully
func (b *Persistent) Close() {
b.cleanupMux.Lock()

View File

@@ -1,23 +0,0 @@
package cache
import bolt "go.etcd.io/bbolt"
// PurgeTempUploads will remove all the pending uploads from the queue
func (b *Persistent) PurgeTempUploads() {
b.tempQueueMux.Lock()
defer b.tempQueueMux.Unlock()
_ = b.db.Update(func(tx *bolt.Tx) error {
_ = tx.DeleteBucket([]byte(tempBucket))
_, _ = tx.CreateBucketIfNotExists([]byte(tempBucket))
return nil
})
}
// SetPendingUploadToStarted is a way to mark an entry as started (even if it's not already)
func (b *Persistent) SetPendingUploadToStarted(remote string) error {
return b.updatePendingUpload(remote, func(item *tempUploadInfo) error {
item.Started = true
return nil
})
}

View File

@@ -71,6 +71,30 @@ type ReadSeekCloser interface {
// OpenRangeSeek opens the file handle at the offset with the limit given
type OpenRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error)
// Cipher is used to swap out the encryption implementations
type Cipher interface {
// EncryptFileName encrypts a file path
EncryptFileName(string) string
// DecryptFileName decrypts a file path, returns error if decrypt was invalid
DecryptFileName(string) (string, error)
// EncryptDirName encrypts a directory path
EncryptDirName(string) string
// DecryptDirName decrypts a directory path, returns error if decrypt was invalid
DecryptDirName(string) (string, error)
// EncryptData
EncryptData(io.Reader) (io.Reader, error)
// DecryptData
DecryptData(io.ReadCloser) (io.ReadCloser, error)
// DecryptDataSeek decrypt at a given position
DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
// EncryptedSize calculates the size of the data when encrypted
EncryptedSize(int64) int64
// DecryptedSize calculates the size of the data when decrypted
DecryptedSize(int64) (int64, error)
// NameEncryptionMode returns the used mode for name handling
NameEncryptionMode() NameEncryptionMode
}
// NameEncryptionMode is the type of file name encryption in use
type NameEncryptionMode int
@@ -112,8 +136,7 @@ func (mode NameEncryptionMode) String() (out string) {
return out
}
// Cipher defines an encoding and decoding cipher for the crypt backend
type Cipher struct {
type cipher struct {
dataKey [32]byte // Key for secretbox
nameKey [32]byte // 16,24 or 32 bytes
nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto
@@ -125,8 +148,8 @@ type Cipher struct {
}
// newCipher initialises the cipher. If salt is "" then it uses a built in salt val
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*Cipher, error) {
c := &Cipher{
func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*cipher, error) {
c := &cipher{
mode: mode,
cryptoRand: rand.Reader,
dirNameEncrypt: dirNameEncrypt,
@@ -149,7 +172,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
//
// Note that empty passsword makes all 0x00 keys which is used in the
// tests.
func (c *Cipher) Key(password, salt string) (err error) {
func (c *cipher) Key(password, salt string) (err error) {
const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)
var saltBytes = defaultSalt
if salt != "" {
@@ -173,12 +196,12 @@ func (c *Cipher) Key(password, salt string) (err error) {
}
// getBlock gets a block from the pool of size blockSize
func (c *Cipher) getBlock() []byte {
func (c *cipher) getBlock() []byte {
return c.buffers.Get().([]byte)
}
// putBlock returns a block to the pool of size blockSize
func (c *Cipher) putBlock(buf []byte) {
func (c *cipher) putBlock(buf []byte) {
if len(buf) != blockSize {
panic("bad blocksize returned to pool")
}
@@ -223,7 +246,7 @@ func decodeFileName(in string) ([]byte, error) {
// This means that
// * filenames with the same name will encrypt the same
// * filenames which start the same won't have a common prefix
func (c *Cipher) encryptSegment(plaintext string) string {
func (c *cipher) encryptSegment(plaintext string) string {
if plaintext == "" {
return ""
}
@@ -233,7 +256,7 @@ func (c *Cipher) encryptSegment(plaintext string) string {
}
// decryptSegment decrypts a path segment
func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
func (c *cipher) decryptSegment(ciphertext string) (string, error) {
if ciphertext == "" {
return "", nil
}
@@ -260,7 +283,7 @@ func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
}
// Simple obfuscation routines
func (c *Cipher) obfuscateSegment(plaintext string) string {
func (c *cipher) obfuscateSegment(plaintext string) string {
if plaintext == "" {
return ""
}
@@ -347,7 +370,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
return result.String()
}
func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
if ciphertext == "" {
return "", nil
}
@@ -434,7 +457,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
}
// encryptFileName encrypts a file path
func (c *Cipher) encryptFileName(in string) string {
func (c *cipher) encryptFileName(in string) string {
segments := strings.Split(in, "/")
for i := range segments {
// Skip directory name encryption if the user chose to
@@ -452,7 +475,7 @@ func (c *Cipher) encryptFileName(in string) string {
}
// EncryptFileName encrypts a file path
func (c *Cipher) EncryptFileName(in string) string {
func (c *cipher) EncryptFileName(in string) string {
if c.mode == NameEncryptionOff {
return in + encryptedSuffix
}
@@ -460,7 +483,7 @@ func (c *Cipher) EncryptFileName(in string) string {
}
// EncryptDirName encrypts a directory path
func (c *Cipher) EncryptDirName(in string) string {
func (c *cipher) EncryptDirName(in string) string {
if c.mode == NameEncryptionOff || !c.dirNameEncrypt {
return in
}
@@ -468,7 +491,7 @@ func (c *Cipher) EncryptDirName(in string) string {
}
// decryptFileName decrypts a file path
func (c *Cipher) decryptFileName(in string) (string, error) {
func (c *cipher) decryptFileName(in string) (string, error) {
segments := strings.Split(in, "/")
for i := range segments {
var err error
@@ -491,7 +514,7 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
}
// DecryptFileName decrypts a file path
func (c *Cipher) DecryptFileName(in string) (string, error) {
func (c *cipher) DecryptFileName(in string) (string, error) {
if c.mode == NameEncryptionOff {
remainingLength := len(in) - len(encryptedSuffix)
if remainingLength > 0 && strings.HasSuffix(in, encryptedSuffix) {
@@ -503,15 +526,14 @@ func (c *Cipher) DecryptFileName(in string) (string, error) {
}
// DecryptDirName decrypts a directory path
func (c *Cipher) DecryptDirName(in string) (string, error) {
func (c *cipher) DecryptDirName(in string) (string, error) {
if c.mode == NameEncryptionOff || !c.dirNameEncrypt {
return in, nil
}
return c.decryptFileName(in)
}
// NameEncryptionMode returns the encryption mode in use for names
func (c *Cipher) NameEncryptionMode() NameEncryptionMode {
func (c *cipher) NameEncryptionMode() NameEncryptionMode {
return c.mode
}
@@ -579,7 +601,7 @@ func (n *nonce) add(x uint64) {
type encrypter struct {
mu sync.Mutex
in io.Reader
c *Cipher
c *cipher
nonce nonce
buf []byte
readBuf []byte
@@ -589,7 +611,7 @@ type encrypter struct {
}
// newEncrypter creates a new file handle encrypting on the fly
func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
func (c *cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) {
fh := &encrypter{
in: in,
c: c,
@@ -661,19 +683,13 @@ func (fh *encrypter) finish(err error) (int, error) {
}
// Encrypt data encrypts the data stream
func (c *Cipher) encryptData(in io.Reader) (io.Reader, *encrypter, error) {
func (c *cipher) EncryptData(in io.Reader) (io.Reader, error) {
in, wrap := accounting.UnWrap(in) // unwrap the accounting off the Reader
out, err := c.newEncrypter(in, nil)
if err != nil {
return nil, nil, err
return nil, err
}
return wrap(out), out, nil // and wrap the accounting back on
}
// EncryptData encrypts the data stream
func (c *Cipher) EncryptData(in io.Reader) (io.Reader, error) {
out, _, err := c.encryptData(in)
return out, err
return wrap(out), nil // and wrap the accounting back on
}
// decrypter decrypts an io.ReaderCloser on the fly
@@ -682,7 +698,7 @@ type decrypter struct {
rc io.ReadCloser
nonce nonce
initialNonce nonce
c *Cipher
c *cipher
buf []byte
readBuf []byte
bufIndex int
@@ -693,7 +709,7 @@ type decrypter struct {
}
// newDecrypter creates a new file handle decrypting on the fly
func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
fh := &decrypter{
rc: rc,
c: c,
@@ -721,7 +737,7 @@ func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
}
// newDecrypterSeek creates a new file handle decrypting on the fly
func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
var rc io.ReadCloser
doRangeSeek := false
setLimit := false
@@ -996,7 +1012,7 @@ func (fh *decrypter) finishAndClose(err error) error {
}
// DecryptData decrypts the data stream
func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
out, err := c.newDecrypter(rc)
if err != nil {
return nil, err
@@ -1009,7 +1025,7 @@ func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
// The open function must return a ReadCloser opened to the offset supplied
//
// You must use this form of DecryptData if you might want to Seek the file handle
func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
func (c *cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
out, err := c.newDecrypterSeek(ctx, open, offset, limit)
if err != nil {
return nil, err
@@ -1018,7 +1034,7 @@ func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset
}
// EncryptedSize calculates the size of the data when encrypted
func (c *Cipher) EncryptedSize(size int64) int64 {
func (c *cipher) EncryptedSize(size int64) int64 {
blocks, residue := size/blockDataSize, size%blockDataSize
encryptedSize := int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize)
if residue != 0 {
@@ -1028,7 +1044,7 @@ func (c *Cipher) EncryptedSize(size int64) int64 {
}
// DecryptedSize calculates the size of the data when decrypted
func (c *Cipher) DecryptedSize(size int64) (int64, error) {
func (c *cipher) DecryptedSize(size int64) (int64, error) {
size -= int64(fileHeaderSize)
if size < 0 {
return 0, ErrorEncryptedFileTooShort
@@ -1047,6 +1063,7 @@ func (c *Cipher) DecryptedSize(size int64) (int64, error) {
// check interfaces
var (
_ Cipher = (*cipher)(nil)
_ io.ReadCloser = (*decrypter)(nil)
_ io.Seeker = (*decrypter)(nil)
_ fs.RangeSeeker = (*decrypter)(nil)

View File

@@ -12,7 +12,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -785,7 +784,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
in := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
in := &errorReader{io.ErrUnexpectedEOF}
fh, err := c.newEncrypter(in, nil)
assert.NoError(t, err)
@@ -794,6 +793,14 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
assert.Equal(t, int64(32), n)
}
type errorReader struct {
err error
}
func (er errorReader) Read(p []byte) (n int, err error) {
return 0, er.err
}
type closeDetector struct {
io.Reader
closed int
@@ -831,7 +838,7 @@ func TestNewDecrypter(t *testing.T) {
assert.Equal(t, 1, cd.closed)
}
er := &readers.ErrorReader{Err: errors.New("potato")}
er := &errorReader{errors.New("potato")}
cd = newCloseDetector(er)
fh, err = c.newDecrypter(cd)
assert.Nil(t, fh)
@@ -857,7 +864,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
c, err := newCipher(NameEncryptionStandard, "", "", true)
assert.NoError(t, err)
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
in2 := &errorReader{io.ErrUnexpectedEOF}
in1 := bytes.NewBuffer(file16)
in := ioutil.NopCloser(io.MultiReader(in1, in2))
@@ -1111,7 +1118,7 @@ func TestDecrypterRead(t *testing.T) {
// Test producing an error on the file on Read the underlying file
in1 := bytes.NewBuffer(file1)
in2 := &readers.ErrorReader{Err: errors.New("potato")}
in2 := &errorReader{errors.New("potato")}
in := io.MultiReader(in1, in2)
cd := newCloseDetector(in)
fh, err := c.newDecrypter(cd)

View File

@@ -26,7 +26,6 @@ func init() {
Name: "crypt",
Description: "Encrypt/Decrypt a remote",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "remote",
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
@@ -91,7 +90,7 @@ names, or for debugging purposes.`,
}
// newCipherForConfig constructs a Cipher for the given config name
func newCipherForConfig(opt *Options) (*Cipher, error) {
func newCipherForConfig(opt *Options) (Cipher, error) {
mode, err := NewNameEncryptionMode(opt.FilenameEncryption)
if err != nil {
return nil, err
@@ -118,7 +117,7 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
}
// NewCipher constructs a Cipher for the given config
func NewCipher(m configmap.Mapper) (*Cipher, error) {
func NewCipher(m configmap.Mapper) (Cipher, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -204,7 +203,7 @@ type Fs struct {
root string
opt Options
features *fs.Features // optional features
cipher *Cipher
cipher Cipher
}
// Name of the remote (as passed into NewFs)
@@ -328,7 +327,7 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
// Encrypt the data into wrappedIn
wrappedIn, encrypter, err := f.cipher.encryptData(in)
wrappedIn, err := f.cipher.EncryptData(in)
if err != nil {
return nil, err
}
@@ -352,7 +351,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
}
// Transfer the data
o, err := put(ctx, wrappedIn, f.newObjectInfo(src, encrypter.nonce), options...)
o, err := put(ctx, wrappedIn, f.newObjectInfo(src), options...)
if err != nil {
return nil, err
}
@@ -505,11 +504,11 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
if do == nil {
return nil, errors.New("can't PutUnchecked")
}
wrappedIn, encrypter, err := f.cipher.encryptData(in)
wrappedIn, err := f.cipher.EncryptData(in)
if err != nil {
return nil, err
}
o, err := do(ctx, wrappedIn, f.newObjectInfo(src, encrypter.nonce))
o, err := do(ctx, wrappedIn, f.newObjectInfo(src))
if err != nil {
return nil, err
}
@@ -562,37 +561,6 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
return f.cipher.DecryptFileName(encryptedFileName)
}
// computeHashWithNonce takes the nonce and encrypts the contents of
// src with it, and calculates the hash given by HashType on the fly
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Object, hashType hash.Type) (hashStr string, err error) {
// Open the src for input
in, err := src.Open(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to open src")
}
defer fs.CheckClose(in, &err)
// Now encrypt the src with the nonce
out, err := f.cipher.newEncrypter(in, &nonce)
if err != nil {
return "", errors.Wrap(err, "failed to make encrypter")
}
// pipe into hash
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
if err != nil {
return "", errors.Wrap(err, "failed to make hasher")
}
_, err = io.Copy(m, out)
if err != nil {
return "", errors.Wrap(err, "failed to hash data")
}
return m.Sums()[hashType], nil
}
// ComputeHash takes the nonce from o, and encrypts the contents of
// src with it, and calculates the hash given by HashType on the fly
//
@@ -604,7 +572,7 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
if err != nil {
return "", errors.Wrap(err, "failed to open object to read nonce")
}
d, err := f.cipher.newDecrypter(in)
d, err := f.cipher.(*cipher).newDecrypter(in)
if err != nil {
_ = in.Close()
return "", errors.Wrap(err, "failed to open object to read nonce")
@@ -629,7 +597,30 @@ func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType
return "", errors.Wrap(err, "failed to close nonce read")
}
return f.computeHashWithNonce(ctx, nonce, src, hashType)
// Open the src for input
in, err = src.Open(ctx)
if err != nil {
return "", errors.Wrap(err, "failed to open src")
}
defer fs.CheckClose(in, &err)
// Now encrypt the src with the nonce
out, err := f.cipher.(*cipher).newEncrypter(in, &nonce)
if err != nil {
return "", errors.Wrap(err, "failed to make encrypter")
}
// pipe into hash
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
if err != nil {
return "", errors.Wrap(err, "failed to make hasher")
}
_, err = io.Copy(m, out)
if err != nil {
return "", errors.Wrap(err, "failed to hash data")
}
return m.Sums()[hashType], nil
}
// MergeDirs merges the contents of all the directories passed
@@ -701,64 +692,6 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
do(ctx, wrappedNotifyFunc, pollIntervalChan)
}
var commandHelp = []fs.CommandHelp{
{
Name: "encode",
Short: "Encode the given filename(s)",
Long: `Encode the given filename(s)
Usage Example:
rclone backend encode crypt: file1 [file2...]
rclone rc backend/command command=encode fs=crypt: file1 [file2...]
`,
},
{
Name: "decode",
Short: "Decode the given filename(s)",
Long: `Decode the given filename(s)
Usage Example:
rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
`,
},
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "decode":
out := make(map[string]string)
for _, encryptedFileName := range arg {
fileName, err := f.DecryptFileName(encryptedFileName)
if err != nil {
return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt : %s", encryptedFileName))
}
out[encryptedFileName] = fileName
}
return out, nil
case "encode":
out := make(map[string]string)
for _, fileName := range arg {
encryptedFileName := f.EncryptFileName(fileName)
out[fileName] = encryptedFileName
}
return out, nil
default:
return nil, fs.ErrorCommandNotFound
}
}
// Object describes a wrapped for being read from the Fs
//
// This decrypts the remote name and decrypts the data
@@ -900,15 +833,13 @@ func (f *Fs) Disconnect(ctx context.Context) error {
// This encrypts the remote name and adjusts the size
type ObjectInfo struct {
fs.ObjectInfo
f *Fs
nonce nonce
f *Fs
}
func (f *Fs) newObjectInfo(src fs.ObjectInfo, nonce nonce) *ObjectInfo {
func (f *Fs) newObjectInfo(src fs.ObjectInfo) *ObjectInfo {
return &ObjectInfo{
ObjectInfo: src,
f: f,
nonce: nonce,
}
}
@@ -934,23 +865,6 @@ func (o *ObjectInfo) Size() int64 {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
var srcObj fs.Object
var ok bool
// Get the underlying object if there is one
if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
// Prefer direct interface assertion
} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
// Otherwise likely is a operations.OverrideRemote
srcObj = do.UnWrap()
} else {
return "", nil
}
// if this is wrapping a local object then we work out the hash
if srcObj.Fs().Features().IsLocal {
// Read the data and encrypt it to calculate the hash
fs.Debugf(o, "Computing %v hash of encrypted source", hash)
return o.f.computeHashWithNonce(ctx, o.nonce, srcObj, hash)
}
return "", nil
}
@@ -989,7 +903,6 @@ var (
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
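ComputeHash in the hunk above encrypts the source object with the saved nonce and hashes the ciphertext as it streams, so nothing is buffered in full. A rough sketch of that stream-then-hash pattern, with stdlib AES-CTR standing in for rclone's secretbox-based crypt format (the key, IV and contents here are made up):

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/md5"
	"fmt"
	"io"
	"strings"
)

func main() {
	key := make([]byte, 32)           // placeholder key
	iv := make([]byte, aes.BlockSize) // placeholder IV, analogous to the saved nonce

	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	src := strings.NewReader("plaintext contents of the source object")

	// Encrypt bytes as they are read, then stream straight into the hash.
	enc := cipher.StreamReader{S: cipher.NewCTR(block, iv), R: src}
	h := md5.New()
	if _, err := io.Copy(h, enc); err != nil {
		panic(err)
	}
	fmt.Printf("hash of encrypted stream: %x\n", h.Sum(nil))
}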

View File

@@ -1,143 +0,0 @@
package crypt
import (
"bytes"
"context"
"crypto/md5"
"fmt"
"io"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type testWrapper struct {
fs.ObjectInfo
}
// UnWrap returns the Object that this Object is wrapping or nil if it
// isn't wrapping anything
func (o testWrapper) UnWrap() fs.Object {
if o, ok := o.ObjectInfo.(fs.Object); ok {
return o
}
return nil
}
// Create a temporary local fs to upload things from
func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
localFs, err := fs.TemporaryLocalFs()
require.NoError(t, err)
cleanup = func() {
require.NoError(t, localFs.Rmdir(context.Background(), ""))
}
return localFs, cleanup
}
// Upload a file to a remote
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
inBuf := bytes.NewBufferString(contents)
t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
obj, err := f.Put(context.Background(), inBuf, upSrc)
require.NoError(t, err)
cleanup = func() {
require.NoError(t, obj.Remove(context.Background()))
}
return obj, cleanup
}
// Test the ObjectInfo
func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
var (
contents = random.String(100)
path = "hash_test_object"
ctx = context.Background()
)
if wrap {
path = "_wrap"
}
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
obj, cleanupObj := uploadFile(t, localFs, path, contents)
defer cleanupObj()
// encrypt the data
inBuf := bytes.NewBufferString(contents)
var outBuf bytes.Buffer
enc, err := f.cipher.newEncrypter(inBuf, nil)
require.NoError(t, err)
nonce := enc.nonce // read the nonce at the start
_, err = io.Copy(&outBuf, enc)
require.NoError(t, err)
var oi fs.ObjectInfo = obj
if wrap {
// wrap the object in a fs.ObjectUnwrapper if required
oi = testWrapper{oi}
}
// wrap the object in a crypt for upload using the nonce we
// saved from the encryptor
src := f.newObjectInfo(oi, nonce)
// Test ObjectInfo methods
assert.Equal(t, int64(outBuf.Len()), src.Size())
assert.Equal(t, f, src.Fs())
assert.NotEqual(t, path, src.Remote())
// Test ObjectInfo.Hash
wantHash := md5.Sum(outBuf.Bytes())
gotHash, err := src.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, fmt.Sprintf("%x", wantHash), gotHash)
}
func testComputeHash(t *testing.T, f *Fs) {
var (
contents = random.String(100)
path = "compute_hash_test"
ctx = context.Background()
hashType = f.Fs.Hashes().GetOne()
)
if hashType == hash.None {
t.Skipf("%v: does not support hashes", f.Fs)
}
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
// Upload a file to localFs as a test object
localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
defer cleanupLocalObj()
// Upload the same data to the remote Fs also
remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
defer cleanupRemoteObj()
// Calculate the expected Hash of the remote object
computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)
require.NoError(t, err)
// Test computed hash matches remote object hash
remoteObjHash, err := remoteObj.(*Object).Object.Hash(ctx, hashType)
require.NoError(t, err)
assert.Equal(t, remoteObjHash, computedHash)
}
// InternalTest is called by fstests.Run to extra tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("ObjectInfo", func(t *testing.T) { testObjectInfo(t, f, false) })
t.Run("ObjectInfoWrap", func(t *testing.T) { testObjectInfo(t, f, true) })
t.Run("ComputeHash", func(t *testing.T) { testComputeHash(t, f) })
}

backend/drive/drive.go Executable file → Normal file
View File

@@ -29,7 +29,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
@@ -55,7 +54,6 @@ const (
rcloneClientID = "202264815644.apps.googleusercontent.com"
rcloneEncryptedClientSecret = "eX8GpZTVx3vxMWVkuuBdDWmAUE6rGhTwVrvG9GhllYccSdj2-mvHVg"
driveFolderType = "application/vnd.google-apps.folder"
shortcutMimeType = "application/vnd.google-apps.shortcut"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
defaultMinSleep = fs.Duration(100 * time.Millisecond)
@@ -67,7 +65,7 @@ const (
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
minChunkSize = 256 * fs.KibiByte
defaultChunkSize = 8 * fs.MebiByte
partialFields = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails"
partialFields = "id,name,size,md5Checksum,trashed,modifiedTime,createdTime,mimeType,parents,webViewLink"
)
// Globals
@@ -159,7 +157,6 @@ func init() {
Name: "drive",
Description: "Google Drive",
NewFs: NewFs,
CommandHelp: commandHelp,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
// Parse config into Options struct
@@ -400,7 +397,7 @@ will download it anyway.`,
Default: false,
Help: `Show sizes as storage quota usage, not actual size.
Show the size of a file as the storage quota used. This is the
Show the size of a file as the the storage quota used. This is the
current version plus any older versions that have been set to keep
forever.
@@ -470,16 +467,6 @@ Google don't document so it may break in the future.
See: https://github.com/rclone/rclone/issues/3857
`,
Advanced: true,
}, {
Name: "skip_shortcuts",
Help: `If set skip shortcut files
Normally rclone dereferences shortcut files making them appear as if
they are the original file (see [the shortcuts section](#shortcuts)).
If this flag is set then rclone will ignore shortcut files completely.
`,
Advanced: true,
Default: false,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -537,7 +524,6 @@ type Options struct {
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
DisableHTTP2 bool `config:"disable_http2"`
StopOnUploadLimit bool `config:"stop_on_upload_limit"`
SkipShortcuts bool `config:"skip_shortcuts"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -556,8 +542,6 @@ type Fs struct {
exportExtensions []string // preferred extensions to download docs
importMimeTypes []string // MIME types to convert to docs
isTeamDrive bool // true if this is a team drive
fileFields googleapi.Field // fields to fetch file info with
m configmap.Mapper
}
type baseObject struct {
@@ -567,7 +551,6 @@ type baseObject struct {
modifiedDate string // RFC3339 time it was last modified
mimeType string // The object MIME type
bytes int64 // size of the object
parents int // number of parents
}
type documentObject struct {
baseObject
@@ -633,9 +616,6 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
return false, fserrors.FatalError(err)
}
return true, err
} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
fs.Errorf(f, "Received team drive file limit error: %v", err)
return false, fserrors.FatalError(err)
}
}
}
@@ -662,21 +642,17 @@ func containsString(slice []string, s string) bool {
return false
}
// getFile returns drive.File for the ID passed and fields passed in
func (f *Fs) getFile(ID string, fields googleapi.Field) (info *drive.File, err error) {
// getRootID returns the canonical ID for the "root" ID
func (f *Fs) getRootID() (string, error) {
var info *drive.File
var err error
err = f.pacer.CallNoRetry(func() (bool, error) {
info, err = f.svc.Files.Get(ID).
Fields(fields).
info, err = f.svc.Files.Get("root").
Fields("id").
SupportsAllDrives(true).
Do()
return f.shouldRetry(err)
})
return info, err
}
// getRootID returns the canonical ID for the "root" ID
func (f *Fs) getRootID() (string, error) {
info, err := f.getFile("root", "id")
if err != nil {
return "", errors.Wrap(err, "couldn't find root directory ID")
}
@@ -742,7 +718,7 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
query = append(query, titleQuery.String())
}
if directoriesOnly {
query = append(query, fmt.Sprintf("(mimeType='%s' or mimeType='%s')", driveFolderType, shortcutMimeType))
query = append(query, fmt.Sprintf("mimeType='%s'", driveFolderType))
}
if filesOnly {
query = append(query, fmt.Sprintf("mimeType!='%s'", driveFolderType))
@@ -766,7 +742,22 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
list.Spaces("appDataFolder")
}
fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.fileFields)
var fields = partialFields
if f.opt.AuthOwnerOnly {
fields += ",owners"
}
if f.opt.UseSharedDate {
fields += ",sharedWithMeTime"
}
if f.opt.SkipChecksumGphotos {
fields += ",spaces"
}
if f.opt.SizeAsQuota {
fields += ",quotaBytesUsed"
}
fields = fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", fields)
OUTER:
for {
@@ -783,24 +774,6 @@ OUTER:
}
for _, item := range files.Files {
item.Name = f.opt.Enc.ToStandardName(item.Name)
if isShortcut(item) {
// ignore shortcuts if directed
if f.opt.SkipShortcuts {
continue
}
// skip file shortcuts if directory only
if directoriesOnly && item.ShortcutDetails.TargetMimeType != driveFolderType {
continue
}
// skip directory shortcuts if file only
if filesOnly && item.ShortcutDetails.TargetMimeType == driveFolderType {
continue
}
item, err = f.resolveShortcut(item)
if err != nil {
return false, errors.Wrap(err, "list")
}
}
// Check the case of items is correct since
// the `=` operator is case insensitive.
if title != "" && title != item.Name {
@@ -1083,10 +1056,8 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
root: root,
opt: *opt,
pacer: newPacer(opt),
m: m,
}
f.isTeamDrive = opt.TeamDriveID != ""
f.fileFields = f.getFileFields()
f.features = (&fs.Features{
DuplicateFiles: true,
ReadMimeType: true,
@@ -1198,28 +1169,9 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
modifiedDate: modifiedDate,
mimeType: info.MimeType,
bytes: size,
parents: len(info.Parents),
}
}
// getFileFields gets the fields for a normal file Get or List
func (f *Fs) getFileFields() (fields googleapi.Field) {
fields = partialFields
if f.opt.AuthOwnerOnly {
fields += ",owners"
}
if f.opt.UseSharedDate {
fields += ",sharedWithMeTime"
}
if f.opt.SkipChecksumGphotos {
fields += ",spaces"
}
if f.opt.SizeAsQuota {
fields += ",quotaBytesUsed"
}
return fields
}
// newRegularObject creates a fs.Object for a normal drive.File
func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
@@ -1233,7 +1185,7 @@ func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
}
return &Object{
baseObject: f.newBaseObject(remote, info),
url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, actualID(info.Id)),
url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, info.Id),
md5sum: strings.ToLower(info.Md5Checksum),
v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
}
@@ -1245,18 +1197,17 @@ func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, expor
if err != nil {
return nil, err
}
id := actualID(info.Id)
url := fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, id, url.QueryEscape(mediaType))
url := fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, info.Id, url.QueryEscape(mediaType))
if f.opt.AlternateExport {
switch info.MimeType {
case "application/vnd.google-apps.drawing":
url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", id, extension[1:])
url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", info.Id, extension[1:])
case "application/vnd.google-apps.document":
url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", id, extension[1:])
url = fmt.Sprintf("https://docs.google.com/document/d/%s/export?format=%s", info.Id, extension[1:])
case "application/vnd.google-apps.spreadsheet":
url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", id, extension[1:])
url = fmt.Sprintf("https://docs.google.com/spreadsheets/d/%s/export?format=%s", info.Id, extension[1:])
case "application/vnd.google-apps.presentation":
url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", id, extension[1:])
url = fmt.Sprintf("https://docs.google.com/presentation/d/%s/export/%s", info.Id, extension[1:])
}
}
baseObject := f.newBaseObject(remote+extension, info)
@@ -1314,37 +1265,23 @@ func (f *Fs) newObjectWithInfo(remote string, info *drive.File) (fs.Object, erro
// When the drive.File cannot be represented as a fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithExportInfo(
remote string, info *drive.File,
extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
// Note that resolveShortcut will have been called already if
// we are being called from a listing. In that case the drive.File
// will already have been resolved, so this call does nothing.
info, err = f.resolveShortcut(info)
if err != nil {
return nil, errors.Wrap(err, "new object")
}
extension, exportName, exportMimeType string, isDocument bool) (fs.Object, error) {
switch {
case info.MimeType == driveFolderType:
return nil, fs.ErrorNotAFile
case info.MimeType == shortcutMimeType:
// We can only get here if f.opt.SkipShortcuts is set
// and not from a listing. This is unlikely.
fs.Debugf(remote, "Ignoring shortcut as skip shortcuts is set")
return nil, fs.ErrorObjectNotFound
case info.Md5Checksum != "" || info.Size > 0:
// If item has MD5 sum or a length it is a file stored on drive
return f.newRegularObject(remote, info), nil
case f.opt.SkipGdocs:
fs.Debugf(remote, "Skipping google document type %q", info.MimeType)
return nil, fs.ErrorObjectNotFound
return nil, nil
default:
// If item MimeType is in the ExportFormats then it is a google doc
if !isDocument {
fs.Debugf(remote, "Ignoring unknown document type %q", info.MimeType)
return nil, fs.ErrorObjectNotFound
return nil, nil
}
if extension == "" {
fs.Debugf(remote, "No export formats found for %q", info.MimeType)
return nil, fs.ErrorObjectNotFound
return nil, nil
}
if isLinkMimeType(exportMimeType) {
return f.newLinkObject(remote, info, extension, exportMimeType)
@@ -1376,7 +1313,6 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
pathID = actualID(pathID)
found, err = f.list(ctx, []string{pathID}, leaf, true, false, false, func(item *drive.File) bool {
if !f.opt.SkipGdocs {
_, exportName, _, isDocument := f.findExportFormat(item)
@@ -1570,7 +1506,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if err != nil {
return nil, err
}
directoryID = actualID(directoryID)
var iErr error
_, err = f.list(ctx, []string{directoryID}, "", false, false, false, func(item *drive.File) bool {
@@ -1749,7 +1684,6 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
if err != nil {
return err
}
directoryID = actualID(directoryID)
mu := sync.Mutex{} // protects in and overflow
wg := sync.WaitGroup{}
@@ -1763,12 +1697,11 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
mu.Lock()
defer mu.Unlock()
if d, isDir := entry.(*fs.Dir); isDir && in != nil {
job := listREntry{actualID(d.ID()), d.Remote()}
select {
case in <- job:
case in <- listREntry{d.ID(), d.Remote()}:
wg.Add(1)
default:
overflow = append(overflow, job)
overflow = append(overflow, listREntry{d.ID(), d.Remote()})
}
}
listed++
@@ -1845,87 +1778,10 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return nil
}
const shortcutSeparator = '\t'
// joinID adds an actual drive ID to the shortcut ID it came from
//
// directoryIDs in the dircache are these composite directory IDs so
// we must always unpack them before use.
func joinID(actual, shortcut string) string {
return actual + string(shortcutSeparator) + shortcut
}
// splitID separates an actual ID and a shortcut ID from a composite
// ID. If there was no shortcut ID then it will return "" for it.
func splitID(compositeID string) (actualID, shortcutID string) {
i := strings.IndexRune(compositeID, shortcutSeparator)
if i < 0 {
return compositeID, ""
}
return compositeID[:i], compositeID[i+1:]
}
// isShortcutID returns true if compositeID refers to a shortcut
func isShortcutID(compositeID string) bool {
return strings.IndexRune(compositeID, shortcutSeparator) >= 0
}
// actualID returns an actual ID from a composite ID
func actualID(compositeID string) (actualID string) {
actualID, _ = splitID(compositeID)
return actualID
}
// shortcutID returns a shortcut ID from a composite ID if available,
// or the actual ID if not.
func shortcutID(compositeID string) (shortcutID string) {
actualID, shortcutID := splitID(compositeID)
if shortcutID != "" {
return shortcutID
}
return actualID
}
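// Minimal usage sketch for the composite ID helpers above (the IDs "1Abc" and
// "9Xyz" are invented examples, not values taken from this change):
//
//	composite := joinID("1Abc", "9Xyz") // "1Abc\t9Xyz"
//	actual, short := splitID(composite) // "1Abc", "9Xyz"
//	isShortcutID(composite)             // true
//	actualID(composite)                 // "1Abc" - the target to operate on
//	shortcutID(composite)               // "9Xyz" - the shortcut itself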
// isShortcut returns true if the item is a shortcut
func isShortcut(item *drive.File) bool {
return item.MimeType == shortcutMimeType && item.ShortcutDetails != nil
}
// Dereference shortcut if required. It returns the newItem (which may
// be just item).
//
// If we return a new item then the ID will be adjusted to be a
// composite of the actual ID and the shortcut ID. This is to make
// sure that we have decided in all use places what we are doing with
// the ID.
//
// Note that we assume shortcuts can't point to shortcuts. Google
// drive web interface doesn't offer the option to create a shortcut
// to a shortcut. The documentation is silent on the issue.
func (f *Fs) resolveShortcut(item *drive.File) (newItem *drive.File, err error) {
if f.opt.SkipShortcuts || item.MimeType != shortcutMimeType {
return item, nil
}
if item.ShortcutDetails == nil {
fs.Errorf(nil, "Expecting shortcutDetails in %v", item)
return item, nil
}
newItem, err = f.getFile(item.ShortcutDetails.TargetId, f.fileFields)
if err != nil {
return nil, errors.Wrap(err, "failed to resolve shortcut")
}
// make sure we use the Name and Parents from the original item
newItem.Name = item.Name
newItem.Parents = item.Parents
// the new ID is a composite ID
newItem.Id = joinID(newItem.Id, item.Id)
return newItem, nil
}
// itemToDirEntry converts a drive.File to a fs.DirEntry.
// When the drive.File cannot be represented as a fs.DirEntry
// (nil, nil) is returned.
func (f *Fs) itemToDirEntry(remote string, item *drive.File) (entry fs.DirEntry, err error) {
func (f *Fs) itemToDirEntry(remote string, item *drive.File) (fs.DirEntry, error) {
switch {
case item.MimeType == driveFolderType:
// cache the directory ID for later lookups
@@ -1936,11 +1792,7 @@ func (f *Fs) itemToDirEntry(remote string, item *drive.File) (entry fs.DirEntry,
case f.opt.AuthOwnerOnly && !isAuthOwned(item):
// ignore object
default:
entry, err = f.newObjectWithInfo(remote, item)
if err == fs.ErrorObjectNotFound {
return nil, nil
}
return entry, err
return f.newObjectWithInfo(remote, item)
}
return nil, nil
}
@@ -1953,7 +1805,6 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
if err != nil {
return nil, err
}
directoryID = actualID(directoryID)
leaf = f.opt.Enc.FromStandardName(leaf)
// Define the metadata for the file we are going to create.
@@ -2057,18 +1908,6 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if len(dirs) < 2 {
return nil
}
newDirs := dirs[:0]
for _, dir := range dirs {
if isShortcutID(dir.ID()) {
fs.Infof(dir, "skipping shortcut directory")
continue
}
newDirs = append(newDirs, dir)
}
dirs = newDirs
if len(dirs) < 2 {
return nil
}
@@ -2102,7 +1941,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
}
// rmdir (into trash) the now empty source directory
fs.Infof(srcDir, "removing empty directory")
err = f.delete(ctx, srcDir.ID(), true)
err = f.rmdir(ctx, srcDir.ID(), true)
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
}
@@ -2122,20 +1961,20 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return err
}
// delete a file or directory unconditionally by ID
func (f *Fs) delete(ctx context.Context, id string, useTrash bool) error {
// Rmdir deletes a directory unconditionally by ID
func (f *Fs) rmdir(ctx context.Context, directoryID string, useTrash bool) error {
return f.pacer.Call(func() (bool, error) {
var err error
if useTrash {
info := drive.File{
Trashed: true,
}
_, err = f.svc.Files.Update(id, &info).
_, err = f.svc.Files.Update(directoryID, &info).
Fields("").
SupportsAllDrives(true).
Do()
} else {
err = f.svc.Files.Delete(id).
err = f.svc.Files.Delete(directoryID).
Fields("").
SupportsAllDrives(true).
Do()
@@ -2154,11 +1993,6 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
if err != nil {
return err
}
directoryID, shortcutID := splitID(directoryID)
// if directory is a shortcut remove it regardless
if shortcutID != "" {
return f.delete(ctx, shortcutID, f.opt.UseTrash)
}
var trashedFiles = false
found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
if !item.Trashed {
@@ -2179,7 +2013,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// trash the directory if it had trashed files
// in or the user wants to trash, otherwise
// delete it.
err = f.delete(ctx, directoryID, trashedFiles || f.opt.UseTrash)
err = f.rmdir(ctx, directoryID, trashedFiles || f.opt.UseTrash)
if err != nil {
return err
}
@@ -2208,13 +2042,11 @@ func (f *Fs) Precision() time.Duration {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
var srcObj *baseObject
ext := ""
readDescription := false
switch src := src.(type) {
case *Object:
srcObj = &src.baseObject
case *documentObject:
srcObj, ext = &src.baseObject, src.ext()
readDescription = true
case *linkObject:
srcObj, ext = &src.baseObject, src.ext()
default:
@@ -2238,25 +2070,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}
if readDescription {
// preserve the description on copy for docs
info, err := f.getFile(actualID(srcObj.id), "description")
if err != nil {
return nil, errors.Wrap(err, "failed to read description for Google Doc")
}
createInfo.Description = info.Description
} else {
// don't overwrite the description on copy for files
// this should work for docs but it doesn't - it is probably a bug in Google Drive
createInfo.Description = ""
}
// get the ID of the thing to copy - this is the shortcut if available
id := shortcutID(srcObj.id)
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Copy(id, createInfo).
info, err = f.svc.Files.Copy(srcObj.id, createInfo).
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(f.opt.KeepRevisionForever).
@@ -2295,7 +2111,23 @@ func (f *Fs) Purge(ctx context.Context) error {
if err != nil {
return err
}
err = f.delete(ctx, shortcutID(f.dirCache.RootID()), f.opt.UseTrash)
err = f.pacer.Call(func() (bool, error) {
if f.opt.UseTrash {
info := drive.File{
Trashed: true,
}
_, err = f.svc.Files.Update(f.dirCache.RootID(), &info).
Fields("").
SupportsAllDrives(true).
Do()
} else {
err = f.svc.Files.Delete(f.dirCache.RootID()).
Fields("").
SupportsAllDrives(true).
Do()
}
return f.shouldRetry(err)
})
f.dirCache.ResetRoot()
if err != nil {
return err
@@ -2401,7 +2233,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, err
}
srcParentID = actualID(srcParentID)
// Temporary Object under construction
dstInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
@@ -2414,7 +2245,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Do the move
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Update(shortcutID(srcObj.id), dstInfo).
info, err = f.svc.Files.Update(srcObj.id, dstInfo).
RemoveParents(srcParentID).
AddParents(dstParents).
Fields(partialFields).
@@ -2434,14 +2265,13 @@ func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err er
id, err := f.dirCache.FindDir(ctx, remote, false)
if err == nil {
fs.Debugf(f, "attempting to share directory '%s'", remote)
id = shortcutID(id)
} else {
fs.Debugf(f, "attempting to share single file '%s'", remote)
o, err := f.NewObject(ctx, remote)
if err != nil {
return "", err
}
id = shortcutID(o.(fs.IDer).ID())
id = o.(fs.IDer).ID()
}
permission := &drive.Permission{
@@ -2516,7 +2346,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if err != nil {
return err
}
dstDirectoryID = actualID(dstDirectoryID)
// Check destination does not exist
if dstRemote != "" {
@@ -2540,19 +2369,19 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if err != nil {
return err
}
srcDirectoryID = actualID(srcDirectoryID)
// Find ID of src
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
if err != nil {
return err
}
// Do the move
patch := drive.File{
Name: leaf,
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Files.Update(shortcutID(srcID), &patch).
_, err = f.svc.Files.Update(srcID, &patch).
RemoveParents(srcDirectoryID).
AddParents(dstDirectoryID).
Fields("").
@@ -2729,258 +2558,6 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
func (f *Fs) changeChunkSize(chunkSizeString string) (err error) {
chunkSizeInt, err := strconv.ParseInt(chunkSizeString, 10, 64)
if err != nil {
return errors.Wrap(err, "couldn't convert chunk size to int")
}
chunkSize := fs.SizeSuffix(chunkSizeInt)
if chunkSize == f.opt.ChunkSize {
return nil
}
err = checkUploadChunkSize(chunkSize)
if err == nil {
f.opt.ChunkSize = chunkSize
}
return err
}
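// Minimal usage sketch, assuming the value arrives from the "set" backend
// command further down (the number is the example used in its help text):
//
//	err := f.changeChunkSize("67108864") // sets f.opt.ChunkSize to 64M (67108864 bytes)
//	err = f.changeChunkSize("64M")       // fails: the value must be a plain integer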
func (f *Fs) changeServiceAccountFile(file string) (err error) {
fs.Debugf(nil, "Changing Service Account File from %s to %s", f.opt.ServiceAccountFile, file)
if file == f.opt.ServiceAccountFile {
return nil
}
oldSvc := f.svc
oldv2Svc := f.v2Svc
oldOAuthClient := f.client
oldFile := f.opt.ServiceAccountFile
oldCredentials := f.opt.ServiceAccountCredentials
defer func() {
// Undo all the changes instead of doing selective undo's
if err != nil {
f.svc = oldSvc
f.v2Svc = oldv2Svc
f.client = oldOAuthClient
f.opt.ServiceAccountFile = oldFile
f.opt.ServiceAccountCredentials = oldCredentials
}
}()
f.opt.ServiceAccountFile = file
f.opt.ServiceAccountCredentials = ""
oAuthClient, err := createOAuthClient(&f.opt, f.name, f.m)
if err != nil {
return errors.Wrap(err, "drive: failed when making oauth client")
}
f.client = oAuthClient
f.svc, err = drive.New(f.client)
if err != nil {
return errors.Wrap(err, "couldn't create Drive client")
}
if f.opt.V2DownloadMinSize >= 0 {
f.v2Svc, err = drive_v2.New(f.client)
if err != nil {
return errors.Wrap(err, "couldn't create Drive v2 client")
}
}
return nil
}
// Create a shortcut from (f, srcPath) to (dstFs, dstPath)
//
// Will not overwrite existing files
func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPath string) (o fs.Object, err error) {
srcFs := f
srcPath = strings.Trim(srcPath, "/")
dstPath = strings.Trim(dstPath, "/")
if dstPath == "" {
return nil, errors.New("shortcut destination can't be root directory")
}
// Find source
var srcID string
isDir := false
if srcPath == "" {
// source is root directory
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
srcID = f.dirCache.RootID()
isDir = true
} else if srcObj, err := srcFs.NewObject(ctx, srcPath); err != nil {
if err != fs.ErrorNotAFile {
return nil, errors.Wrap(err, "can't find source")
}
// source was a directory
srcID, err = srcFs.dirCache.FindDir(ctx, srcPath, false)
if err != nil {
return nil, errors.Wrap(err, "failed to find source dir")
}
isDir = true
} else {
// source was a file
srcID = srcObj.(*Object).id
}
srcID = actualID(srcID) // link to underlying object not to shortcut
// Find destination
_, err = dstFs.NewObject(ctx, dstPath)
if err != fs.ErrorObjectNotFound {
if err == nil {
err = errors.New("existing file")
} else if err == fs.ErrorNotAFile {
err = errors.New("existing directory")
}
return nil, errors.Wrap(err, "not overwriting shortcut target")
}
// Create destination shortcut
createInfo, err := dstFs.createFileInfo(ctx, dstPath, time.Now())
if err != nil {
return nil, errors.Wrap(err, "shortcut destination failed")
}
createInfo.MimeType = shortcutMimeType
createInfo.ShortcutDetails = &drive.FileShortcutDetails{
TargetId: srcID,
}
var info *drive.File
err = dstFs.pacer.CallNoRetry(func() (bool, error) {
info, err = dstFs.svc.Files.Create(createInfo).
Fields(partialFields).
SupportsAllDrives(true).
KeepRevisionForever(dstFs.opt.KeepRevisionForever).
Do()
return dstFs.shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "shortcut creation failed")
}
if isDir {
return nil, nil
}
return dstFs.newObjectWithInfo(dstPath, info)
}
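// Minimal usage sketch (the paths are invented examples):
//
//	obj, err := f.makeShortcut(ctx, "dir/file.txt", f, "shortcuts/link.txt")
//
// creates the shortcut "shortcuts/link.txt" pointing at "dir/file.txt" and
// returns it as an fs.Object; for a directory source it returns (nil, nil).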
var commandHelp = []fs.CommandHelp{{
Name: "get",
Short: "Get command for fetching the drive config parameters",
Long: `This is a get command which will be used to fetch the various drive config parameters
Usage Examples:
rclone backend get drive: [-o service_account_file] [-o chunk_size]
rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size]
`,
Opts: map[string]string{
"chunk_size": "show the current upload chunk size",
"service_account_file": "show the current service account file",
},
}, {
Name: "set",
Short: "Set command for updating the drive config parameters",
Long: `This is a set command which will be used to update the various drive config parameters
Usage Examples:
rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
`,
Opts: map[string]string{
"chunk_size": "update the current upload chunk size",
"service_account_file": "update the current service account file",
},
}, {
Name: "shortcut",
Short: "Create shortcuts from files or directories",
Long: `This command creates shortcuts from files or directories.
Usage:
rclone backend shortcut drive: source_item destination_shortcut
rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut
In the first example this creates a shortcut from the "source_item"
which can be a file or a directory to the "destination_shortcut". The
"source_item" and the "destination_shortcut" should be relative paths
from "drive:"
In the second example this creates a shortcut from the "source_item"
relative to "drive:" to the "destination_shortcut" relative to
"drive2:". This may fail with a permission error if the user
authenticated with "drive2:" can't read files from "drive:".
`,
Opts: map[string]string{
"target": "optional target remote for the shortcut destination",
},
}}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "get":
out := make(map[string]string)
if _, ok := opt["service_account_file"]; ok {
out["service_account_file"] = f.opt.ServiceAccountFile
}
if _, ok := opt["chunk_size"]; ok {
out["chunk_size"] = fmt.Sprintf("%s", f.opt.ChunkSize)
}
return out, nil
case "set":
out := make(map[string]map[string]string)
if serviceAccountFile, ok := opt["service_account_file"]; ok {
serviceAccountMap := make(map[string]string)
serviceAccountMap["previous"] = f.opt.ServiceAccountFile
if err = f.changeServiceAccountFile(serviceAccountFile); err != nil {
return out, err
}
f.m.Set("service_account_file", serviceAccountFile)
serviceAccountMap["current"] = f.opt.ServiceAccountFile
out["service_account_file"] = serviceAccountMap
}
if chunkSize, ok := opt["chunk_size"]; ok {
chunkSizeMap := make(map[string]string)
chunkSizeMap["previous"] = fmt.Sprintf("%s", f.opt.ChunkSize)
if err = f.changeChunkSize(chunkSize); err != nil {
return out, err
}
chunkSizeString := fmt.Sprintf("%s", f.opt.ChunkSize)
f.m.Set("chunk_size", chunkSizeString)
chunkSizeMap["current"] = chunkSizeString
out["chunk_size"] = chunkSizeMap
}
return out, nil
case "shortcut":
if len(arg) != 2 {
return nil, errors.New("need exactly 2 arguments")
}
dstFs := f
target, ok := opt["target"]
if ok {
targetFs, err := cache.Get(target)
if err != nil {
return nil, errors.Wrap(err, "couldn't find target")
}
dstFs, ok = targetFs.(*Fs)
if !ok {
return nil, errors.New("target is not a drive backend")
}
}
return f.makeShortcut(ctx, arg[0], dstFs, arg[1])
default:
return nil, fs.ErrorCommandNotFound
}
}
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -3041,9 +2618,8 @@ func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
}
return nil, "", "", "", false, err
}
directoryID = actualID(directoryID)
found, err := f.list(ctx, []string{directoryID}, leaf, false, false, false, func(item *drive.File) bool {
found, err := f.list(ctx, []string{directoryID}, leaf, false, true, false, func(item *drive.File) bool {
if !f.opt.SkipGdocs {
extension, exportName, exportMimeType, isDocument = f.findExportFormat(item)
if exportName == leaf {
@@ -3093,7 +2669,7 @@ func (o *baseObject) SetModTime(ctx context.Context, modTime time.Time) error {
var info *drive.File
err := o.fs.pacer.Call(func() (bool, error) {
var err error
info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
info, err = o.fs.svc.Files.Update(o.id, updateInfo).
Fields(partialFields).
SupportsAllDrives(true).
Do()
@@ -3222,7 +2798,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if o.v2Download {
var v2File *drive_v2.File
err = o.fs.pacer.Call(func() (bool, error) {
v2File, err = o.fs.v2Svc.Files.Get(actualID(o.id)).
v2File, err = o.fs.v2Svc.Files.Get(o.id).
Fields("downloadUrl").
SupportsAllDrives(true).
Do()
@@ -3301,7 +2877,7 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
// Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
info, err = o.fs.svc.Files.Update(o.id, updateInfo).
Media(in, googleapi.ContentType(uploadMimeType)).
Fields(partialFields).
SupportsAllDrives(true).
@@ -3321,26 +2897,6 @@ func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadM
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
// If o is a shortcut
if isShortcutID(o.id) {
// Delete it first
err := o.fs.delete(ctx, shortcutID(o.id), o.fs.opt.UseTrash)
if err != nil {
return err
}
// Then put the file as a new file
newObj, err := o.fs.PutUnchecked(ctx, in, src, options...)
if err != nil {
return err
}
// Update the object
if newO, ok := newObj.(*Object); ok {
*o = *newO
} else {
fs.Debugf(newObj, "Failed to update object %T from new object %T", o, newObj)
}
return nil
}
srcMimeType := fs.MimeType(ctx, src)
updateInfo := &drive.File{
MimeType: srcMimeType,
@@ -3411,10 +2967,25 @@ func (o *linkObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo
// Remove an object
func (o *baseObject) Remove(ctx context.Context) error {
if o.parents > 1 {
return errors.New("can't delete safely - has multiple parents")
}
return o.fs.delete(ctx, shortcutID(o.id), o.fs.opt.UseTrash)
var err error
err = o.fs.pacer.Call(func() (bool, error) {
if o.fs.opt.UseTrash {
info := drive.File{
Trashed: true,
}
_, err = o.fs.svc.Files.Update(o.id, &info).
Fields("").
SupportsAllDrives(true).
Do()
} else {
err = o.fs.svc.Files.Delete(o.id).
Fields("").
SupportsAllDrives(true).
Do()
}
return o.fs.shouldRetry(err)
})
return err
}
// MimeType of an Object if known, "" otherwise
@@ -3476,7 +3047,6 @@ var (
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)


@@ -14,7 +14,6 @@ import (
"github.com/pkg/errors"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
@@ -269,98 +268,6 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
}
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/Shortcuts
func (f *Fs) InternalTestShortcuts(t *testing.T) {
const (
// from fstest/fstests/fstests.go
existingDir = "hello? sausage"
existingFile = "file name.txt"
existingSubDir = "êé"
)
ctx := context.Background()
srcObj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err)
srcHash, err := srcObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.NotEqual(t, "", srcHash)
t.Run("Errors", func(t *testing.T) {
_, err := f.makeShortcut(ctx, "", f, "")
assert.Error(t, err)
assert.Contains(t, err.Error(), "can't be root")
_, err = f.makeShortcut(ctx, "notfound", f, "dst")
assert.Error(t, err)
assert.Contains(t, err.Error(), "can't find source")
_, err = f.makeShortcut(ctx, existingFile, f, existingFile)
assert.Error(t, err)
assert.Contains(t, err.Error(), "not overwriting")
assert.Contains(t, err.Error(), "existing file")
_, err = f.makeShortcut(ctx, existingFile, f, existingDir)
assert.Error(t, err)
assert.Contains(t, err.Error(), "not overwriting")
assert.Contains(t, err.Error(), "existing directory")
})
t.Run("File", func(t *testing.T) {
dstObj, err := f.makeShortcut(ctx, existingFile, f, "shortcut.txt")
require.NoError(t, err)
require.NotNil(t, dstObj)
assert.Equal(t, "shortcut.txt", dstObj.Remote())
dstHash, err := dstObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, srcHash, dstHash)
require.NoError(t, dstObj.Remove(ctx))
})
t.Run("Dir", func(t *testing.T) {
dstObj, err := f.makeShortcut(ctx, existingDir, f, "shortcutdir")
require.NoError(t, err)
require.Nil(t, dstObj)
entries, err := f.List(ctx, "shortcutdir")
require.NoError(t, err)
require.Equal(t, 1, len(entries))
require.Equal(t, "shortcutdir/"+existingSubDir, entries[0].Remote())
require.NoError(t, f.Rmdir(ctx, "shortcutdir"))
})
t.Run("Command", func(t *testing.T) {
_, err := f.Command(ctx, "shortcut", []string{"one"}, nil)
require.Error(t, err)
require.Contains(t, err.Error(), "need exactly 2 arguments")
_, err = f.Command(ctx, "shortcut", []string{"one", "two"}, map[string]string{
"target": "doesnotexistremote:",
})
require.Error(t, err)
require.Contains(t, err.Error(), "couldn't find target")
_, err = f.Command(ctx, "shortcut", []string{"one", "two"}, map[string]string{
"target": ".",
})
require.Error(t, err)
require.Contains(t, err.Error(), "target is not a drive backend")
dstObjI, err := f.Command(ctx, "shortcut", []string{existingFile, "shortcut2.txt"}, map[string]string{
"target": fs.ConfigString(f),
})
require.NoError(t, err)
dstObj := dstObjI.(*Object)
assert.Equal(t, "shortcut2.txt", dstObj.Remote())
dstHash, err := dstObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, srcHash, dstHash)
require.NoError(t, dstObj.Remove(ctx))
dstObjI, err = f.Command(ctx, "shortcut", []string{existingFile, "shortcut3.txt"}, nil)
require.NoError(t, err)
dstObj = dstObjI.(*Object)
assert.Equal(t, "shortcut3.txt", dstObj.Remote())
dstHash, err = dstObj.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, srcHash, dstHash)
require.NoError(t, dstObj.Remove(ctx))
})
}
func (f *Fs) InternalTest(t *testing.T) {
// These tests all depend on each other so run them as nested tests
t.Run("DocumentImport", func(t *testing.T) {
@@ -375,7 +282,6 @@ func (f *Fs) InternalTest(t *testing.T) {
})
})
})
t.Run("Shortcuts", f.InternalTestShortcuts)
}
var _ fstests.InternalTester = (*Fs)(nil)

backend/dropbox/dropbox.go Executable file → Normal file

@@ -225,11 +225,7 @@ func shouldRetry(err error) (bool, error) {
return false, err
}
baseErrString := errors.Cause(err).Error()
// First check for Insufficient Space
if strings.Contains(baseErrString, "insufficient_space") {
return false, fserrors.FatalError(err)
}
// Then handle any official Retry-After header from Dropbox's SDK
// handle any official Retry-After header from Dropbox's SDK first
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {


@@ -17,7 +17,6 @@ import (
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
403, // Forbidden (may happen when request limit is exceeded)
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
@@ -321,7 +320,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return response, err
}
func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string, options ...fs.OpenOption) (response *http.Response, err error) {
func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
// fs.Debugf(f, "Uploading File `%s`", fileName)
fileName = f.opt.Enc.FromStandardName(fileName)
@@ -339,7 +338,6 @@ func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName,
NoResponse: true,
Body: in,
ContentLength: &size,
Options: options,
MultipartContentName: "file[]",
MultipartFileName: fileName,
MultipartParams: map[string][]string{


@@ -23,12 +23,11 @@ import (
)
const (
rootID = "0"
apiBaseURL = "https://api.1fichier.com/v1"
minSleep = 400 * time.Millisecond // api is extremely rate limited now
maxSleep = 5 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
attackConstant = 0 // start with max sleep
rootID = "0"
apiBaseURL = "https://api.1fichier.com/v1"
minSleep = 334 * time.Millisecond // 3 API calls per second is recommended
maxSleep = 5 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
func init() {
@@ -186,7 +185,7 @@ func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
baseClient: &http.Client{},
}
@@ -339,7 +338,7 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
return nil, err
}
_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL, options...)
_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
if err != nil {
return nil, err
}


@@ -242,9 +242,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "COLDLINE",
Help: "Coldline storage class",
}, {
Value: "ARCHIVE",
Help: "Archive storage class",
}, {
Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class",
@@ -558,7 +555,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
continue
}
remote = remote[len(prefix):]
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
isDirectory := strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
@@ -1066,33 +1063,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ContentType: fs.MimeType(ctx, src),
Metadata: metadataFromModTime(modTime),
}
// Apply upload options
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
object.CacheControl = value
case "content-disposition":
object.ContentDisposition = value
case "content-encoding":
object.ContentEncoding = value
case "content-language":
object.ContentLanguage = value
case "content-type":
object.ContentType = value
default:
const googMetaPrefix = "x-goog-meta-"
if strings.HasPrefix(lowerKey, googMetaPrefix) {
metaKey := lowerKey[len(googMetaPrefix):]
object.Metadata[metaKey] = value
} else {
fs.Errorf(o, "Don't know how to set key %q on upload", key)
}
}
}
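// For example, an upload option such as fs.HTTPOption{Key: "Cache-Control",
// Value: "no-cache"} (illustrative, not part of this change) would land in the
// "cache-control" case above and set object.CacheControl = "no-cache", while
// a header like "X-Goog-Meta-Foo: bar" would become object.Metadata["foo"] = "bar".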
var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)


@@ -134,20 +134,14 @@ rclone mount needs to know the size of files in advance of reading
them, so setting this flag when using rclone mount is recommended if
you want to read the media.`,
Advanced: true,
}, {
Name: "start_year",
Default: 2000,
Help: `Year limits the photos to be downloaded to those which are uploaded after the given year`,
Advanced: true,
}},
})
}
// Options defines the configuration for this backend
type Options struct {
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
StartYear int `config:"start_year"`
ReadOnly bool `config:"read_only"`
ReadSize bool `config:"read_size"`
}
// Fs represents a remote storage server
@@ -208,11 +202,6 @@ func (f *Fs) dirTime() time.Time {
return f.startTime
}
// startYear returns the start year
func (f *Fs) startYear() int {
return f.opt.StartYear
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
@@ -235,10 +224,6 @@ func errorHandler(resp *http.Response) error {
if err != nil {
body = nil
}
// Google sends 404 messages as images so be prepared for that
if strings.HasPrefix(resp.Header.Get("Content-Type"), "image/") {
body = []byte("Image not found or broken")
}
var e = api.Error{
Details: api.ErrorDetails{
Code: resp.StatusCode,
@@ -958,9 +943,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Upload the media item in exchange for an UploadToken
opts := rest.Opts{
Method: "POST",
Path: "/uploads",
Options: options,
Method: "POST",
Path: "/uploads",
ExtraHeaders: map[string]string{
"X-Goog-Upload-File-Name": fileName,
"X-Goog-Upload-Protocol": "raw",


@@ -23,7 +23,6 @@ type lister interface {
listAlbums(ctx context.Context, shared bool) (all *albums, err error)
listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
dirTime() time.Time
startYear() int
}
// dirPattern describes a single directory pattern
@@ -223,10 +222,11 @@ func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []
return nil, "", nil
}
// Return the years from startYear to today
// Return the years from 2000 to today
// FIXME make configurable?
func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
currentYear := f.dirTime().Year()
for year := f.startYear(); year <= currentYear; year++ {
for year := 2000; year <= currentYear; year++ {
entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), f.dirTime()))
}
return entries, nil
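// For example, with a start year of 2000, a dirTime() in 2020 and a prefix such
// as "media/by-year/" (illustrative value - the prefix comes from the matched
// directory pattern), this returns the directories "media/by-year/2000" through
// "media/by-year/2020".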


@@ -59,11 +59,6 @@ func (f *testLister) dirTime() time.Time {
return startTime
}
// mock startYear for testing
func (f *testLister) startYear() int {
return 2000
}
func TestPatternMatch(t *testing.T) {
for testNumber, test := range []struct {
// input


@@ -166,7 +166,8 @@ func TestNewObject(t *testing.T) {
require.NoError(t, err)
tFile := fi.ModTime()
fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
// check object not found
o, err = f.NewObject(context.Background(), "not found.txt")


@@ -164,12 +164,6 @@ type CustomerInfo struct {
IOSHash string `json:"ios_hash"`
}
// TrashResponse is returned when emptying the Trash
type TrashResponse struct {
Folders int64 `json:"folders"`
Files int64 `json:"files"`
}
// XML structures returned by the old API
// Flag is a hacky type for checking if an attribute is present


@@ -140,11 +140,6 @@ func init() {
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
Default: fs.SizeSuffix(10 * 1024 * 1024),
Advanced: true,
}, {
Name: "trashed_only",
Help: "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
Default: false,
Advanced: true,
}, {
Name: "hard_delete",
Help: "Delete files permanently rather than putting them into the trash.",
@@ -179,7 +174,6 @@ type Options struct {
Device string `config:"device"`
Mountpoint string `config:"mountpoint"`
MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
TrashedOnly bool `config:"trashed_only"`
HardDelete bool `config:"hard_delete"`
Unlink bool `config:"unlink"`
UploadThreshold fs.SizeSuffix `config:"upload_resume_limit"`
@@ -525,9 +519,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
WriteMimeType: true,
}).Fill(f)
f.srv.SetErrorHandler(errorHandler)
if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now
f.features.ListR = nil
}
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
@@ -647,13 +638,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return nil, errors.Wrap(err, "couldn't list files")
}
if bool(result.Deleted) && !f.opt.TrashedOnly {
if result.Deleted {
return nil, fs.ErrorDirNotFound
}
for i := range result.Folders {
item := &result.Folders[i]
if !f.opt.TrashedOnly && bool(item.Deleted) {
if item.Deleted {
continue
}
remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
@@ -663,14 +654,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for i := range result.Files {
item := &result.Files[i]
if f.opt.TrashedOnly {
if !item.Deleted || item.State != "COMPLETED" {
continue
}
} else {
if item.Deleted || item.State != "COMPLETED" {
continue
}
if item.Deleted || item.State != "COMPLETED" {
continue
}
remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name))
o, err := f.newObjectWithInfo(ctx, remote, item)
@@ -1064,22 +1049,6 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return usage, nil
}
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) error {
opts := rest.Opts{
Method: "POST",
Path: "files/v1/purge_trash",
}
var info api.TrashResponse
_, err := f.apiSrv.CallJSON(ctx, &opts, nil, &info)
if err != nil {
return errors.Wrap(err, "couldn't empty trash")
}
return nil
}
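// A sketch of how this gets invoked (the remote name is an invented example):
//
//	rclone cleanup jotta:
//
// which calls CleanUp and empties the trash via the purge_trash endpoint above.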
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
@@ -1153,7 +1122,7 @@ func (o *Object) readMetaData(ctx context.Context, force bool) (err error) {
if err != nil {
return err
}
if bool(info.Deleted) && !o.fs.opt.TrashedOnly {
if info.Deleted {
return fs.ErrorObjectNotFound
}
return o.setMetaData(info)
@@ -1290,7 +1259,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
opts := rest.Opts{
Method: "POST",
Path: "files/v1/allocate",
Options: options,
ExtraHeaders: make(map[string]string),
}
fileDate := api.Time(src.ModTime(ctx)).APIString()
@@ -1387,7 +1355,6 @@ var (
_ fs.ListRer = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
)


@@ -41,7 +41,6 @@ func init() {
Name: "local",
Description: "Local Disk",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "nounc",
Help: "Disable UNC (long path names) conversion on Windows",
@@ -698,50 +697,6 @@ func (f *Fs) Hashes() hash.Set {
return hash.Supported()
}
var commandHelp = []fs.CommandHelp{
{
Name: "noop",
Short: "A null operation for testing backend commands",
Long: `This is a test command which has some options
you can try to change the output.`,
Opts: map[string]string{
"echo": "echo the input arguments",
"error": "return an error based on option value",
},
},
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
switch name {
case "noop":
if txt, ok := opt["error"]; ok {
if txt == "" {
txt = "unspecified error"
}
return nil, errors.New(txt)
}
if _, ok := opt["echo"]; ok {
out := map[string]interface{}{}
out["name"] = name
out["arg"] = arg
out["opt"] = opt
return out, nil
}
return nil, nil
default:
return nil, fs.ErrorCommandNotFound
}
}
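// Minimal usage sketch for the noop command above (the path and the arguments
// are invented examples):
//
//	rclone backend noop /tmp one two -o echo
//	rclone rc backend/command command=noop fs=/tmp -o error=boom
//
// The first form returns a JSON document echoing the name, args and options;
// the second returns the error "boom".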
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -768,17 +723,8 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
oldtime := o.modTime
oldsize := o.size
err := o.lstat()
var changed bool
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
// If file not found then we assume any accumulated
// hashes are OK - this will error on Open
changed = true
} else {
return "", errors.Wrap(err, "hash: failed to stat")
}
} else {
changed = !o.modTime.Equal(oldtime) || oldsize != o.size
return "", errors.Wrap(err, "hash: failed to stat")
}
o.fs.objectHashesMu.Lock()
@@ -786,7 +732,7 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
hashValue, hashFound := o.hashes[r]
o.fs.objectHashesMu.Unlock()
if changed || hashes == nil || !hashFound {
if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil || !hashFound {
var in io.ReadCloser
if !o.translatedLink {
@@ -1031,7 +977,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
}
// Pre-allocate the file for performance reasons
err = file.PreAllocate(src.Size(), f)
err = preAllocate(src.Size(), f)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
@@ -1118,12 +1064,12 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
return nil, err
}
// Pre-allocate the file for performance reasons
err = file.PreAllocate(size, out)
err = preAllocate(size, out)
if err != nil {
fs.Debugf(o, "Failed to pre-allocate: %v", err)
}
// Set the file to be a sparse file (important on Windows)
err = file.SetSparse(out)
err = setSparse(out)
if err != nil {
fs.Debugf(o, "Failed to set sparse: %v", err)
}
@@ -1218,7 +1164,6 @@ var (
_ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.Commander = &Fs{}
_ fs.OpenWriterAter = &Fs{}
_ fs.Object = &Object{}
)


@@ -0,0 +1,15 @@
//+build !windows,!linux
package local
import "os"
// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
return nil
}
// setSparse makes the file be a sparse file
func setSparse(out *os.File) error {
return nil
}
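// On the platforms matched by this build tag, pre-allocation and sparse files
// are deliberately no-ops; the Linux and Windows variants in the next two files
// provide the real fallocate and FSCTL_SET_SPARSE based implementations.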


@@ -1,6 +1,6 @@
//+build linux
package file
package local
import (
"os"
@@ -18,8 +18,8 @@ var (
fallocFlagsIndex int32
)
// PreAllocate the file for performance reasons
func PreAllocate(size int64, out *os.File) error {
// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
if size <= 0 {
return nil
}
@@ -45,7 +45,7 @@ again:
return err
}
// SetSparse makes the file be a sparse file
func SetSparse(out *os.File) error {
// setSparse makes the file be a sparse file
func setSparse(out *os.File) error {
return nil
}


@@ -1,6 +1,6 @@
//+build windows
package file
package local
import (
"os"
@@ -32,8 +32,8 @@ type ioStatusBlock struct {
Status, Information uintptr
}
// PreAllocate the file for performance reasons
func PreAllocate(size int64, out *os.File) error {
// preAllocate the file for performance reasons
func preAllocate(size int64, out *os.File) error {
if size <= 0 {
return nil
}
@@ -82,8 +82,8 @@ const (
FSCTL_SET_SPARSE = 0x000900c4
)
// SetSparse makes the file be a sparse file
func SetSparse(out *os.File) error {
// setSparse makes the file be a sparse file
func setSparse(out *os.File) error {
err := syscall.DeviceIoControl(syscall.Handle(out.Fd()), FSCTL_SET_SPARSE, nil, 0, nil, 0, nil, nil)
if err != nil {
return errors.Wrap(err, "DeviceIoControl FSCTL_SET_SPARSE")

backend/onedrive/onedrive.go Executable file → Normal file

@@ -184,28 +184,6 @@ func init() {
log.Fatalf("Failed to query available drives: %v", err)
}
// Also call /me/drive as sometimes /me/drives doesn't return it #4068
if opts.Path == "/me/drives" {
opts.Path = "/me/drive"
meDrive := driveResource{}
_, err := srv.CallJSON(ctx, &opts, nil, &meDrive)
if err != nil {
log.Fatalf("Failed to query available drives: %v", err)
}
found := false
for _, drive := range drives.Drives {
if drive.DriveID == meDrive.DriveID {
found = true
break
}
}
// add the me drive if not found already
if !found {
fs.Debugf(nil, "Adding %v to drives list from /me/drive", meDrive)
drives.Drives = append(drives.Drives, meDrive)
}
}
if len(drives.Drives) == 0 {
log.Fatalf("No drives found")
} else {
@@ -248,9 +226,8 @@ func init() {
Name: "chunk_size",
Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).
Above this size files will be chunked - must be multiple of 320k (327,680 bytes) and
should not exceed 250M (262,144,000 bytes) else you may encounter \"Microsoft.SharePoint.Client.InvalidClientQueryException: The request message is too big.\"
Note that the chunks will be buffered into memory.`,
Above this size files will be chunked - must be multiple of 320k (327,680 bytes). Note
that the chunks will be buffered into memory.`,
Default: defaultChunkSize,
Advanced: true,
}, {
@@ -274,16 +251,6 @@ delete OneNote files or otherwise want them to show up in directory
listing, set this option.`,
Default: false,
Advanced: true,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server side operations (eg copy) to work across different onedrive configs.
This can be useful if you wish to do a server side copy between two
different Onedrives. Note that this isn't enabled by default
because it isn't easy to tell if it will work between any two
configurations.`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -336,12 +303,11 @@ configurations.`,
// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
Enc encoder.MultiEncoder `config:"encoding"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote one drive
@@ -436,8 +402,6 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
fs.Debugf(nil, "Too many requests. Trying again in %d seconds.", retryAfter)
}
}
case 507: // Insufficient Storage
return false, fserrors.FatalError(err)
}
}
return retry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
@@ -612,7 +576,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
CaseInsensitive: true,
ReadMimeType: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
}).Fill(f)
f.srv.SetErrorHandler(errorHandler)
@@ -1025,13 +988,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}
// Check we aren't overwriting a file on the same remote
if srcObj.fs == f {
srcPath := srcObj.rootPath()
dstPath := f.rootPath(remote)
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
srcPath := srcObj.rootPath()
dstPath := f.rootPath(remote)
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
}
// Create temporary object
@@ -1612,7 +1572,7 @@ func (o *Object) getPosition(ctx context.Context, url string) (pos int64, err er
}
// uploadFragment uploads a part
func (o *Object) uploadFragment(ctx context.Context, url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64, options ...fs.OpenOption) (info *api.Item, err error) {
func (o *Object) uploadFragment(ctx context.Context, url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64) (info *api.Item, err error) {
// var response api.UploadFragmentResponse
var resp *http.Response
var body []byte
@@ -1625,7 +1585,6 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
ContentLength: &toSend,
ContentRange: fmt.Sprintf("bytes %d-%d/%d", start+skip, start+chunkSize-1, totalSize),
Body: chunk,
Options: options,
}
_, _ = chunk.Seek(skip, io.SeekStart)
resp, err = o.fs.srv.Call(ctx, &opts)
@@ -1683,7 +1642,7 @@ func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error
}
// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, modTime time.Time, options ...fs.OpenOption) (info *api.Item, err error) {
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
if size <= 0 {
return nil, errors.New("unknown-sized upload not supported")
}
@@ -1734,7 +1693,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
}
seg := readers.NewRepeatableReader(io.LimitReader(in, n))
fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n, options...)
info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n)
if err != nil {
return nil, err
}
@@ -1747,7 +1706,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
// Update the content of a remote file within 4MB size in one single request
// This function will set modtime after uploading, which will create a new version for the remote file
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, modTime time.Time, options ...fs.OpenOption) (info *api.Item, err error) {
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
}
@@ -1764,7 +1723,6 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
Path: "/" + drive + "/items/" + trueDirID + ":/" + rest.URLPathEscape(o.fs.opt.Enc.FromStandardName(leaf)) + ":/content",
ContentLength: &size,
Body: in,
Options: options,
}
} else {
opts = rest.Opts{
@@ -1772,7 +1730,6 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
Path: "/root:/" + rest.URLPathEscape(o.srvPath()) + ":/content",
ContentLength: &size,
Body: in,
Options: options,
}
}
@@ -1814,9 +1771,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var info *api.Item
if size > 0 {
info, err = o.uploadMultipart(ctx, in, size, modTime, options...)
info, err = o.uploadMultipart(ctx, in, size, modTime)
} else if size == 0 {
info, err = o.uploadSinglepart(ctx, in, size, modTime, options...)
info, err = o.uploadSinglepart(ctx, in, size, modTime)
} else {
return errors.New("unknown-sized upload not supported")
}


@@ -687,9 +687,8 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
Name: leaf,
}
opts := rest.Opts{
Method: "POST",
Options: options,
Path: "/upload/create_file.json",
Method: "POST",
Path: "/upload/create_file.json",
}
resp, err = o.fs.srv.CallJSON(ctx, &opts, &createFileData, &response)
return o.fs.shouldRetry(resp, err)
@@ -971,9 +970,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
openUploadData := openUpload{SessionID: o.fs.session.SessionID, FileID: o.id, Size: size}
// fs.Debugf(nil, "PreOpen: %#v", openUploadData)
opts := rest.Opts{
Method: "POST",
Options: options,
Path: "/upload/open_file_upload.json",
Method: "POST",
Path: "/upload/open_file_upload.json",
}
resp, err := o.fs.srv.CallJSON(ctx, &opts, &openUploadData, &openResponse)
return o.fs.shouldRetry(resp, err)


@@ -41,7 +41,8 @@ const (
rcloneEncryptedClientSecret = "ej1OIF39VOQQ0PXaSdK9ztkLw3tdLNscW2157TKNQdQKkICR4uU7aFg4eFM"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
decayConstant = 2 // bigger for slower decay, exponential
rootID = "d0" // ID of root folder is always this
rootURL = "https://api.pcloud.com"
)
@@ -88,19 +89,13 @@ func init() {
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}, {
Name: "root_folder_id",
Help: "Fill in for rclone to use a non root folder as its starting point.",
Default: "d0",
Advanced: true,
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Enc encoder.MultiEncoder `config:"encoding"`
RootFolderID string `config:"root_folder_id"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote pcloud
@@ -270,8 +265,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return err
})
// Get rootFolderID
rootID := f.opt.RootFolderID
// Get rootID
f.dirCache = dircache.New(root, rootID, f)
// Find the current root
@@ -1080,7 +1074,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ContentLength: &size,
Parameters: url.Values{},
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
Options: options,
}
leaf = o.fs.opt.Enc.FromStandardName(leaf)
opts.Parameters.Set("filename", leaf)


@@ -517,7 +517,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
return f.PutUnchecked(ctx, in, src)
default:
return nil, err
}
@@ -1002,7 +1002,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Method: "POST",
Path: "/folder/uploadinfo",
Parameters: o.fs.baseParams(),
Options: options,
MultipartParams: url.Values{
"id": {directoryID},
},

View File

@@ -18,7 +18,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
@@ -35,8 +34,7 @@ type Fs struct {
client *putio.Client // client for making API calls to Put.io
pacer *fs.Pacer // To pace the API calls
dirCache *dircache.DirCache // Map of directory path to directory id
httpClient *http.Client // base http client
oAuthClient *http.Client // http client with oauth Authorization
oAuthClient *http.Client
}
// ------------------------------------------------------------
@@ -70,8 +68,7 @@ func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
if err != nil {
return nil, err
}
httpClient := fshttp.NewClient(fs.Config)
oAuthClient, _, err := oauthutil.NewClientWithBaseClient(name, m, putioConfig, httpClient)
oAuthClient, _, err := oauthutil.NewClient(name, m, putioConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure putio")
}
@@ -81,7 +78,6 @@ func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
opt: *opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
client: putio.NewClient(oAuthClient),
httpClient: httpClient,
oAuthClient: oAuthClient,
}
p.features = (&fs.Features{
@@ -257,7 +253,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
if err != nil {
return nil, err
}
loc, err := f.createUpload(ctx, leaf, size, directoryID, src.ModTime(ctx), options)
loc, err := f.createUpload(ctx, leaf, size, directoryID, src.ModTime(ctx))
if err != nil {
return nil, err
}
@@ -277,7 +273,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
return f.newObjectWithInfo(ctx, remote, entry)
}
func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID string, modTime time.Time, options []fs.OpenOption) (location string, err error) {
func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID string, modTime time.Time) (location string, err error) {
// defer log.Trace(f, "name=%v, size=%v, parentID=%v, modTime=%v", name, size, parentID, modTime.String())("location=%v, err=%v", location, &err)
err = f.pacer.Call(func() (bool, error) {
req, err := http.NewRequest("POST", "https://upload.put.io/files/", nil)
@@ -292,7 +288,6 @@ func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID
b64parentID := base64.StdEncoding.EncodeToString([]byte(parentID))
b64modifiedAt := base64.StdEncoding.EncodeToString([]byte(modTime.Format(time.RFC3339)))
req.Header.Set("upload-metadata", fmt.Sprintf("name %s,no-torrent %s,parent_id %s,updated-at %s", b64name, b64true, b64parentID, b64modifiedAt))
fs.OpenOptionAddHTTPHeaders(req.Header, options)
resp, err := f.oAuthClient.Do(req)
retry, err := shouldRetry(err)
if retry {
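Editor's note: the createUpload hunk above builds a tus-style `upload-metadata` header by base64-encoding each value and joining the pairs with commas. A small, self-contained sketch of that encoding; the field names are copied from the hunk, the helper function itself is hypothetical.

```go
package main

import (
	"encoding/base64"
	"fmt"
	"time"
)

// uploadMetadata builds the comma-separated, base64-valued header used by
// the tus-style upload endpoint shown in the hunk above.
func uploadMetadata(name, parentID string, modTime time.Time) string {
	enc := func(s string) string { return base64.StdEncoding.EncodeToString([]byte(s)) }
	return fmt.Sprintf("name %s,no-torrent %s,parent_id %s,updated-at %s",
		enc(name), enc("true"), enc(parentID), enc(modTime.Format(time.RFC3339)))
}

func main() {
	fmt.Println(uploadMetadata("report.pdf", "0", time.Now()))
}
```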

View File

@@ -241,7 +241,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
req.Header.Set(header, value)
}
// fs.Debugf(o, "opening file: id=%d", o.file.ID)
resp, err = o.fs.httpClient.Do(req)
resp, err = http.DefaultClient.Do(req)
return shouldRetry(err)
})
if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode >= 400 && perr.Response.StatusCode <= 499 {

View File

@@ -864,76 +864,6 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
})
}
// cleanUpBucket removes all pending multipart uploads for a given bucket
func (f *Fs) cleanUpBucket(ctx context.Context, bucket string) (err error) {
fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than 24 hours", bucket)
bucketInit, err := f.svc.Bucket(bucket, f.zone)
if err != nil {
return err
}
maxLimit := int(listLimitSize)
var marker *string
for {
req := qs.ListMultipartUploadsInput{
Limit: &maxLimit,
KeyMarker: marker,
}
var resp *qs.ListMultipartUploadsOutput
resp, err = bucketInit.ListMultipartUploads(&req)
if err != nil {
return errors.Wrap(err, "clean up bucket list multipart uploads")
}
for _, upload := range resp.Uploads {
if upload.Created != nil && upload.Key != nil && upload.UploadID != nil {
age := time.Since(*upload.Created)
if age > 24*time.Hour {
fs.Infof(f, "removing pending multipart upload for %q dated %v (%v ago)", *upload.Key, upload.Created, age)
req := qs.AbortMultipartUploadInput{
UploadID: upload.UploadID,
}
_, abortErr := bucketInit.AbortMultipartUpload(*upload.Key, &req)
if abortErr != nil {
err = errors.Wrapf(abortErr, "failed to remove multipart upload for %q", *upload.Key)
fs.Errorf(f, "%v", err)
}
} else {
fs.Debugf(f, "ignoring pending multipart upload for %q dated %v (%v ago)", *upload.Key, upload.Created, age)
}
}
}
if resp.HasMore != nil && !*resp.HasMore {
break
}
// Use NextMarker if set, otherwise use last Key
if resp.NextKeyMarker == nil || *resp.NextKeyMarker == "" {
fs.Errorf(f, "Expecting NextKeyMarker but didn't find one")
break
} else {
marker = resp.NextKeyMarker
}
}
return err
}
// CleanUp removes all pending multipart uploads
func (f *Fs) CleanUp(ctx context.Context) (err error) {
if f.rootBucket != "" {
return f.cleanUpBucket(ctx, f.rootBucket)
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
cleanErr := f.cleanUpBucket(ctx, f.opt.Enc.FromStandardName(entry.Remote()))
if err != nil {
fs.Errorf(f, "Failed to cleanup bucket: %q", cleanErr)
err = cleanErr
}
}
return err
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
@@ -1160,10 +1090,9 @@ func (o *Object) MimeType(ctx context.Context) string {
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.Copier = &Fs{}
_ fs.Object = &Object{}
_ fs.ListRer = &Fs{}
_ fs.MimeTyper = &Object{}
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.Object = &Object{}
_ fs.ListRer = &Fs{}
_ fs.MimeTyper = &Object{}
)
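Editor's note: the cleanUpBucket function in the diff above walks pending multipart uploads and aborts those older than 24 hours. A tiny stand-alone sketch of just the age test it applies; the upload struct is a stand-in for the QingStor SDK type, not the real one.

```go
package main

import (
	"fmt"
	"time"
)

// pendingUpload is a simplified stand-in for the SDK's multipart upload record.
type pendingUpload struct {
	Key     string
	Created time.Time
}

// shouldAbort mirrors the age test in cleanUpBucket: only uploads older
// than 24 hours are aborted, newer ones are left alone.
func shouldAbort(u pendingUpload, now time.Time) bool {
	return now.Sub(u.Created) > 24*time.Hour
}

func main() {
	now := time.Now()
	old := pendingUpload{Key: "big.iso", Created: now.Add(-48 * time.Hour)}
	fresh := pendingUpload{Key: "new.bin", Created: now.Add(-time.Hour)}
	fmt.Println(shouldAbort(old, now), shouldAbort(fresh, now)) // true false
}
```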

View File

@@ -341,17 +341,12 @@ func (mu *multiUploader) abort() error {
}
// multiPartUpload upload a multiple object into QingStor
func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) (err error) {
// Initiate an multi-part upload
func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) error {
var err error
//Initiate an multi-part upload
if err = mu.initiate(); err != nil {
return err
}
defer func() {
// Abort the transfer if returning an error
if err != nil {
_ = mu.abort()
}
}()
ch := make(chan chunk, mu.cfg.concurrency)
for i := 0; i < mu.cfg.concurrency; i++ {
@@ -405,5 +400,9 @@ func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) (err error) {
close(ch)
mu.wg.Wait()
// Complete Multipart Upload
return mu.complete()
err = mu.complete()
if mu.getErr() != nil || err != nil {
_ = mu.abort()
}
return err
}
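Editor's note: one side of the hunk above uses a named error return plus a deferred call so that any failure in initiate, upload or complete aborts the transfer exactly once; the other side checks for errors only after complete(). A self-contained sketch of the deferred-cleanup idiom, with placeholder types rather than the real QingStor uploader.

```go
package main

import (
	"errors"
	"fmt"
)

type uploader struct{ aborted bool }

func (u *uploader) initiate() error { return nil }
func (u *uploader) upload() error   { return errors.New("simulated network error") }
func (u *uploader) complete() error { return nil }
func (u *uploader) abort()          { u.aborted = true }

// run uses a named error return so the deferred check sees whatever error
// is about to be returned, and aborts the transfer on any failure path.
func (u *uploader) run() (err error) {
	defer func() {
		if err != nil {
			u.abort()
		}
	}()
	if err = u.initiate(); err != nil {
		return err
	}
	if err = u.upload(); err != nil {
		return err
	}
	return u.complete()
}

func main() {
	u := &uploader{}
	err := u.run()
	fmt.Println(err, "aborted:", u.aborted) // simulated network error aborted: true
}
```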

View File

@@ -641,7 +641,7 @@ isn't set then "acl" is used instead.`,
}, {
Name: "server_side_encryption",
Help: "The server-side encryption algorithm used when storing this object in S3.",
Provider: "AWS,Ceph,Minio",
Provider: "AWS",
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
@@ -652,22 +652,10 @@ isn't set then "acl" is used instead.`,
Value: "aws:kms",
Help: "aws:kms",
}},
}, {
Name: "sse_customer_algorithm",
Help: "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.",
Provider: "AWS,Ceph,Minio",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}, {
Value: "AES256",
Help: "AES256",
}},
}, {
Name: "sse_kms_key_id",
Help: "If using KMS ID you must provide the ARN of Key.",
Provider: "AWS,Ceph,Minio",
Provider: "AWS",
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
@@ -675,24 +663,6 @@ isn't set then "acl" is used instead.`,
Value: "arn:aws:kms:us-east-1:*",
Help: "arn:aws:kms:*",
}},
}, {
Name: "sse_customer_key",
Help: "If using SSE-C you must provide the secret encyption key used to encrypt/decrypt your data.",
Provider: "AWS,Ceph,Minio",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
}, {
Name: "sse_customer_key_md5",
Help: "If using SSE-C you must provide the secret encryption key MD5 checksum.",
Provider: "AWS,Ceph,Minio",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
}, {
Name: "storage_class",
Help: "The storage class to use when storing new objects in S3.",
@@ -784,13 +754,8 @@ The minimum is 0 and the maximum is 5GB.`,
Default: fs.SizeSuffix(maxSizeForCopy),
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Don't store MD5 checksum with object metadata
Normally rclone will calculate the MD5 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
Name: "disable_checksum",
Help: "Don't store MD5 checksum with object metadata",
Default: false,
Advanced: true,
}, {
@@ -924,9 +889,6 @@ type Options struct {
BucketACL string `config:"bucket_acl"`
ServerSideEncryption string `config:"server_side_encryption"`
SSEKMSKeyID string `config:"sse_kms_key_id"`
SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
SSECustomerKey string `config:"sse_customer_key"`
SSECustomerKeyMD5 string `config:"sse_customer_key_md5"`
StorageClass string `config:"storage_class"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
@@ -946,18 +908,19 @@ type Options struct {
// Fs represents a remote s3 server
type Fs struct {
name string // the name of the remote
root string // root of the bucket - ignore all objects above this
opt Options // parsed options
features *fs.Features // optional features
c *s3.S3 // the connection to the s3 server
ses *session.Session // the s3 session
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache for bucket creation status
pacer *fs.Pacer // To pace the API calls
srv *http.Client // a plain http client
pool *pool.Pool // memory pool
name string // the name of the remote
root string // root of the bucket - ignore all objects above this
opt Options // parsed options
features *fs.Features // optional features
c *s3.S3 // the connection to the s3 server
ses *session.Session // the s3 session
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache for bucket creation status
pacer *fs.Pacer // To pace the API calls
srv *http.Client // a plain http client
poolMu sync.Mutex // mutex protecting memory pools map
pools map[int64]*pool.Pool // memory pools
}
// Object describes a s3 object
@@ -1136,7 +1099,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
opt.ForcePathStyle = false
}
awsConfig := aws.NewConfig().
WithMaxRetries(0). // Rely on rclone's retry logic
WithMaxRetries(fs.Config.LowLevelRetries).
WithCredentials(cred).
WithHTTPClient(fshttp.NewClient(fs.Config)).
WithS3ForcePathStyle(opt.ForcePathStyle).
@@ -1243,20 +1206,20 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
pc := fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep)))
// Set pacer retries to 0 because we are relying on SDK retry mechanism.
// Setting it to 1 because in context of pacer it means 1 attempt.
pc.SetRetries(1)
f := &Fs{
name: name,
opt: *opt,
c: c,
ses: ses,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
pacer: pc,
cache: bucket.NewCache(),
srv: fshttp.NewClient(fs.Config),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
opt.UploadConcurrency*fs.Config.Transfers,
opt.MemoryPoolUseMmap,
),
pools: make(map[int64]*pool.Pool),
}
f.setRoot(root)
@@ -1506,7 +1469,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
continue
}
remote = remote[len(prefix):]
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
isDirectory := strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
@@ -1738,12 +1701,12 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
if err == nil {
fs.Infof(f, "Bucket %q created with ACL %q", bucket, f.opt.BucketACL)
}
if awsErr, ok := err.(awserr.Error); ok {
if code := awsErr.Code(); code == "BucketAlreadyOwnedByYou" || code == "BucketAlreadyExists" {
if err, ok := err.(awserr.Error); ok {
if err.Code() == "BucketAlreadyOwnedByYou" {
err = nil
}
}
return err
return nil
}, func() (bool, error) {
return f.bucketExists(ctx, bucket)
})
@@ -1947,16 +1910,19 @@ func (f *Fs) Hashes() hash.Set {
}
func (f *Fs) getMemoryPool(size int64) *pool.Pool {
if size == int64(f.opt.ChunkSize) {
return f.pool
}
f.poolMu.Lock()
defer f.poolMu.Unlock()
return pool.New(
time.Duration(f.opt.MemoryPoolFlushTime),
int(size),
f.opt.UploadConcurrency*fs.Config.Transfers,
f.opt.MemoryPoolUseMmap,
)
_, ok := f.pools[size]
if !ok {
f.pools[size] = pool.New(
time.Duration(f.opt.MemoryPoolFlushTime),
int(f.opt.ChunkSize),
f.opt.UploadConcurrency*fs.Config.Transfers,
f.opt.MemoryPoolUseMmap,
)
}
return f.pools[size]
}
// ------------------------------------------------------------
@@ -2123,35 +2089,22 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
Bucket: &bucket,
Key: &bucketPath,
}
if o.fs.opt.SSECustomerAlgorithm != "" {
req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
}
if o.fs.opt.SSECustomerKey != "" {
req.SSECustomerKey = &o.fs.opt.SSECustomerKey
}
if o.fs.opt.SSECustomerKeyMD5 != "" {
req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5
}
httpReq, resp := o.fs.c.GetObjectRequest(&req)
fs.FixRangeOption(options, o.bytes)
for _, option := range options {
switch option.(type) {
case *fs.RangeOption, *fs.SeekOption:
_, value := option.Header()
req.Range = &value
case *fs.HTTPOption:
key, value := option.Header()
httpReq.HTTPRequest.Header.Add(key, value)
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
var resp *s3.GetObjectOutput
err = o.fs.pacer.Call(func() (bool, error) {
var err error
httpReq.HTTPRequest = httpReq.HTTPRequest.WithContext(ctx)
err = httpReq.Send()
resp, err = o.fs.c.GetObjectWithContext(ctx, &req)
return o.fs.shouldRetry(err)
})
if err, ok := err.(awserr.RequestFailure); ok {
@@ -2315,7 +2268,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
})
// return the memory and token
memPool.Put(buf)
memPool.Put(buf[:partSize])
tokens.Put()
if err != nil {
@@ -2403,15 +2356,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if o.fs.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
}
if o.fs.opt.SSECustomerAlgorithm != "" {
req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
}
if o.fs.opt.SSECustomerKey != "" {
req.SSECustomerKey = &o.fs.opt.SSECustomerKey
}
if o.fs.opt.SSECustomerKeyMD5 != "" {
req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5
}
if o.fs.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
}
@@ -2458,18 +2402,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
httpReq.Header = headers
httpReq.ContentLength = size
for _, option := range options {
switch option.(type) {
case *fs.HTTPOption:
key, value := option.Header()
httpReq.Header.Add(key, value)
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err := o.fs.srv.Do(httpReq)
if err != nil {
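Editor's note: among the S3 changes above, getMemoryPool switches between a single pre-built pool and a mutex-guarded map of pools keyed by buffer size. A self-contained sketch of the size-keyed cache pattern; the pool type here is a simplified stand-in for rclone's lib/pool.

```go
package main

import (
	"fmt"
	"sync"
)

// bufPool is a simplified stand-in for rclone's lib/pool buffer pool.
type bufPool struct{ size int }

func newBufPool(size int) *bufPool { return &bufPool{size: size} }

// poolCache hands out one pool per buffer size, creating it on first use.
type poolCache struct {
	mu    sync.Mutex
	pools map[int64]*bufPool
}

func (c *poolCache) get(size int64) *bufPool {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.pools == nil {
		c.pools = make(map[int64]*bufPool)
	}
	if p, ok := c.pools[size]; ok {
		return p
	}
	p := newBufPool(int(size))
	c.pools[size] = p
	return p
}

func main() {
	c := &poolCache{}
	fmt.Println(c.get(5<<20) == c.get(5<<20)) // true: the same pool is reused
}
```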

View File

@@ -1,153 +0,0 @@
package api
// Some api objects are duplicated with only small differences,
// it's because the returned JSON objects are very inconsistent between api calls
// AuthenticationRequest contains user credentials
type AuthenticationRequest struct {
Username string `json:"username"`
Password string `json:"password"`
}
// AuthenticationResult is returned by a call to the authentication api
type AuthenticationResult struct {
Token string `json:"token"`
Errors []string `json:"non_field_errors"`
}
// AccountInfo contains simple user properties
type AccountInfo struct {
Usage int64 `json:"usage"`
Total int64 `json:"total"`
Email string `json:"email"`
Name string `json:"name"`
}
// ServerInfo contains server information
type ServerInfo struct {
Version string `json:"version"`
}
// DefaultLibrary when none specified
type DefaultLibrary struct {
ID string `json:"repo_id"`
Exists bool `json:"exists"`
}
// CreateLibraryRequest contains the information needed to create a library
type CreateLibraryRequest struct {
Name string `json:"name"`
Description string `json:"desc"`
Password string `json:"passwd"`
}
// Library properties. Please note not all properties are going to be useful for rclone
type Library struct {
Encrypted bool `json:"encrypted"`
Owner string `json:"owner"`
ID string `json:"id"`
Size int `json:"size"`
Name string `json:"name"`
Modified int64 `json:"mtime"`
}
// CreateLibrary properties. Seafile is not consistent and returns different types for different API calls
type CreateLibrary struct {
ID string `json:"repo_id"`
Name string `json:"repo_name"`
}
// FileType is either "dir" or "file"
type FileType string
// File types
var (
FileTypeDir FileType = "dir"
FileTypeFile FileType = "file"
)
// FileDetail contains file properties (for older api v2.0)
type FileDetail struct {
ID string `json:"id"`
Type FileType `json:"type"`
Name string `json:"name"`
Size int64 `json:"size"`
Parent string `json:"parent_dir"`
Modified string `json:"last_modified"`
}
// DirEntries contains a list of DirEntry
type DirEntries struct {
Entries []DirEntry `json:"dirent_list"`
}
// DirEntry contains a directory entry
type DirEntry struct {
ID string `json:"id"`
Type FileType `json:"type"`
Name string `json:"name"`
Size int64 `json:"size"`
Path string `json:"parent_dir"`
Modified int64 `json:"mtime"`
}
// Operation is move, copy or rename
type Operation string
// Operations
var (
CopyFileOperation Operation = "copy"
MoveFileOperation Operation = "move"
RenameFileOperation Operation = "rename"
)
// FileOperationRequest is sent to the api to copy, move or rename a file
type FileOperationRequest struct {
Operation Operation `json:"operation"`
DestinationLibraryID string `json:"dst_repo"` // For copy/move operation
DestinationPath string `json:"dst_dir"` // For copy/move operation
NewName string `json:"newname"` // Only to be used by the rename operation
}
// FileInfo is returned by a server file copy/move/rename (new api v2.1)
type FileInfo struct {
Type string `json:"type"`
LibraryID string `json:"repo_id"`
Path string `json:"parent_dir"`
Name string `json:"obj_name"`
ID string `json:"obj_id"`
Size int64 `json:"size"`
}
// CreateDirRequest only contain an operation field
type CreateDirRequest struct {
Operation string `json:"operation"`
}
// DirectoryDetail contains the directory details specific to the getDirectoryDetails call
type DirectoryDetail struct {
ID string `json:"repo_id"`
Name string `json:"name"`
Path string `json:"path"`
}
// ShareLinkRequest contains the information needed to create or list shared links
type ShareLinkRequest struct {
LibraryID string `json:"repo_id"`
Path string `json:"path"`
}
// SharedLink contains the information returned by a call to shared link creation
type SharedLink struct {
Link string `json:"link"`
IsExpired bool `json:"is_expired"`
}
// BatchSourceDestRequest contains JSON parameters for sending a batch copy or move operation
type BatchSourceDestRequest struct {
SrcLibraryID string `json:"src_repo_id"`
SrcParentDir string `json:"src_parent_dir"`
SrcItems []string `json:"src_dirents"`
DstLibraryID string `json:"dst_repo_id"`
DstParentDir string `json:"dst_parent_dir"`
}

View File

@@ -1,127 +0,0 @@
package seafile
import (
"context"
"io"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
)
// Object describes a seafile object (also commonly called a file)
type Object struct {
fs *Fs // what this object is part of
id string // internal ID of object
remote string // The remote path (full path containing library name if target at root)
pathInLibrary string // Path of the object without the library name
size int64 // size of the object
modTime time.Time // modification time of the object
libraryID string // Needed to download the file
}
// ==================== Interface fs.DirEntry ====================
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote string
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns last modified time
func (o *Object) ModTime(context.Context) time.Time {
return o.modTime
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
}
// ==================== Interface fs.ObjectInfo ====================
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Storable says whether this object can be stored
func (o *Object) Storable() bool {
return true
}
// ==================== Interface fs.Object ====================
// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
return fs.ErrorCantSetModTime
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
downloadLink, err := o.fs.getDownloadLink(ctx, o.libraryID, o.pathInLibrary)
if err != nil {
return nil, err
}
reader, err := o.fs.download(ctx, downloadLink, o.Size(), options...)
if err != nil {
return nil, err
}
return reader, nil
}
// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
// The upload sometimes return a temporary 500 error
// We cannot use the pacer to retry uploading the file as the upload link is single use only
for retry := 0; retry <= 3; retry++ {
uploadLink, err := o.fs.getUploadLink(ctx, o.libraryID)
if err != nil {
return err
}
uploaded, err := o.fs.upload(ctx, in, uploadLink, o.pathInLibrary)
if err == ErrorInternalDuringUpload {
// This is a temporary error, try again with a new upload link
continue
}
if err != nil {
return err
}
// Set the properties from the upload back to the object
o.size = uploaded.Size
o.id = uploaded.ID
return nil
}
return ErrorInternalDuringUpload
}
// Remove this object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.deleteFile(ctx, o.libraryID, o.pathInLibrary)
}
// ==================== Optional Interface fs.IDer ====================
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
return o.id
}
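Editor's note: Update above retries the upload a few times because the Seafile upload link is single use and the server occasionally returns a temporary 500, so each attempt fetches a fresh link instead of reusing the old one. A self-contained sketch of that retry shape, with placeholder names rather than the real seafile API.

```go
package main

import (
	"errors"
	"fmt"
)

// errTemporary stands in for the backend's ErrorInternalDuringUpload.
var errTemporary = errors.New("temporary server error during upload")

// getUploadLink simulates fetching a fresh single-use upload link.
func getUploadLink(attempt int) string { return fmt.Sprintf("upload-link-%d", attempt) }

// upload simulates an upload that fails transiently on the first attempts.
func upload(link string, attempt int) error {
	if attempt < 2 {
		return errTemporary
	}
	return nil
}

func uploadWithRetry(maxRetries int) error {
	for retry := 0; retry <= maxRetries; retry++ {
		link := getUploadLink(retry) // a new link every time, never reused
		err := upload(link, retry)
		if errors.Is(err, errTemporary) {
			continue
		}
		return err
	}
	return errTemporary
}

func main() {
	fmt.Println(uploadWithRetry(3)) // <nil>
}
```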

View File

@@ -1,67 +0,0 @@
package seafile
import (
"fmt"
"net/url"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/pacer"
)
const (
minSleep = 100 * time.Millisecond
maxSleep = 10 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
// Use only one pacer per server URL
var (
pacers map[string]*fs.Pacer
pacerMutex sync.Mutex
)
func init() {
pacers = make(map[string]*fs.Pacer, 0)
}
// getPacer returns the unique pacer for that remote URL
func getPacer(remote string) *fs.Pacer {
pacerMutex.Lock()
defer pacerMutex.Unlock()
remote = parseRemote(remote)
if existing, found := pacers[remote]; found {
return existing
}
pacers[remote] = fs.NewPacer(
pacer.NewDefault(
pacer.MinSleep(minSleep),
pacer.MaxSleep(maxSleep),
pacer.DecayConstant(decayConstant),
),
)
return pacers[remote]
}
// parseRemote formats a remote url into "hostname:port"
func parseRemote(remote string) string {
remoteURL, err := url.Parse(remote)
if err != nil {
// Return a default value in the very unlikely event we're not going to parse remote
fs.Infof(nil, "Cannot parse remote %s", remote)
return "default"
}
host := remoteURL.Hostname()
port := remoteURL.Port()
if port == "" {
if remoteURL.Scheme == "https" {
port = "443"
} else {
port = "80"
}
}
return fmt.Sprintf("%s:%s", host, port)
}
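Editor's note: parseRemote above normalises a remote URL to host:port (filling in 443 or 80 when no port is given) so that all remotes pointing at the same server share one pacer. A stand-alone sketch of that normalisation using example hostnames.

```go
package main

import (
	"fmt"
	"net/url"
)

// hostKey reduces a remote URL to "hostname:port", defaulting the port
// from the scheme, so URLs on the same server map to the same key.
func hostKey(remote string) string {
	u, err := url.Parse(remote)
	if err != nil {
		return "default"
	}
	port := u.Port()
	if port == "" {
		if u.Scheme == "https" {
			port = "443"
		} else {
			port = "80"
		}
	}
	return fmt.Sprintf("%s:%s", u.Hostname(), port)
}

func main() {
	fmt.Println(hostKey("https://seafile.example.com/library-a")) // seafile.example.com:443
	fmt.Println(hostKey("http://seafile.example.com:8000/lib"))   // seafile.example.com:8000
}
```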

File diff suppressed because it is too large

View File

@@ -1,123 +0,0 @@
package seafile
import (
"path"
"testing"
"github.com/stretchr/testify/assert"
)
type pathData struct {
configLibrary string // Library specified in the config
configRoot string // Root directory specified in the config
argumentPath string // Path given as an argument in the command line
expectedLibrary string
expectedPath string
}
// Test the method to split a library name and a path
// from a mix of configuration data and path command line argument
func TestSplitPath(t *testing.T) {
testData := []pathData{
pathData{
configLibrary: "",
configRoot: "",
argumentPath: "",
expectedLibrary: "",
expectedPath: "",
},
pathData{
configLibrary: "",
configRoot: "",
argumentPath: "Library",
expectedLibrary: "Library",
expectedPath: "",
},
pathData{
configLibrary: "",
configRoot: "",
argumentPath: path.Join("Library", "path", "to", "file"),
expectedLibrary: "Library",
expectedPath: path.Join("path", "to", "file"),
},
pathData{
configLibrary: "Library",
configRoot: "",
argumentPath: "",
expectedLibrary: "Library",
expectedPath: "",
},
pathData{
configLibrary: "Library",
configRoot: "",
argumentPath: "path",
expectedLibrary: "Library",
expectedPath: "path",
},
pathData{
configLibrary: "Library",
configRoot: "",
argumentPath: path.Join("path", "to", "file"),
expectedLibrary: "Library",
expectedPath: path.Join("path", "to", "file"),
},
pathData{
configLibrary: "Library",
configRoot: "root",
argumentPath: "",
expectedLibrary: "Library",
expectedPath: "root",
},
pathData{
configLibrary: "Library",
configRoot: path.Join("root", "path"),
argumentPath: "",
expectedLibrary: "Library",
expectedPath: path.Join("root", "path"),
},
pathData{
configLibrary: "Library",
configRoot: "root",
argumentPath: "path",
expectedLibrary: "Library",
expectedPath: path.Join("root", "path"),
},
pathData{
configLibrary: "Library",
configRoot: "root",
argumentPath: path.Join("path", "to", "file"),
expectedLibrary: "Library",
expectedPath: path.Join("root", "path", "to", "file"),
},
pathData{
configLibrary: "Library",
configRoot: path.Join("root", "path"),
argumentPath: path.Join("subpath", "to", "file"),
expectedLibrary: "Library",
expectedPath: path.Join("root", "path", "subpath", "to", "file"),
},
}
for _, test := range testData {
fs := &Fs{
libraryName: test.configLibrary,
rootDirectory: test.configRoot,
}
libraryName, path := fs.splitPath(test.argumentPath)
assert.Equal(t, test.expectedLibrary, libraryName)
assert.Equal(t, test.expectedPath, path)
}
}
func TestSplitPathIntoSlice(t *testing.T) {
testData := map[string][]string{
"1": {"1"},
"/1": {"1"},
"/1/": {"1"},
"1/2/3": {"1", "2", "3"},
}
for input, expected := range testData {
output := splitPath(input)
assert.Equal(t, expected, output)
}
}

View File

@@ -1,17 +0,0 @@
// Test Seafile filesystem interface
package seafile_test
import (
"testing"
"github.com/rclone/rclone/backend/seafile"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestSeafile:",
NilObject: (*seafile.Object)(nil),
})
}

File diff suppressed because it is too large

View File

@@ -1429,9 +1429,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var resp *http.Response
var info api.UploadSpecification
opts := rest.Opts{
Method: "POST",
Path: "/Items(" + directoryID + ")/Upload2",
Options: options,
Method: "POST",
Path: "/Items(" + directoryID + ")/Upload2",
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &req, &info)

View File

@@ -733,7 +733,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
return f.PutUnchecked(ctx, in, src)
default:
return nil, err
}
@@ -1320,7 +1320,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
RootURL: o.id,
Path: "/data",
NoResponse: true,
Options: options,
Body: in,
}
if size >= 0 {

View File

@@ -1285,7 +1285,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
m.SetModTime(modTime)
contentType := fs.MimeType(ctx, src)
headers := m.ObjectHeaders()
fs.OpenOptionAddHeaders(options, headers)
uniquePrefix := ""
if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
uniquePrefix, err = o.updateChunks(in, headers, size, contentType)

View File

@@ -1,684 +0,0 @@
// +build go1.13,!plan9
// Package tardigrade provides an interface to Tardigrade decentralized object storage.
package tardigrade
import (
"context"
"fmt"
"io"
"log"
"path"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/bucket"
"golang.org/x/text/unicode/norm"
"storj.io/uplink"
)
const (
existingProvider = "existing"
newProvider = "new"
)
var satMap = map[string]string{
"us-central-1.tardigrade.io": "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
"europe-west-1.tardigrade.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
"asia-east-1.tardigrade.io": "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "tardigrade",
Description: "Tardigrade Decentralized Cloud Storage",
NewFs: NewFs,
Config: func(name string, configMapper configmap.Mapper) {
provider, _ := configMapper.Get(fs.ConfigProvider)
config.FileDeleteKey(name, fs.ConfigProvider)
if provider == newProvider {
satelliteString, _ := configMapper.Get("satellite_address")
apiKey, _ := configMapper.Get("api_key")
passphrase, _ := configMapper.Get("passphrase")
// satelliteString contains always default and passphrase can be empty
if apiKey == "" {
return
}
satellite, found := satMap[satelliteString]
if !found {
satellite = satelliteString
}
access, err := uplink.RequestAccessWithPassphrase(context.TODO(), satellite, apiKey, passphrase)
if err != nil {
log.Fatalf("Couldn't create access grant: %v", err)
}
serialziedAccess, err := access.Serialize()
if err != nil {
log.Fatalf("Couldn't serialize access grant: %v", err)
}
configMapper.Set("satellite_address", satellite)
configMapper.Set("access_grant", serialziedAccess)
} else if provider == existingProvider {
config.FileDeleteKey(name, "satellite_address")
config.FileDeleteKey(name, "api_key")
config.FileDeleteKey(name, "passphrase")
} else {
log.Fatalf("Invalid provider type: %s", provider)
}
},
Options: []fs.Option{
{
Name: fs.ConfigProvider,
Help: "Choose an authentication method.",
Required: true,
Default: existingProvider,
Examples: []fs.OptionExample{{
Value: "existing",
Help: "Use an existing access grant.",
}, {
Value: newProvider,
Help: "Create a new access grant from satellite address, API key, and passphrase.",
},
}},
{
Name: "access_grant",
Help: "Access Grant.",
Required: false,
Provider: "existing",
},
{
Name: "satellite_address",
Help: "Satellite Address. Custom satellite address should match the format: <nodeid>@<address>:<port>.",
Required: false,
Provider: newProvider,
Default: "us-central-1.tardigrade.io",
Examples: []fs.OptionExample{{
Value: "us-central-1.tardigrade.io",
Help: "US Central 1",
}, {
Value: "europe-west-1.tardigrade.io",
Help: "Europe West 1",
}, {
Value: "asia-east-1.tardigrade.io",
Help: "Asia East 1",
},
},
},
{
Name: "api_key",
Help: "API Key.",
Required: false,
Provider: newProvider,
},
{
Name: "passphrase",
Help: "Encryption Passphrase. To access existing objects enter passphrase used for uploading.",
Required: false,
Provider: newProvider,
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
Access string `config:"access_grant"`
SatelliteAddress string `config:"satellite_address"`
APIKey string `config:"api_key"`
Passphrase string `config:"passphrase"`
}
// Fs represents a remote to Tardigrade
type Fs struct {
name string // the name of the remote
root string // root of the filesystem
opts Options // parsed options
features *fs.Features // optional features
access *uplink.Access // parsed scope
project *uplink.Project // project client
}
// Check the interfaces are satisfied.
var (
_ fs.Fs = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.PutStreamer = &Fs{}
)
// NewFs creates a filesystem backed by Tardigrade.
func NewFs(name, root string, m configmap.Mapper) (_ fs.Fs, err error) {
ctx := context.Background()
// Setup filesystem and connection to Tardigrade
root = norm.NFC.String(root)
root = strings.Trim(root, "/")
f := &Fs{
name: name,
root: root,
}
// Parse config into Options struct
err = configstruct.Set(m, &f.opts)
if err != nil {
return nil, err
}
// Parse access
var access *uplink.Access
if f.opts.Access != "" {
access, err = uplink.ParseAccess(f.opts.Access)
if err != nil {
return nil, errors.Wrap(err, "tardigrade: access")
}
}
if access == nil && f.opts.SatelliteAddress != "" && f.opts.APIKey != "" && f.opts.Passphrase != "" {
access, err = uplink.RequestAccessWithPassphrase(ctx, f.opts.SatelliteAddress, f.opts.APIKey, f.opts.Passphrase)
if err != nil {
return nil, errors.Wrap(err, "tardigrade: access")
}
serializedAccess, err := access.Serialize()
if err != nil {
return nil, errors.Wrap(err, "tardigrade: access")
}
err = config.SetValueAndSave(f.name, "access_grant", serializedAccess)
if err != nil {
return nil, errors.Wrap(err, "tardigrade: access")
}
}
if access == nil {
return nil, errors.New("access not found")
}
f.access = access
f.features = (&fs.Features{
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(f)
project, err := f.connect(ctx)
if err != nil {
return nil, err
}
f.project = project
// Root validation needs to check the following: If a bucket path is
// specified and exists, then the object must be a directory.
//
// NOTE: At this point this must return the filesystem object we've
// created so far even if there is an error.
if root != "" {
bucketName, bucketPath := bucket.Split(root)
if bucketName != "" && bucketPath != "" {
_, err = project.StatBucket(ctx, bucketName)
if err != nil {
return f, errors.Wrap(err, "tardigrade: bucket")
}
object, err := project.StatObject(ctx, bucketName, bucketPath)
if err == nil {
if !object.IsPrefix {
// If the root is actually a file we
// need to return the *parent*
// directory of the root instead and an
// error that the original root
// requested is a file.
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
}
f.root = newRoot
return f, fs.ErrorIsFile
}
}
}
}
return f, nil
}
// connect opens a connection to Tardigrade.
func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
fs.Debugf(f, "connecting...")
defer fs.Debugf(f, "connected: %+v", err)
cfg := uplink.Config{}
project, err = cfg.OpenProject(ctx, f.access)
if err != nil {
return nil, errors.Wrap(err, "tardigrade: project")
}
return
}
// absolute computes the absolute bucket name and path from the filesystem root
// and the relative path provided.
func (f *Fs) absolute(relative string) (bucketName, bucketPath string) {
bn, bp := bucket.Split(path.Join(f.root, relative))
// NOTE: Technically libuplink does not care about the encoding. It is
// happy to work with them as opaque byte sequences. However, rclone
// has a test that requires two paths with the same normalized form
// (but different un-normalized forms) to point to the same file. This
// means we have to normalize before we interact with libuplink.
return norm.NFC.String(bn), norm.NFC.String(bp)
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("FS sj://%s", f.root)
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return time.Nanosecond
}
// Hashes returns the supported hash types of the filesystem.
func (f *Fs) Hashes() hash.Set {
return hash.NewHashSet()
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// List the objects and directories in relative into entries. The entries can
// be returned in any order but should be for a complete directory.
//
// relative should be "" to list the root, and should not have trailing
// slashes.
//
// This should return fs.ErrDirNotFound if the directory isn't found.
func (f *Fs) List(ctx context.Context, relative string) (entries fs.DirEntries, err error) {
fs.Debugf(f, "ls ./%s", relative)
bucketName, bucketPath := f.absolute(relative)
defer func() {
if errors.Is(err, uplink.ErrBucketNotFound) {
err = fs.ErrorDirNotFound
}
}()
if bucketName == "" {
if bucketPath != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return f.listObjects(ctx, relative, bucketName, bucketPath)
}
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
fs.Debugf(f, "BKT ls")
buckets := f.project.ListBuckets(ctx, nil)
for buckets.Next() {
bucket := buckets.Item()
entries = append(entries, fs.NewDir(bucket.Name, bucket.Created))
}
return entries, buckets.Err()
}
// newDirEntry creates a directory entry from an uplink object.
//
// NOTE: Getting the exact behavior required by rclone is somewhat tricky. The
// path manipulation here is necessary to cover all the different ways the
// filesystem and object could be initialized and combined.
func (f *Fs) newDirEntry(relative, prefix string, object *uplink.Object) fs.DirEntry {
if object.IsPrefix {
// . The entry must include the relative path as its prefix. Depending on
// | what is being listed and how the filesystem root was initialized the
// | relative path may be empty (and so we use path joining here to ensure
// | we don't end up with an empty path segment).
// |
// | . Remove the prefix used during listing.
// | |
// | | . Remove the trailing slash.
// | | |
// v v v
return fs.NewDir(path.Join(relative, object.Key[len(prefix):len(object.Key)-1]), object.System.Created)
}
return newObjectFromUplink(f, relative, object)
}
func (f *Fs) listObjects(ctx context.Context, relative, bucketName, bucketPath string) (entries fs.DirEntries, err error) {
fs.Debugf(f, "OBJ ls ./%s (%q, %q)", relative, bucketName, bucketPath)
opts := &uplink.ListObjectsOptions{
Prefix: newPrefix(bucketPath),
System: true,
Custom: true,
}
fs.Debugf(f, "opts %+v", opts)
objects := f.project.ListObjects(ctx, bucketName, opts)
for objects.Next() {
entries = append(entries, f.newDirEntry(relative, opts.Prefix, objects.Item()))
}
err = objects.Err()
if err != nil {
return nil, err
}
return entries, nil
}
// ListR lists the objects and directories of the Fs starting from dir
// recursively into out.
//
// relative should be "" to start from the root, and should not have trailing
// slashes.
//
// This should return ErrDirNotFound if the directory isn't found.
//
// It should call callback for each tranche of entries read. These need not be
// returned in any particular order. If callback returns an error then the
// listing will stop immediately.
//
// Don't implement this unless you have a more efficient way of listing
// recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, relative string, callback fs.ListRCallback) (err error) {
fs.Debugf(f, "ls -R ./%s", relative)
bucketName, bucketPath := f.absolute(relative)
defer func() {
if errors.Is(err, uplink.ErrBucketNotFound) {
err = fs.ErrorDirNotFound
}
}()
if bucketName == "" {
if bucketPath != "" {
return fs.ErrorListBucketRequired
}
return f.listBucketsR(ctx, callback)
}
return f.listObjectsR(ctx, relative, bucketName, bucketPath, callback)
}
func (f *Fs) listBucketsR(ctx context.Context, callback fs.ListRCallback) (err error) {
fs.Debugf(f, "BKT ls -R")
buckets := f.project.ListBuckets(ctx, nil)
for buckets.Next() {
bucket := buckets.Item()
err = f.listObjectsR(ctx, bucket.Name, bucket.Name, "", callback)
if err != nil {
return err
}
}
return buckets.Err()
}
func (f *Fs) listObjectsR(ctx context.Context, relative, bucketName, bucketPath string, callback fs.ListRCallback) (err error) {
fs.Debugf(f, "OBJ ls -R ./%s (%q, %q)", relative, bucketName, bucketPath)
opts := &uplink.ListObjectsOptions{
Prefix: newPrefix(bucketPath),
Recursive: true,
System: true,
Custom: true,
}
objects := f.project.ListObjects(ctx, bucketName, opts)
for objects.Next() {
object := objects.Item()
err = callback(fs.DirEntries{f.newDirEntry(relative, opts.Prefix, object)})
if err != nil {
return err
}
}
err = objects.Err()
if err != nil {
return err
}
return nil
}
// NewObject finds the Object at relative. If it can't be found it returns the
// error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, relative string) (_ fs.Object, err error) {
fs.Debugf(f, "stat ./%s", relative)
bucketName, bucketPath := f.absolute(relative)
object, err := f.project.StatObject(ctx, bucketName, bucketPath)
if err != nil {
fs.Debugf(f, "err: %+v", err)
if errors.Is(err, uplink.ErrObjectNotFound) {
return nil, fs.ErrorObjectNotFound
}
return nil, err
}
return newObjectFromUplink(f, relative, object), nil
}
// Put in to the remote path with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should
// either return an error or upload it properly (rather than e.g. calling
// panic).
//
// May create the object even if it returns an error - if so will return the
// object and the error, otherwise will return nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (_ fs.Object, err error) {
fs.Debugf(f, "cp input ./%s # %+v %d", src.Remote(), options, src.Size())
// Reject options we don't support.
for _, option := range options {
if option.Mandatory() {
fs.Errorf(f, "Unsupported mandatory option: %v", option)
return nil, errors.New("unsupported mandatory option")
}
}
bucketName, bucketPath := f.absolute(src.Remote())
upload, err := f.project.UploadObject(ctx, bucketName, bucketPath, nil)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
aerr := upload.Abort()
if aerr != nil {
fs.Errorf(f, "cp input ./%s %+v: %+v", src.Remote(), options, aerr)
}
}
}()
err = upload.SetCustomMetadata(ctx, uplink.CustomMetadata{
"rclone:mtime": src.ModTime(ctx).Format(time.RFC3339Nano),
})
if err != nil {
return nil, err
}
_, err = io.Copy(upload, in)
if err != nil {
err = fserrors.RetryError(err)
fs.Errorf(f, "cp input ./%s %+v: %+v\n", src.Remote(), options, err)
return nil, err
}
err = upload.Commit()
if err != nil {
if errors.Is(err, uplink.ErrBucketNotFound) {
// Rclone assumes the backend will create the bucket if not existing yet.
// Here we create the bucket and return a retry error for rclone to retry the upload.
_, err = f.project.EnsureBucket(ctx, bucketName)
if err != nil {
return nil, err
}
err = fserrors.RetryError(errors.New("bucket was not available, now created, the upload must be retried"))
}
return nil, err
}
return newObjectFromUplink(f, "", upload.Info()), nil
}
// PutStream uploads to the remote path with the modTime given of indeterminate
// size.
//
// May create the object even if it returns an error - if so will return the
// object and the error, otherwise will return nil and the error.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (_ fs.Object, err error) {
return f.Put(ctx, in, src, options...)
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, relative string) (err error) {
fs.Debugf(f, "mkdir -p ./%s", relative)
bucketName, _ := f.absolute(relative)
_, err = f.project.EnsureBucket(ctx, bucketName)
return err
}
// Rmdir removes the directory (container, bucket)
//
// NOTE: Despite code documentation to the contrary, this method should not
// return an error if the directory does not exist.
func (f *Fs) Rmdir(ctx context.Context, relative string) (err error) {
fs.Debugf(f, "rmdir ./%s", relative)
bucketName, bucketPath := f.absolute(relative)
if bucketPath != "" {
// If we can successfully stat it, then it is an object (and not a prefix).
_, err := f.project.StatObject(ctx, bucketName, bucketPath)
if err != nil {
if errors.Is(err, uplink.ErrObjectNotFound) {
// At this point we know it is not an object,
// but we don't know if it is a prefix for one.
//
// We check this by doing a listing and if we
// get any results back, then we know this is a
// valid prefix (which implies the directory is
// not empty).
opts := &uplink.ListObjectsOptions{
Prefix: newPrefix(bucketPath),
System: true,
Custom: true,
}
objects := f.project.ListObjects(ctx, bucketName, opts)
if objects.Next() {
return fs.ErrorDirectoryNotEmpty
}
return objects.Err()
}
return err
}
return fs.ErrorIsFile
}
_, err = f.project.DeleteBucket(ctx, bucketName)
if err != nil {
if errors.Is(err, uplink.ErrBucketNotFound) {
return fs.ErrorDirNotFound
}
if errors.Is(err, uplink.ErrBucketNotEmpty) {
return fs.ErrorDirectoryNotEmpty
}
return err
}
return nil
}
// newPrefix returns a new prefix for listing conforming to the libuplink
// requirements. In particular, libuplink requires a trailing slash for
// listings, but rclone does not always provide one. Further, depending on how
// the path was initially path normalization may have removed it (e.g. a
// trailing slash from the CLI is removed before it ever get's to the backend
// code).
func newPrefix(prefix string) string {
if prefix == "" {
return prefix
}
if prefix[len(prefix)-1] == '/' {
return prefix
}
return prefix + "/"
}
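Editor's note: newDirEntry and newPrefix above deal with listing keys that come back carrying the listing prefix and, for prefixes, a trailing slash, both of which are stripped before building the rclone directory name. A small illustrative sketch of that trimming; the paths are made up.

```go
package main

import (
	"fmt"
	"path"
)

// dirNameFromKey strips the listing prefix and the trailing slash from a
// prefix key, then joins it onto the relative listing directory.
func dirNameFromKey(relative, prefix, key string) string {
	trimmed := key[len(prefix) : len(key)-1]
	return path.Join(relative, trimmed)
}

func main() {
	fmt.Println(dirNameFromKey("photos", "2020/", "2020/summer/")) // photos/summer
}
```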

View File

@@ -1,204 +0,0 @@
// +build go1.13,!plan9
package tardigrade
import (
"context"
"io"
"path"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/bucket"
"golang.org/x/text/unicode/norm"
"storj.io/uplink"
)
// Object describes a Tardigrade object
type Object struct {
fs *Fs
absolute string
size int64
created time.Time
modified time.Time
}
// Check the interfaces are satisfied.
var _ fs.Object = &Object{}
// newObjectFromUplink creates a new object from a Tardigrade uplink object.
func newObjectFromUplink(f *Fs, relative string, object *uplink.Object) *Object {
// Attempt to use the modified time from the metadata. Otherwise
// fallback to the server time.
modified := object.System.Created
if modifiedStr, ok := object.Custom["rclone:mtime"]; ok {
var err error
modified, err = time.Parse(time.RFC3339Nano, modifiedStr)
if err != nil {
modified = object.System.Created
}
}
bucketName, _ := bucket.Split(path.Join(f.root, relative))
return &Object{
fs: f,
absolute: norm.NFC.String(bucketName + "/" + object.Key),
size: object.System.ContentLength,
created: object.System.Created,
modified: modified,
}
}
// String returns a description of the Object
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
// It is possible that we have an empty root (meaning the filesystem is
// rooted at the project level). In this case the relative path is just
// the full absolute path to the object (including the bucket name).
if o.fs.root == "" {
return o.absolute
}
// At this point we know that the filesystem itself is at least a
// bucket name (and possibly a prefix path).
//
// . This is necessary to remove the slash.
// |
// v
return o.absolute[len(o.fs.root)+1:]
}
// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modified
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return o.size
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.fs
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ty hash.Type) (_ string, err error) {
fs.Debugf(o, "%s", ty)
return "", hash.ErrUnsupported
}
// Storable says whether this object can be stored
func (o *Object) Storable() bool {
return true
}
// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
fs.Debugf(o, "touch -d %q sj://%s", t, o.absolute)
return fs.ErrorCantSetModTime
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (_ io.ReadCloser, err error) {
fs.Debugf(o, "cat sj://%s # %+v", o.absolute, options)
bucketName, bucketPath := bucket.Split(o.absolute)
// Convert the semantics of HTTP range headers to an offset and length
// that libuplink can use.
var (
offset int64 = 0
length int64 = -1
)
for _, option := range options {
switch opt := option.(type) {
case *fs.RangeOption:
s := opt.Start >= 0
e := opt.End >= 0
switch {
case s && e:
offset = opt.Start
length = (opt.End + 1) - opt.Start
case s && !e:
offset = opt.Start
case !s && e:
object, err := o.fs.project.StatObject(ctx, bucketName, bucketPath)
if err != nil {
return nil, err
}
offset = object.System.ContentLength - opt.End
length = opt.End
}
case *fs.SeekOption:
offset = opt.Offset
default:
if option.Mandatory() {
fs.Errorf(o, "Unsupported mandatory option: %v", option)
return nil, errors.New("unsupported mandatory option")
}
}
}
fs.Debugf(o, "range %d + %d", offset, length)
return o.fs.project.DownloadObject(ctx, bucketName, bucketPath, &uplink.DownloadOptions{
Offset: offset,
Length: length,
})
}
// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
fs.Debugf(o, "cp input ./%s %+v", src.Remote(), options)
oNew, err := o.fs.Put(ctx, in, src, options...)
if err == nil {
*o = *(oNew.(*Object))
}
return err
}
// Remove this object.
func (o *Object) Remove(ctx context.Context) (err error) {
fs.Debugf(o, "rm sj://%s", o.absolute)
bucketName, bucketPath := bucket.Split(o.absolute)
_, err = o.fs.project.DeleteObject(ctx, bucketName, bucketPath)
return err
}
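Editor's note: Open above converts rclone range options, which use HTTP semantics (inclusive start and end, a negative value meaning "unset"), into the offset and length libuplink expects. A worked, self-contained sketch of that conversion with simplified stand-in types.

```go
package main

import "fmt"

type rangeOption struct{ Start, End int64 }

// toOffsetLength maps HTTP-style inclusive ranges onto an (offset, length)
// pair; a negative Start means a suffix range of the last End bytes.
func toOffsetLength(opt rangeOption, size int64) (offset, length int64) {
	offset, length = 0, -1
	s, e := opt.Start >= 0, opt.End >= 0
	switch {
	case s && e:
		offset = opt.Start
		length = (opt.End + 1) - opt.Start
	case s && !e:
		offset = opt.Start
	case !s && e:
		offset = size - opt.End
		length = opt.End
	}
	return offset, length
}

func main() {
	fmt.Println(toOffsetLength(rangeOption{Start: 100, End: 199}, 1000)) // 100 100
	fmt.Println(toOffsetLength(rangeOption{Start: -1, End: 50}, 1000))   // 950 50
}
```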

View File

@@ -1,19 +0,0 @@
// +build go1.13,!plan9
// Test Tardigrade filesystem interface
package tardigrade_test
import (
"testing"
"github.com/rclone/rclone/backend/tardigrade"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestTardigrade:",
NilObject: (*tardigrade.Object)(nil),
})
}

View File

@@ -1,3 +0,0 @@
// +build !go1.13 plan9
package tardigrade

View File

@@ -13,7 +13,7 @@ func init() {
registerPolicy("epall", &EpAll{})
}
// EpAll stands for existing path, all
// EpAll stands for existing path, All
// Action category: apply to all found.
// Create category: apply to all found.
// Search category: same as epff.

View File

@@ -11,7 +11,7 @@ func init() {
registerPolicy("lus", &Lus{})
}
// Lus stands for least used space
// Lus stands for least free space
// Search category: same as eplus.
// Action category: same as eplus.
// Create category: Pick the drive with the least used space.

View File

@@ -18,7 +18,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
)
// Register with Fs
@@ -48,7 +47,7 @@ func init() {
Default: "ff",
}, {
Name: "cache_time",
Help: "Cache time of usage and free space (in seconds). This option is only useful when a path preserving policy is used.",
Help: "Cache time of usage and free space (in seconds)",
Required: true,
Default: 120,
}},
@@ -201,27 +200,19 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantCopy
}
o := srcObj.UnWrap()
su := o.UpstreamFs()
if su.Features().Copy == nil {
u := o.UpstreamFs()
do := u.Features().Copy
if do == nil {
return nil, fs.ErrorCantCopy
}
var du *upstream.Fs
for _, u := range f.upstreams {
if operations.Same(u.RootFs, su.RootFs) {
du = u
}
}
if du == nil {
return nil, fs.ErrorCantCopy
}
if !du.IsCreatable() {
if !u.IsCreatable() {
return nil, fs.ErrorPermissionDenied
}
co, err := du.Features().Copy(ctx, o, remote)
co, err := do(ctx, o, remote)
if err != nil || co == nil {
return nil, err
}
wo, err := f.wrapEntries(du.WrapObject(co))
wo, err := f.wrapEntries(u.WrapObject(co))
return wo.(*Object), err
}
@@ -252,28 +243,18 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
objs := make([]*upstream.Object, len(entries))
errs := Errors(make([]error, len(entries)))
multithread(len(entries), func(i int) {
su := entries[i].UpstreamFs()
u := entries[i].UpstreamFs()
o, ok := entries[i].(*upstream.Object)
if !ok {
errs[i] = errors.Wrap(fs.ErrorNotAFile, su.Name())
errs[i] = errors.Wrap(fs.ErrorNotAFile, u.Name())
return
}
var du *upstream.Fs
for _, u := range f.upstreams {
if operations.Same(u.RootFs, su.RootFs) {
du = u
}
}
if du == nil {
errs[i] = errors.Wrap(fs.ErrorCantMove, su.Name()+":"+remote)
return
}
mo, err := du.Features().Move(ctx, o.UnWrap(), remote)
mo, err := u.Features().Move(ctx, o.UnWrap(), remote)
if err != nil || mo == nil {
errs[i] = errors.Wrap(err, su.Name())
errs[i] = errors.Wrap(err, u.Name())
return
}
objs[i] = du.WrapObject(mo)
objs[i] = u.WrapObject(mo)
})
var en []upstream.Entry
for _, o := range objs {
@@ -316,7 +297,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
su := upstreams[i]
var du *upstream.Fs
for _, u := range f.upstreams {
if operations.Same(u.RootFs, su.RootFs) {
if u.RootFs.Root() == su.RootFs.Root() {
du = u
}
}
@@ -580,67 +561,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return f.mergeDirEntries(entriess)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
for _, u := range f.upstreams {
if u.Features().ListR == nil {
return errors.Errorf("ListR Unsupported for branch: %s", u.Name())
}
}
var entriess [][]upstream.Entry
errs := Errors(make([]error, len(f.upstreams)))
var mutex sync.Mutex
multithread(len(f.upstreams), func(i int) {
u := f.upstreams[i]
err := u.Features().ListR(ctx, dir, func(entries fs.DirEntries) error {
uEntries := make([]upstream.Entry, len(entries))
for j, e := range entries {
uEntries[j], _ = u.WrapEntry(e)
}
mutex.Lock()
entriess = append(entriess, uEntries)
mutex.Unlock()
return nil
})
if err != nil {
errs[i] = errors.Wrap(err, u.Name())
return
}
})
if len(errs) == len(errs.FilterNil()) {
errs = errs.Map(func(e error) error {
if errors.Cause(e) == fs.ErrorDirNotFound {
return nil
}
return e
})
if len(errs) == 0 {
return fs.ErrorDirNotFound
}
return errs.Err()
}
entries, err := f.mergeDirEntries(entriess)
if err != nil {
return err
}
return callback(entries)
}
// NewObject creates a new remote union file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
objs := make([]*upstream.Object, len(f.upstreams))
@@ -810,7 +730,36 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
GetTier: true,
}).Fill(f)
for _, f := range upstreams {
features = features.Mask(f) // Mask all upstream fs
if !f.IsWritable() {
continue
}
features = features.Mask(f) // Mask all writable upstream fs
}
// Really need the union of all upstreams for these, so
// re-instate and calculate separately.
features.ChangeNotify = f.ChangeNotify
features.DirCacheFlush = f.DirCacheFlush
// FIXME maybe should be masking the bools here?
// Clear ChangeNotify and DirCacheFlush if all are nil
clearChangeNotify := true
clearDirCacheFlush := true
for _, u := range f.upstreams {
uFeatures := u.Features()
if uFeatures.ChangeNotify != nil {
clearChangeNotify = false
}
if uFeatures.DirCacheFlush != nil {
clearDirCacheFlush = false
}
}
if clearChangeNotify {
features.ChangeNotify = nil
}
if clearDirCacheFlush {
features.DirCacheFlush = nil
}
f.features = features
@@ -857,5 +806,4 @@ var (
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
)
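
Editor's note: the Copy, Move and DirMove hunks above all resolve a destination upstream by matching roots. A minimal sketch of that lookup, assuming the union package's types used in this diff (the helper name findUpstream is hypothetical):

    // findUpstream mirrors the loop in the Copy/Move hunks: pick the configured
    // upstream whose RootFs matches the source object's upstream. The caller
    // still checks IsCreatable() before writing to it.
    func (f *Fs) findUpstream(su *upstream.Fs) *upstream.Fs {
        for _, u := range f.upstreams {
            if operations.Same(u.RootFs, su.RootFs) {
                return u
            }
        }
        return nil
    }

A nil result is what the hunks above translate into fs.ErrorCantCopy or fs.ErrorCantMove.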

View File

@@ -1135,7 +1135,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
NoResponse: true,
ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
ContentType: fs.MimeType(ctx, src),
Options: options,
}
if o.fs.useOCMtime || o.fs.hasMD5 || o.fs.hasSHA1 {
opts.ExtraHeaders = map[string]string{}

View File

@@ -1065,7 +1065,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return resp.Body, err
}
func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeType string, options ...fs.OpenOption) (err error) {
func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeType string) (err error) {
// prepare upload
var resp *http.Response
var ur api.AsyncInfo
@@ -1073,7 +1073,6 @@ func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeT
Method: "GET",
Path: "/resources/upload",
Parameters: url.Values{},
Options: options,
}
opts.Parameters.Set("path", o.fs.opt.Enc.FromStandardPath(o.filePath()))
@@ -1122,7 +1121,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
//upload file
err = o.upload(ctx, in1, true, fs.MimeType(ctx, src), options...)
err = o.upload(ctx, in1, true, fs.MimeType(ctx, src))
if err != nil {
return err
}

View File

@@ -1,3 +0,0 @@
# Email addresses to ignore in the git log when making the authors.md file
<nick@raig-wood.com>
<anaghk.dos@gmail.com>

View File

@@ -182,7 +182,6 @@ func compileArch(version, goos, goarch, dir string) bool {
args := []string{
"go", "build",
"--ldflags", "-s -X github.com/rclone/rclone/fs.Version=" + version,
"-trimpath",
"-i",
"-o", output,
"-tags", *tags,

View File

@@ -4,7 +4,6 @@ Make backend documentation
"""
import os
import io
import subprocess
marker = "<!--- autogenerated options"
@@ -20,11 +19,6 @@ def output_docs(backend, out):
out.flush()
subprocess.check_call(["rclone", "help", "backend", backend], stdout=out)
def output_backend_tool_docs(backend, out):
"""Output documentation for backend tool to out"""
out.flush()
subprocess.call(["rclone", "backend", "help", backend], stdout=out, stderr=subprocess.DEVNULL)
def alter_doc(backend):
"""Alter the documentation for backend"""
doc_file = "docs/content/"+backend+".md"
@@ -41,7 +35,6 @@ def alter_doc(backend):
start_full = start + " - DO NOT EDIT, instead edit fs.RegInfo in backend/%s/%s.go then run make backenddocs -->\n" % (backend, backend)
out_file.write(start_full)
output_docs(backend, out_file)
output_backend_tool_docs(backend, out_file)
out_file.write(stop+" -->\n")
altered = True
if not in_docs:

View File

@@ -54,10 +54,8 @@ docs = [
"pcloud.md",
"premiumizeme.md",
"putio.md",
"seafile.md",
"sftp.md",
"sugarsync.md",
"tardigrade.md",
"union.md",
"webdav.md",
"yandex.md",

View File

@@ -7,15 +7,17 @@ import re
import subprocess
AUTHORS = "docs/content/authors.md"
IGNORE = "bin/.ignore-emails"
IGNORE = [ "nick@raig-wood.com" ]
def load(filename):
def load():
"""
returns a set of emails already in the file
returns a set of emails already in authors.md
"""
with open(filename) as fd:
with open(AUTHORS) as fd:
authors = fd.read()
return set(re.findall(r"<(.*?)>", authors))
emails = set(re.findall(r"<(.*?)>", authors))
emails.update(IGNORE)
return emails
def add_email(name, email):
"""
@@ -30,9 +32,7 @@ def main():
out = subprocess.check_output(["git", "log", '--reverse', '--format=%an|%ae', "master"])
out = out.decode("utf-8")
ignored = load(IGNORE)
previous = load(AUTHORS)
previous.update(ignored)
previous = load()
for line in out.split("\n"):
line = line.strip()
if line == "":

View File

@@ -6,7 +6,6 @@ import (
_ "github.com/rclone/rclone/cmd"
_ "github.com/rclone/rclone/cmd/about"
_ "github.com/rclone/rclone/cmd/authorize"
_ "github.com/rclone/rclone/cmd/backend"
_ "github.com/rclone/rclone/cmd/cachestats"
_ "github.com/rclone/rclone/cmd/cat"
_ "github.com/rclone/rclone/cmd/check"

View File

@@ -1,169 +0,0 @@
package backend
import (
"context"
"encoding/json"
"fmt"
"os"
"sort"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/rc"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
var (
options []string
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.StringArrayVarP(cmdFlags, &options, "option", "o", options, "Option in the form name=value or name.")
}
var commandDefinition = &cobra.Command{
Use: "backend <command> remote:path [opts] <args>",
Short: `Run a backend specific command.`,
Long: `
This runs a backend specific command. The commands themselves (except
for "help" and "features") are defined by the backends and you should
see the backend docs for definitions.
You can discover what commands a backend implements by using
rclone backend help remote:
rclone backend help <backendname>
You can also discover information about the backend using (see
[operations/fsinfo](/rc/#operations/fsinfo) in the remote control docs
for more info).
rclone backend features remote:
Pass options to the backend command with -o. This should be key=value or key, eg:
rclone backend stats remote:path stats -o format=json -o long
Pass arguments to the backend by placing them on the end of the line
rclone backend cleanup remote:path file1 file2 file3
Note to run these commands on a running backend then see
[backend/command](/rc/#backend/command) in the rc docs.
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(2, 1E6, command, args)
name, remote := args[0], args[1]
cmd.Run(false, false, command, func() error {
// show help if remote is a backend name
if name == "help" {
fsInfo, err := fs.Find(remote)
if err == nil {
return showHelp(fsInfo)
}
}
// Create remote
fsInfo, configName, fsPath, config, err := fs.ConfigFs(remote)
if err != nil {
return err
}
f, err := fsInfo.NewFs(configName, fsPath, config)
if err != nil {
return err
}
// Run the command
var out interface{}
switch name {
case "help":
return showHelp(fsInfo)
case "features":
out = operations.GetFsInfo(f)
default:
doCommand := f.Features().Command
if doCommand == nil {
return errors.Errorf("%v: doesn't support backend commands", f)
}
arg := args[2:]
opt := rc.ParseOptions(options)
out, err = doCommand(context.Background(), name, arg, opt)
}
if err != nil {
return errors.Wrapf(err, "command %q failed", name)
}
// Output the result
switch x := out.(type) {
case nil:
case string:
fmt.Println(out)
case []string:
for _, line := range x {
fmt.Println(line)
}
default:
// Write indented JSON to the output
enc := json.NewEncoder(os.Stdout)
enc.SetIndent("", "\t")
err = enc.Encode(out)
if err != nil {
return errors.Wrap(err, "failed to write JSON")
}
}
return nil
})
return nil
},
}
// show help for a backend
func showHelp(fsInfo *fs.RegInfo) error {
cmds := fsInfo.CommandHelp
name := fsInfo.Name
if len(cmds) == 0 {
return errors.Errorf("%s backend has no commands", name)
}
fmt.Printf("### Backend commands\n\n")
fmt.Printf(`Here are the commands specific to the %s backend.
Run them with
rclone backend COMMAND remote:
The help below will explain what arguments each command takes.
See [the "rclone backend" command](/commands/rclone_backend/) for more
info on how to pass options and arguments.
These can be run on a running backend using the rc command
[backend/command](/rc/#backend/command).
`, name)
for _, cmd := range cmds {
fmt.Printf("#### %s\n\n", cmd.Name)
fmt.Printf("%s\n\n", cmd.Short)
fmt.Printf(" rclone backend %s remote: [options] [<arguments>+]\n\n", cmd.Name)
if cmd.Long != "" {
fmt.Printf("%s\n\n", cmd.Long)
}
if len(cmd.Opts) != 0 {
fmt.Printf("Options:\n\n")
ks := []string{}
for k := range cmd.Opts {
ks = append(ks, k)
}
sort.Strings(ks)
for _, k := range ks {
v := cmd.Opts[k]
fmt.Printf("- %q: %s\n", k, v)
}
fmt.Printf("\n")
}
}
return nil
}
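
Editor's note: a rough sketch of driving the same machinery from Go rather than through cobra, using the optional Command feature exactly as the handler above does; the command name "stats" and the option map are illustrative only:

    // f is an fs.Fs created elsewhere; ctx is a context.Context.
    doCommand := f.Features().Command
    if doCommand == nil {
        return errors.Errorf("%v: doesn't support backend commands", f)
    }
    out, err := doCommand(ctx, "stats", nil, map[string]string{"format": "json"})
    if err != nil {
        return errors.Wrapf(err, "command %q failed", "stats")
    }
    fmt.Printf("%+v\n", out)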

View File

@@ -9,7 +9,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/cache"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/spf13/cobra"
)
@@ -23,10 +22,8 @@ var commandDefinition = &cobra.Command{
Long: `
Print cache stats for a remote in JSON format
`,
Hidden: true,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fs.Logf(nil, `"rclone cachestats" is deprecated, use "rclone backend stats %s" instead`, args[0])
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {

View File

@@ -13,7 +13,6 @@ import (
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"runtime"
"runtime/pprof"
@@ -67,7 +66,6 @@ const (
exitCodeNoRetryError
exitCodeFatalError
exitCodeTransferExceeded
exitCodeNoFilesTransferred
)
// ShowVersion prints the version to stdout
@@ -314,7 +312,6 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
}
}
resolveExitCode(cmdErr)
}
// CheckArgs checks there are enough arguments and prints a message if not
@@ -433,11 +430,6 @@ func initConfig() {
func resolveExitCode(err error) {
atexit.Run()
if err == nil {
if fs.Config.ErrorOnNoTransfer {
if accounting.GlobalStats().GetTransfers() == 0 {
os.Exit(exitCodeNoFilesTransferred)
}
}
os.Exit(exitCodeSuccess)
}
@@ -504,59 +496,9 @@ func AddBackendFlags() {
}
}
// If rclone was invoked as "rclone.mount" act as if we are a mount
// script.
func invokedAsMount() {
if filepath.Base(os.Args[0]) != "rclone.mount" {
return
}
log.Printf("Rclone invoked as %v", os.Args)
if len(os.Args) < 3 {
log.Printf("Too few arguments - need 3")
os.Exit(1)
}
remote, mountpoint := os.Args[1], os.Args[2]
newArgs := []string{"rclone", "mount"}
// Do a quick and nasty job parsing the arguments
option := false
for _, arg := range os.Args[1:] {
if arg == "-o" {
option = true
} else if option {
values := strings.Split(arg, ",")
for _, value := range values {
switch {
case value == "rw":
case value == "ro":
case value == "dev":
case value == "suid":
case value == "exec":
case value == "auto":
case value == "nodev":
case value == "nosuid":
case value == "noexec":
case value == "noauto":
case strings.HasPrefix(value, "x-systemd"):
default:
newArgs = append(newArgs, "--"+value)
}
}
} else {
log.Printf("Ignoring argument %q", arg)
}
}
newArgs = append(newArgs, remote, mountpoint)
log.Printf("Translated args: %v", newArgs)
os.Args = newArgs
}
// Main runs rclone interpreting flags and commands out of os.Args
func Main() {
rand.Seed(time.Now().Unix())
invokedAsMount()
setupRootCommand(Root)
AddBackendFlags()
if err := Root.Execute(); err != nil {

View File

@@ -251,14 +251,7 @@ func (fsys *FS) Readdir(dirPath string,
fs.Errorf(dirPath, "Name too long (%d bytes) for FUSE, skipping: %s", len(name), name)
continue
}
if usingReaddirPlus {
// We have called host.SetCapReaddirPlus() so supply the stat information
var stat fuse.Stat_t
_ = fsys.stat(node, &stat) // not capable of returning an error
fill(name, &stat, 0)
} else {
fill(name, nil, 0)
}
fill(name, nil, 0)
}
}
itemsRead = len(items)
@@ -275,15 +268,25 @@ func (fsys *FS) Releasedir(path string, fh uint64) (errc int) {
func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
defer log.Trace(path, "")("stat=%+v, errc=%d", stat, &errc)
const blockSize = 4096
total, _, free := fsys.VFS.Statfs()
stat.Blocks = uint64(total) / blockSize // Total data blocks in file system.
stat.Bfree = uint64(free) / blockSize // Free blocks in file system.
stat.Bavail = stat.Bfree // Free blocks in file system if you're not root.
stat.Files = 1e9 // Total files in file system.
stat.Ffree = 1e9 // Free files in file system.
stat.Bsize = blockSize // Block size
stat.Namemax = 255 // Maximum file name length?
stat.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
const fsBlocks = (1 << 50) / blockSize
stat.Blocks = fsBlocks // Total data blocks in file system.
stat.Bfree = fsBlocks // Free blocks in file system.
stat.Bavail = fsBlocks // Free blocks in file system if you're not root.
stat.Files = 1e9 // Total files in file system.
stat.Ffree = 1e9 // Free files in file system.
stat.Bsize = blockSize // Block size
stat.Namemax = 255 // Maximum file name length?
stat.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
total, used, free := fsys.VFS.Statfs()
if total >= 0 {
stat.Blocks = uint64(total) / blockSize
}
if used >= 0 {
stat.Bfree = stat.Blocks - uint64(used)/blockSize
}
if free >= 0 {
stat.Bavail = uint64(free) / blockSize
}
mountlib.ClipBlocks(&stat.Blocks)
mountlib.ClipBlocks(&stat.Bfree)
mountlib.ClipBlocks(&stat.Bavail)
@@ -368,7 +371,12 @@ func (fsys *FS) Write(path string, buff []byte, ofst int64, fh uint64) (n int) {
if errc != 0 {
return errc
}
n, err := handle.WriteAt(buff, ofst)
var err error
if fsys.VFS.Opt.CacheMode < vfs.CacheModeWrites || handle.Node().Mode()&os.ModeAppend == 0 {
n, err = handle.WriteAt(buff, ofst)
} else {
n, err = handle.Write(buff)
}
if err != nil {
return translateError(err)
}
@@ -545,11 +553,11 @@ func translateError(err error) (errc int) {
switch errors.Cause(err) {
case vfs.OK:
return 0
case vfs.ENOENT, fs.ErrorDirNotFound, fs.ErrorObjectNotFound:
case vfs.ENOENT:
return -fuse.ENOENT
case vfs.EEXIST, fs.ErrorDirExists:
case vfs.EEXIST:
return -fuse.EEXIST
case vfs.EPERM, fs.ErrorPermissionDenied:
case vfs.EPERM:
return -fuse.EPERM
case vfs.ECLOSED:
return -fuse.EBADF
@@ -561,7 +569,7 @@ func translateError(err error) (errc int) {
return -fuse.EBADF
case vfs.EROFS:
return -fuse.EROFS
case vfs.ENOSYS, fs.ErrorNotImplemented:
case vfs.ENOSYS:
return -fuse.ENOSYS
case vfs.EINVAL:
return -fuse.EINVAL

View File

@@ -26,13 +26,6 @@ import (
"github.com/rclone/rclone/vfs/vfsflags"
)
const (
// SetCapReaddirPlus informs the host that the hosted file system has the readdir-plus
// capability [Windows only]. A file system that has the readdir-plus capability can send
// full stat information during Readdir, thus avoiding extraneous Getattr calls.
usingReaddirPlus = runtime.GOOS == "windows"
)
func init() {
name := "cmount"
if runtime.GOOS == "windows" {
@@ -149,10 +142,6 @@ func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, er
// Create underlying FS
fsys := NewFS(f)
host := fuse.NewFileSystemHost(fsys)
if usingReaddirPlus {
host.SetCapReaddirPlus(true)
}
host.SetCapCaseInsensitive(f.Features().CaseInsensitive)
// Create options
options := mountOptions(f.Name()+":"+f.Root(), mountpoint)

View File

@@ -11,9 +11,9 @@ package cmount
import (
"testing"
"github.com/rclone/rclone/vfs/vfstest"
"github.com/rclone/rclone/cmd/mountlib/mounttest"
)
func TestMount(t *testing.T) {
vfstest.RunTests(t, false, mount)
mounttest.RunTests(t, mount)
}

View File

@@ -66,8 +66,8 @@ option when copying a small number of files into a large destination
can speed transfers up greatly.
For example, if you have many files in /path/to/src but only a few of
them change every day, you can copy all the files which have changed
recently very efficiently like this:
them change every day, you can to copy all the files which have
changed recently very efficiently like this:
rclone copy --max-age 24h --no-traverse /path/to/src remote:

View File

@@ -15,14 +15,12 @@ import (
var (
autoFilename = false
stdout = false
noClobber = false
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &autoFilename, "auto-filename", "a", autoFilename, "Get the file name from the URL and use it for destination file path")
flags.BoolVarP(cmdFlags, &noClobber, "no-clobber", "", noClobber, "Prevent overwriting file with same name")
flags.BoolVarP(cmdFlags, &stdout, "stdout", "", stdout, "Write the output to stdout rather than a file")
}
@@ -37,9 +35,6 @@ Setting --auto-filename will cause the file name to be retrieved from
the from URL (after any redirections) and used in the destination
path.
Setting --no-clobber will prevent overwriting file on the
destination if there is one with the same name.
Setting --stdout or making the output file name "-" will cause the
output to be written to standard output.
`,
@@ -64,7 +59,7 @@ output to be written to standard output.
if stdout {
err = operations.CopyURLToWriter(context.Background(), args[0], os.Stdout)
} else {
_, err = operations.CopyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename, noClobber)
_, err = operations.CopyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename)
}
return err
})

View File

@@ -110,6 +110,7 @@ func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
fs.Errorf(src, err.Error())
return true, false
}
fs.Debugf(src, "OK")
return false, false
}

View File

@@ -60,7 +60,7 @@ use it like this
}
// cryptDecode returns the unencrypted file name
func cryptDecode(cipher *crypt.Cipher, args []string) error {
func cryptDecode(cipher crypt.Cipher, args []string) error {
output := ""
for _, encryptedFileName := range args {
@@ -78,7 +78,7 @@ func cryptDecode(cipher *crypt.Cipher, args []string) error {
}
// cryptEncode returns the encrypted file name
func cryptEncode(cipher *crypt.Cipher, args []string) error {
func cryptEncode(cipher crypt.Cipher, args []string) error {
output := ""
for _, fileName := range args {

View File

@@ -6,7 +6,6 @@ import (
"github.com/rclone/rclone/backend/dropbox"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -24,11 +23,9 @@ hashes are calculated according to [Dropbox content hash
rules](https://www.dropbox.com/developers/reference/content-hash).
The output is in the same format as md5sum and sha1sum.
`,
Hidden: true,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
fs.Logf(nil, `"rclone dbhashsum" is deprecated, use "rclone hashsum %v %s" instead`, dropbox.DbHashType, args[0])
cmd.Run(false, false, command, func() error {
return operations.HashLister(context.Background(), dropbox.DbHashType, fsrc, os.Stdout)
})

View File

@@ -4,19 +4,12 @@ import (
"context"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
var (
rmdirs = false
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &rmdirs, "rmdirs", "", rmdirs, "rmdirs removes empty directories but leaves root intact")
}
var commandDefinition = &cobra.Command{
@@ -30,8 +23,6 @@ filters so can be used to selectively delete files.
alone. If you want to delete a directory and all of its contents use
` + "`" + `rclone purge` + "`" + `
If you supply the --rmdirs flag, it will remove all empty directories along with it.
Eg delete all files bigger than 100MBytes
Check what would be deleted first (use either)
@@ -50,14 +41,7 @@ delete all files bigger than 100MBytes.
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(true, false, command, func() error {
if err := operations.Delete(context.Background(), fsrc); err != nil {
return err
}
if rmdirs {
fdst := cmd.NewFsDir(args)
return operations.Rmdirs(context.Background(), fdst, "", true)
}
return nil
return operations.Delete(context.Background(), fsrc)
})
},
}

View File

@@ -1,44 +0,0 @@
package genautocomplete
import (
"log"
"github.com/rclone/rclone/cmd"
"github.com/spf13/cobra"
)
func init() {
completionDefinition.AddCommand(fishCommandDefinition)
}
var fishCommandDefinition = &cobra.Command{
Use: "fish [output_file]",
Short: `Output fish completion script for rclone.`,
Long: `
Generates a fish autocompletion script for rclone.
This writes to /etc/fish/completions/rclone.fish by default so will
probably need to be run with sudo or as root, eg
sudo rclone genautocomplete fish
Logout and login again to use the autocompletion scripts, or source
them directly
. /etc/fish/completions/rclone.fish
If you supply a command line argument the script will be written
there.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
out := "/etc/fish/completions/rclone.fish"
if len(args) > 0 {
out = args[0]
}
err := cmd.Root.GenFishCompletionFile(out, true)
if err != nil {
log.Fatal(err)
}
},
}

View File

@@ -33,16 +33,3 @@ func TestCompletionZsh(t *testing.T) {
assert.NoError(t, err)
assert.NotEmpty(t, string(bs))
}
func TestCompletionFish(t *testing.T) {
tempFile, err := ioutil.TempFile("", "completion_fish")
assert.NoError(t, err)
defer func() { _ = tempFile.Close() }()
defer func() { _ = os.Remove(tempFile.Name()) }()
fishCommandDefinition.Run(fishCommandDefinition, []string{tempFile.Name()})
bs, err := ioutil.ReadFile(tempFile.Name())
assert.NoError(t, err)
assert.NotEmpty(t, string(bs))
}

View File

@@ -132,13 +132,13 @@ Eg
"this file contains a comma, in the file name.txt",6
Note that the --absolute parameter is useful for making lists of files
to pass to an rclone copy with the --files-from-raw flag.
to pass to an rclone copy with the --files-from flag.
For example to find all the files modified within one day and copy
those only (without traversing the whole directory structure):
rclone lsf --absolute --files-only --max-age 1d /path/to/local > new_files
rclone copy --files-from-raw new_files /path/to/local remote:path
rclone copy --files-from new_files /path/to/local remote:path
` + lshelp.Help,
Run: func(command *cobra.Command, args []string) {
@@ -185,7 +185,6 @@ func Lsf(ctx context.Context, fsrc fs.Fs, out io.Writer) error {
case 'h':
list.AddHash(hashType)
opt.ShowHash = true
opt.HashTypes = []string{hashType.String()}
case 'i':
list.AddID()
case 'm':

View File

@@ -29,7 +29,6 @@ func init() {
flags.BoolVarP(cmdFlags, &opt.ShowOrigIDs, "original", "", false, "Show the ID of the underlying Object.")
flags.BoolVarP(cmdFlags, &opt.FilesOnly, "files-only", "", false, "Show only files in the listing.")
flags.BoolVarP(cmdFlags, &opt.DirsOnly, "dirs-only", "", false, "Show only directories in the listing.")
flags.StringArrayVarP(cmdFlags, &opt.HashTypes, "hash-type", "", nil, "Show only this hash type (may be repeated).")
}
var commandDefinition = &cobra.Command{
@@ -59,25 +58,17 @@ The output is an array of Items, where each Item looks like this
"Tier" : "hot",
}
If --hash is not specified the Hashes property won't be emitted. The
types of hash can be specified with the --hash-type parameter (which
may be repeated). If --hash-type is set then it implies --hash.
If --hash is not specified the Hashes property won't be emitted.
If --no-modtime is specified then ModTime will be blank. This can
speed things up on remotes where reading the ModTime takes an extra
request (eg s3, swift).
If --no-modtime is specified then ModTime will be blank. This can speed things up on remotes where reading the ModTime takes an extra request (eg s3, swift).
If --no-mimetype is specified then MimeType will be blank. This can
speed things up on remotes where reading the MimeType takes an extra
request (eg s3, swift).
If --no-mimetype is specified then MimeType will be blank. This can speed things up on remotes where reading the MimeType takes an extra request (eg s3, swift).
If --encrypted is not specified the Encrypted won't be emitted.
If --dirs-only is not specified files in addition to directories are
returned
If --dirs-only is not specified files in addition to directories are returned
If --files-only is not specified directories in addition to the files
will be returned.
If --files-only is not specified directories in addition to the files will be returned.
The Path field will only show folders below the remote path being listed.
If "remote:path" contains the file "subfolder/file.txt", the Path for "file.txt"

View File

@@ -76,23 +76,13 @@ func (d *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.Lo
return nil, translateError(err)
}
resp.EntryValid = mountlib.AttrTimeout
// Check the mnode to see if it has a fuse Node cached
// We must return the same fuse nodes for vfs Nodes
node, ok := mnode.Sys().(fusefs.Node)
if ok {
return node, nil
}
switch x := mnode.(type) {
case *vfs.File:
node = &File{x}
return &File{x}, nil
case *vfs.Dir:
node = &Dir{x}
default:
panic("bad type")
return &Dir{x}, nil
}
// Cache the node for later
mnode.SetSys(node)
return node, nil
panic("bad type")
}
// Check interface satisfied
@@ -139,9 +129,7 @@ func (d *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.Cr
if err != nil {
return nil, nil, translateError(err)
}
node = &File{file}
file.SetSys(node) // cache the FUSE node for later
return node, &FileHandle{fh}, err
return &File{file}, &FileHandle{fh}, err
}
var _ fusefs.NodeMkdirer = (*Dir)(nil)
@@ -153,9 +141,7 @@ func (d *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (node fusefs.No
if err != nil {
return nil, translateError(err)
}
node = &Dir{dir}
dir.SetSys(node) // cache the FUSE node for later
return node, nil
return &Dir{dir}, nil
}
var _ fusefs.NodeRemover = (*Dir)(nil)
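
Editor's note: a compact sketch of the Sys()/SetSys() caching that appears in the Lookup hunk above, so each vfs.Node always resolves to the same FUSE node; wrap stands in for the &File{}/&Dir{} construction:

    func cachedFuseNode(vnode vfs.Node, wrap func(vfs.Node) fusefs.Node) fusefs.Node {
        if n, ok := vnode.Sys().(fusefs.Node); ok {
            return n // reuse the node created by an earlier Lookup
        }
        n := wrap(vnode)
        vnode.SetSys(n) // remember it on the vfs node for later lookups
        return n
    }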

View File

@@ -54,15 +54,25 @@ var _ fusefs.FSStatfser = (*FS)(nil)
func (f *FS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) (err error) {
defer log.Trace("", "")("stat=%+v, err=%v", resp, &err)
const blockSize = 4096
total, _, free := f.VFS.Statfs()
resp.Blocks = uint64(total) / blockSize // Total data blocks in file system.
resp.Bfree = uint64(free) / blockSize // Free blocks in file system.
resp.Bavail = resp.Bfree // Free blocks in file system if you're not root.
resp.Files = 1e9 // Total files in file system.
resp.Ffree = 1e9 // Free files in file system.
resp.Bsize = blockSize // Block size
resp.Namelen = 255 // Maximum file name length?
resp.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
const fsBlocks = (1 << 50) / blockSize
resp.Blocks = fsBlocks // Total data blocks in file system.
resp.Bfree = fsBlocks // Free blocks in file system.
resp.Bavail = fsBlocks // Free blocks in file system if you're not root.
resp.Files = 1e9 // Total files in file system.
resp.Ffree = 1e9 // Free files in file system.
resp.Bsize = blockSize // Block size
resp.Namelen = 255 // Maximum file name length?
resp.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
total, used, free := f.VFS.Statfs()
if total >= 0 {
resp.Blocks = uint64(total) / blockSize
}
if used >= 0 {
resp.Bfree = resp.Blocks - uint64(used)/blockSize
}
if free >= 0 {
resp.Bavail = uint64(free) / blockSize
}
mountlib.ClipBlocks(&resp.Blocks)
mountlib.ClipBlocks(&resp.Bfree)
mountlib.ClipBlocks(&resp.Bavail)
@@ -77,11 +87,11 @@ func translateError(err error) error {
switch errors.Cause(err) {
case vfs.OK:
return nil
case vfs.ENOENT, fs.ErrorDirNotFound, fs.ErrorObjectNotFound:
case vfs.ENOENT:
return fuse.ENOENT
case vfs.EEXIST, fs.ErrorDirExists:
case vfs.EEXIST:
return fuse.EEXIST
case vfs.EPERM, fs.ErrorPermissionDenied:
case vfs.EPERM:
return fuse.EPERM
case vfs.ECLOSED:
return fuse.Errno(syscall.EBADF)
@@ -93,7 +103,7 @@ func translateError(err error) error {
return fuse.Errno(syscall.EBADF)
case vfs.EROFS:
return fuse.Errno(syscall.EROFS)
case vfs.ENOSYS, fs.ErrorNotImplemented:
case vfs.ENOSYS:
return fuse.ENOSYS
case vfs.EINVAL:
return fuse.Errno(syscall.EINVAL)
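
Editor's note: the Statfs hunks for cmount and mount reduce to the same block arithmetic; a small sketch with a hypothetical helper:

    // toBlocks converts a byte count from VFS.Statfs into 4096-byte blocks,
    // advertising a 1 PiB filesystem when the size is unknown (negative).
    func toBlocks(bytes int64) uint64 {
        const blockSize = 4096
        if bytes < 0 {
            return (1 << 50) / blockSize
        }
        return uint64(bytes) / blockSize
    }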

View File

@@ -5,6 +5,7 @@ package mount
import (
"context"
"io"
"os"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
@@ -41,7 +42,12 @@ var _ fusefs.HandleWriter = (*FileHandle)(nil)
// Write data to the file handle
func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
defer log.Trace(fh, "len=%d, offset=%d", len(req.Data), req.Offset)("written=%d, err=%v", &resp.Size, &err)
n, err := fh.Handle.WriteAt(req.Data, req.Offset)
var n int
if fh.Handle.Node().VFS().Opt.CacheMode < vfs.CacheModeWrites || fh.Handle.Node().Mode()&os.ModeAppend == 0 {
n, err = fh.Handle.WriteAt(req.Data, req.Offset)
} else {
n, err = fh.Handle.Write(req.Data)
}
if err != nil {
return translateError(err)
}
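
Editor's note: the Write hunks for mount and mount2 apply one rule; a sketch with handle, node, data and off standing in for the handler's arguments:

    // Positional WriteAt is used unless the file is open for append and the
    // cache mode is at least "writes", in which case sequential Write is used.
    if node.VFS().Opt.CacheMode < vfs.CacheModeWrites || node.Mode()&os.ModeAppend == 0 {
        n, err = handle.WriteAt(data, off)
    } else {
        n, err = handle.Write(data)
    }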

View File

@@ -6,12 +6,12 @@ import (
"runtime"
"testing"
"github.com/rclone/rclone/vfs/vfstest"
"github.com/rclone/rclone/cmd/mountlib/mounttest"
)
func TestMount(t *testing.T) {
if runtime.NumCPU() <= 2 {
t.Skip("FIXME skipping mount tests as they lock up on <= 2 CPUs - See: https://github.com/rclone/rclone/issues/3154")
}
vfstest.RunTests(t, false, mount)
mounttest.RunTests(t, mount)
}

View File

@@ -6,6 +6,7 @@ import (
"context"
"fmt"
"io"
"os"
"syscall"
fusefs "github.com/hanwen/go-fuse/v2/fs"
@@ -73,7 +74,11 @@ func (f *FileHandle) Write(ctx context.Context, data []byte, off int64) (written
var n int
var err error
defer log.Trace(f, "off=%d", off)("n=%d, off=%d, errno=%v", &n, &off, &errno)
n, err = f.h.WriteAt(data, off)
if f.h.Node().VFS().Opt.CacheMode < vfs.CacheModeWrites || f.h.Node().Mode()&os.ModeAppend == 0 {
n, err = f.h.WriteAt(data, off)
} else {
n, err = f.h.Write(data)
}
return uint32(n), translateError(err)
}

View File

@@ -105,11 +105,11 @@ func translateError(err error) syscall.Errno {
switch errors.Cause(err) {
case vfs.OK:
return 0
case vfs.ENOENT, fs.ErrorDirNotFound, fs.ErrorObjectNotFound:
case vfs.ENOENT:
return syscall.ENOENT
case vfs.EEXIST, fs.ErrorDirExists:
case vfs.EEXIST:
return syscall.EEXIST
case vfs.EPERM, fs.ErrorPermissionDenied:
case vfs.EPERM:
return syscall.EPERM
case vfs.ECLOSED:
return syscall.EBADF
@@ -121,7 +121,7 @@ func translateError(err error) syscall.Errno {
return syscall.EBADF
case vfs.EROFS:
return syscall.EROFS
case vfs.ENOSYS, fs.ErrorNotImplemented:
case vfs.ENOSYS:
return syscall.ENOSYS
case vfs.EINVAL:
return syscall.EINVAL

View File

@@ -5,9 +5,9 @@ package mount2
import (
"testing"
"github.com/rclone/rclone/vfs/vfstest"
"github.com/rclone/rclone/cmd/mountlib/mounttest"
)
func TestMount(t *testing.T) {
vfstest.RunTests(t, false, mount)
mounttest.RunTests(t, mount)
}

View File

@@ -27,20 +27,11 @@ type Node struct {
var _ fusefs.InodeEmbedder = (*Node)(nil)
// newNode creates a new fusefs.Node from a vfs Node
func newNode(fsys *FS, vfsNode vfs.Node) (node *Node) {
// Check the vfsNode to see if it has a fuse Node cached
// We must return the same fuse nodes for vfs Nodes
node, ok := vfsNode.Sys().(*Node)
if ok {
return node
}
node = &Node{
node: vfsNode,
func newNode(fsys *FS, node vfs.Node) *Node {
return &Node{
node: node,
fsys: fsys,
}
// Cache the node for later
vfsNode.SetSys(node)
return node
}
// String used for pretty printing.
@@ -192,7 +183,10 @@ func (n *Node) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (ino
if errno != 0 {
return nil, errno
}
newNode := newNode(n.fsys, vfsNode)
newNode := &Node{
node: vfsNode,
fsys: n.fsys,
}
// FIXME
// out.SetEntryTimeout(dt time.Duration)

View File

@@ -116,16 +116,13 @@ foreground mode by default, use the --daemon flag to specify background mode mod
Background mode is only supported on Linux and OSX, you can only run mount in
foreground mode on Windows.
On Linux/macOS/FreeBSD Start the mount like this where ` + "`/path/to/local/mount`" + `
is an **empty** **existing** directory.
Start the mount like this
rclone ` + commandName + ` remote:path/to/files /path/to/local/mount
Or on Windows like this where ` + "`X:`" + ` is an unused drive letter
or use a path to **non-existent** directory.
Or on Windows like this where X: is an unused drive letter
rclone ` + commandName + ` remote:path/to/files X:
rclone ` + commandName + ` remote:path/to/files C:\path\to\nonexistent\directory
When running in background mode the user will have to stop the mount manually (specified below).
@@ -315,8 +312,6 @@ be copied to the vfs cache before opening with --vfs-cache-mode full.
if err != nil {
log.Fatalf("Fatal error: %v", err)
}
} else if AllowNonEmpty && runtime.GOOS == "windows" {
fs.Logf(nil, "--allow-non-empty flag does nothing on Windows")
}
// Work out the volume name, removing special
@@ -353,7 +348,7 @@ be copied to the vfs cache before opening with --vfs-cache-mode full.
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &DebugFUSE, "debug-fuse", "", DebugFUSE, "Debug the FUSE internals - needs -v.")
// mount options
flags.BoolVarP(cmdFlags, &AllowNonEmpty, "allow-non-empty", "", AllowNonEmpty, "Allow mounting over a non-empty directory (not Windows).")
flags.BoolVarP(cmdFlags, &AllowNonEmpty, "allow-non-empty", "", AllowNonEmpty, "Allow mounting over a non-empty directory.")
flags.BoolVarP(cmdFlags, &AllowRoot, "allow-root", "", AllowRoot, "Allow access to root user.")
flags.BoolVarP(cmdFlags, &AllowOther, "allow-other", "", AllowOther, "Allow access to other users.")
flags.BoolVarP(cmdFlags, &DefaultPermissions, "default-permissions", "", DefaultPermissions, "Makes kernel enforce access control based on the file mode.")

View File

@@ -1,7 +1,8 @@
package vfstest
package mounttest
import (
"context"
"os"
"testing"
"time"
@@ -36,7 +37,7 @@ func TestDirCreateAndRemoveDir(t *testing.T) {
run.checkDir(t, "dir/|dir/subdir/")
// Check we can't delete a directory with stuff in
err := run.os.Remove(run.path("dir"))
err := os.Remove(run.path("dir"))
assert.Error(t, err, "file exists")
// Now delete subdir then dir - should produce no errors
@@ -55,7 +56,7 @@ func TestDirCreateAndRemoveFile(t *testing.T) {
run.checkDir(t, "dir/|dir/file 6")
// Check we can't delete a directory with stuff in
err := run.os.Remove(run.path("dir"))
err := os.Remove(run.path("dir"))
assert.Error(t, err, "file exists")
// Now delete file
@@ -74,14 +75,14 @@ func TestDirRenameFile(t *testing.T) {
run.createFile(t, "file", "potato")
run.checkDir(t, "dir/|file 6")
err := run.os.Rename(run.path("file"), run.path("file2"))
err := os.Rename(run.path("file"), run.path("file2"))
require.NoError(t, err)
run.checkDir(t, "dir/|file2 6")
data := run.readFile(t, "file2")
assert.Equal(t, "potato", data)
err = run.os.Rename(run.path("file2"), run.path("dir/file3"))
err = os.Rename(run.path("file2"), run.path("dir/file3"))
require.NoError(t, err)
run.checkDir(t, "dir/|dir/file3 6")
@@ -102,11 +103,11 @@ func TestDirRenameEmptyDir(t *testing.T) {
run.mkdir(t, "dir1")
run.checkDir(t, "dir/|dir1/")
err := run.os.Rename(run.path("dir1"), run.path("dir/dir2"))
err := os.Rename(run.path("dir1"), run.path("dir/dir2"))
require.NoError(t, err)
run.checkDir(t, "dir/|dir/dir2/")
err = run.os.Rename(run.path("dir/dir2"), run.path("dir/dir3"))
err = os.Rename(run.path("dir/dir2"), run.path("dir/dir3"))
require.NoError(t, err)
run.checkDir(t, "dir/|dir/dir3/")
@@ -124,11 +125,11 @@ func TestDirRenameFullDir(t *testing.T) {
run.createFile(t, "dir1/potato.txt", "maris piper")
run.checkDir(t, "dir/|dir1/|dir1/potato.txt 11")
err := run.os.Rename(run.path("dir1"), run.path("dir/dir2"))
err := os.Rename(run.path("dir1"), run.path("dir/dir2"))
require.NoError(t, err)
run.checkDir(t, "dir/|dir/dir2/|dir/dir2/potato.txt 11")
err = run.os.Rename(run.path("dir/dir2"), run.path("dir/dir3"))
err = os.Rename(run.path("dir/dir2"), run.path("dir/dir3"))
require.NoError(t, err)
run.checkDir(t, "dir/|dir/dir3/|dir/dir3/potato.txt 11")
@@ -144,10 +145,10 @@ func TestDirModTime(t *testing.T) {
run.mkdir(t, "dir")
mtime := time.Date(2012, time.November, 18, 17, 32, 31, 0, time.UTC)
err := run.os.Chtimes(run.path("dir"), mtime, mtime)
err := os.Chtimes(run.path("dir"), mtime, mtime)
require.NoError(t, err)
info, err := run.os.Stat(run.path("dir"))
info, err := os.Stat(run.path("dir"))
require.NoError(t, err)
// avoid errors because of timezone differences
@@ -213,7 +214,7 @@ func TestDirCacheFlushOnDirRename(t *testing.T) {
run.readLocal(t, localDm, "")
assert.Equal(t, dm, localDm, "expected vs fuse mount")
err = run.os.Rename(run.path("dir"), run.path("rid"))
err = os.Rename(run.path("dir"), run.path("rid"))
require.NoError(t, err)
dm = newDirMap("rid/|rid/subdir/|rid/file 1")

View File

@@ -1,6 +1,7 @@
package vfstest
package mounttest
import (
"os"
"runtime"
"testing"
@@ -42,7 +43,7 @@ func TestRenameOpenHandle(t *testing.T) {
require.NoError(t, err)
// attempt to rename open file
err = run.os.Rename(path, path+"bla")
err = os.Rename(path, path+"bla")
require.NoError(t, err)
// close open writers to allow rename on remote to go through

View File

@@ -1,4 +1,4 @@
package vfstest
package mounttest
import (
"os"
@@ -6,7 +6,6 @@ import (
"testing"
"time"
"github.com/rclone/rclone/vfs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -18,10 +17,10 @@ func TestFileModTime(t *testing.T) {
run.createFile(t, "file", "123")
mtime := time.Date(2012, time.November, 18, 17, 32, 31, 0, time.UTC)
err := run.os.Chtimes(run.path("file"), mtime, mtime)
err := os.Chtimes(run.path("file"), mtime, mtime)
require.NoError(t, err)
info, err := run.os.Stat(run.path("file"))
info, err := os.Stat(run.path("file"))
require.NoError(t, err)
// avoid errors because of timezone differences
@@ -30,14 +29,14 @@ func TestFileModTime(t *testing.T) {
run.rm(t, "file")
}
// run.os.Create without opening for write too
func osCreate(name string) (vfs.OsFiler, error) {
return run.os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
// os.Create without opening for write too
func osCreate(name string) (*os.File, error) {
return os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
}
// run.os.Create with append
func osAppend(name string) (vfs.OsFiler, error) {
return run.os.OpenFile(name, os.O_WRONLY|os.O_APPEND, 0666)
// os.Create with append
func osAppend(name string) (*os.File, error) {
return os.OpenFile(name, os.O_WRONLY|os.O_APPEND, 0666)
}
// TestFileModTimeWithOpenWriters tests mod time on open files
@@ -56,7 +55,7 @@ func TestFileModTimeWithOpenWriters(t *testing.T) {
_, err = f.Write([]byte{104, 105})
require.NoError(t, err)
err = run.os.Chtimes(filepath, mtime, mtime)
err = os.Chtimes(filepath, mtime, mtime)
require.NoError(t, err)
err = f.Close()
@@ -64,7 +63,7 @@ func TestFileModTimeWithOpenWriters(t *testing.T) {
run.waitForWriters()
info, err := run.os.Stat(filepath)
info, err := os.Stat(filepath)
require.NoError(t, err)
// avoid errors because of timezone differences

View File

@@ -1,6 +1,6 @@
// Test suite for rclonefs
package vfstest
package mounttest
import (
"context"
@@ -23,8 +23,8 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -33,7 +33,7 @@ type (
// UnmountFn is called to unmount the file system
UnmountFn func() error
// MountFn is called to mount the file system
MountFn func(f fs.Fs, mountpoint string) (vfs *vfs.VFS, unmountResult <-chan error, unmount func() error, err error)
MountFn func(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, error)
)
var (
@@ -41,18 +41,16 @@ var (
)
// RunTests runs all the tests against all the VFS cache modes
//
// If useVFS is set then it runs the tests against a VFS rather than a mount
func RunTests(t *testing.T, useVFS bool, fn MountFn) {
func RunTests(t *testing.T, fn MountFn) {
mountFn = fn
flag.Parse()
cacheModes := []vfscommon.CacheMode{
vfscommon.CacheModeOff,
vfscommon.CacheModeMinimal,
vfscommon.CacheModeWrites,
vfscommon.CacheModeFull,
cacheModes := []vfs.CacheMode{
vfs.CacheModeOff,
vfs.CacheModeMinimal,
vfs.CacheModeWrites,
vfs.CacheModeFull,
}
run = newRun(useVFS)
run = newRun()
for _, cacheMode := range cacheModes {
run.cacheMode(cacheMode)
log.Printf("Starting test run with cache mode %v", cacheMode)
@@ -94,9 +92,7 @@ func RunTests(t *testing.T, useVFS bool, fn MountFn) {
// Run holds the remotes for a test run
type Run struct {
os Oser
vfs *vfs.VFS
useVFS bool // set if we are testing a VFS not a mount
mountPath string
fremote fs.Fs
fremoteName string
@@ -115,11 +111,11 @@ var run *Run
// r.fremote is an empty remote Fs
//
// Finalise() will tidy them away when done.
func newRun(useVFS bool) *Run {
func newRun() *Run {
r := &Run{
useVFS: useVFS,
umountResult: make(chan error, 1),
}
fstest.Initialise()
var err error
@@ -133,9 +129,7 @@ func newRun(useVFS bool) *Run {
log.Fatalf("Failed to open mkdir %q: %v", *fstest.RemoteName, err)
}
if !r.useVFS {
r.mountPath = findMountPath()
}
r.mountPath = findMountPath()
// Mount it up
r.mount()
@@ -175,12 +169,6 @@ func (r *Run) mount() {
} else {
log.Printf("mount OK")
}
if r.useVFS {
r.os = vfsOs{r.vfs}
} else {
r.os = realOs{}
}
}
func (r *Run) umount() {
@@ -219,7 +207,7 @@ func (r *Run) umount() {
}
// cacheMode flushes the VFS and changes the CacheMode
func (r *Run) cacheMode(cacheMode vfscommon.CacheMode) {
func (r *Run) cacheMode(cacheMode vfs.CacheMode) {
if r.skip {
log.Printf("FUSE not found so skipping cacheMode")
return
@@ -250,31 +238,18 @@ func (r *Run) skipIfNoFUSE(t *testing.T) {
}
}
func (r *Run) skipIfVFS(t *testing.T) {
if r.useVFS {
t.Skip("Not running under VFS")
}
}
// Finalise cleans the remote and unmounts
func (r *Run) Finalise() {
r.umount()
r.cleanRemote()
if r.useVFS {
// FIXME
} else {
err := os.RemoveAll(r.mountPath)
if err != nil {
log.Printf("Failed to clean mountPath %q: %v", r.mountPath, err)
}
err := os.RemoveAll(r.mountPath)
if err != nil {
log.Printf("Failed to clean mountPath %q: %v", r.mountPath, err)
}
}
// path returns an OS local path for filepath
func (r *Run) path(filePath string) string {
if r.useVFS {
return filePath
}
// return windows drive letter root as E:\
if filePath == "" && runtime.GOOS == "windows" {
return run.mountPath + `\`
@@ -309,7 +284,7 @@ func (dm dirMap) filesOnly() dirMap {
// reads the local tree into dir
func (r *Run) readLocal(t *testing.T, dir dirMap, filePath string) {
realPath := r.path(filePath)
files, err := r.os.ReadDir(realPath)
files, err := ioutil.ReadDir(realPath)
require.NoError(t, err)
for _, fi := range files {
name := path.Join(filePath, fi.Name())
@@ -378,13 +353,13 @@ func (r *Run) waitForWriters() {
// If there is an error writing then writeFile
// deletes the existing file and tries again.
func writeFile(filename string, data []byte, perm os.FileMode) error {
f, err := run.os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
f, err := file.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil {
err = run.os.Remove(filename)
err = os.Remove(filename)
if err != nil {
return err
}
f, err = run.os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, perm)
f, err = file.OpenFile(filename, os.O_WRONLY|os.O_CREATE, perm)
if err != nil {
return err
}
@@ -408,7 +383,7 @@ func (r *Run) createFile(t *testing.T, filepath string, contents string) {
func (r *Run) readFile(t *testing.T, filepath string) string {
filepath = r.path(filepath)
result, err := run.os.ReadFile(filepath)
result, err := ioutil.ReadFile(filepath)
require.NoError(t, err)
time.Sleep(100 * time.Millisecond) // FIXME wait for Release
return string(result)
@@ -416,18 +391,18 @@ func (r *Run) readFile(t *testing.T, filepath string) string {
func (r *Run) mkdir(t *testing.T, filepath string) {
filepath = r.path(filepath)
err := run.os.Mkdir(filepath, 0700)
err := os.Mkdir(filepath, 0700)
require.NoError(t, err)
}
func (r *Run) rm(t *testing.T, filepath string) {
filepath = r.path(filepath)
err := run.os.Remove(filepath)
err := os.Remove(filepath)
require.NoError(t, err)
// Wait for file to disappear from listing
for i := 0; i < 100; i++ {
_, err := run.os.Stat(filepath)
_, err := os.Stat(filepath)
if os.IsNotExist(err) {
return
}
@@ -438,14 +413,13 @@ func (r *Run) rm(t *testing.T, filepath string) {
func (r *Run) rmdir(t *testing.T, filepath string) {
filepath = r.path(filepath)
err := run.os.Remove(filepath)
err := os.Remove(filepath)
require.NoError(t, err)
}
// TestMount checks that the Fs is mounted by seeing if the mountpoint
// is in the mount output
func TestMount(t *testing.T) {
run.skipIfVFS(t)
run.skipIfNoFUSE(t)
if runtime.GOOS == "windows" {
t.Skip("not running on windows")
@@ -458,7 +432,6 @@ func TestMount(t *testing.T) {
// TestRoot checks root directory is present and correct
func TestRoot(t *testing.T) {
run.skipIfVFS(t)
run.skipIfNoFUSE(t)
fi, err := os.Lstat(run.mountPath)

View File

@@ -1,8 +1,9 @@
package vfstest
package mounttest
import (
"io"
"io/ioutil"
"os"
"testing"
"time"
@@ -18,7 +19,7 @@ func TestReadByByte(t *testing.T) {
run.checkDir(t, "testfile 10")
for i := 0; i < len(data); i++ {
fd, err := run.os.Open(run.path("testfile"))
fd, err := os.Open(run.path("testfile"))
assert.NoError(t, err)
for j := 0; j < i; j++ {
buf := make([]byte, 1)
@@ -49,7 +50,7 @@ func TestReadChecksum(t *testing.T) {
// The hash comparison would fail in Flush, if we did not
// ensure we read the whole file
fd, err := run.os.Open(run.path("bigfile"))
fd, err := os.Open(run.path("bigfile"))
assert.NoError(t, err)
buf := make([]byte, 10)
_, err = io.ReadFull(fd, buf)
@@ -59,7 +60,7 @@ func TestReadChecksum(t *testing.T) {
// The hash comparison would fail, because we only read parts
// of the file
fd, err = run.os.Open(run.path("bigfile"))
fd, err = os.Open(run.path("bigfile"))
assert.NoError(t, err)
// read at start
_, err = io.ReadFull(fd, buf)
@@ -84,7 +85,7 @@ func TestReadSeek(t *testing.T) {
run.createFile(t, "testfile", string(data))
run.checkDir(t, "testfile 10")
fd, err := run.os.Open(run.path("testfile"))
fd, err := os.Open(run.path("testfile"))
assert.NoError(t, err)
// Seek to half way

View File

@@ -1,6 +1,6 @@
// +build !linux,!darwin,!freebsd
package vfstest
package mounttest
import (
"runtime"

View File

@@ -1,8 +1,9 @@
// +build linux darwin freebsd
package vfstest
package mounttest
import (
"os"
"syscall"
"testing"
@@ -11,12 +12,11 @@ import (
// TestReadFileDoubleClose tests double close on read
func TestReadFileDoubleClose(t *testing.T) {
run.skipIfVFS(t)
run.skipIfNoFUSE(t)
run.createFile(t, "testdoubleclose", "hello")
in, err := run.os.Open(run.path("testdoubleclose"))
in, err := os.Open(run.path("testdoubleclose"))
assert.NoError(t, err)
fd := in.Fd()

View File

@@ -1,13 +1,14 @@
package vfstest
package mounttest
import (
"os"
"runtime"
"testing"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/rclone/rclone/vfs"
)
// TestWriteFileNoWrite tests writing a file with no write()'s to it
@@ -88,10 +89,9 @@ func TestWriteFileFsync(t *testing.T) {
// TestWriteFileDup tests behavior of mmap() in Python by using dup() on a file handle
func TestWriteFileDup(t *testing.T) {
run.skipIfVFS(t)
run.skipIfNoFUSE(t)
if run.vfs.Opt.CacheMode < vfscommon.CacheModeWrites {
if run.vfs.Opt.CacheMode < vfs.CacheModeWrites {
t.Skip("not supported on vfs-cache-mode < writes")
return
}
@@ -136,7 +136,7 @@ func TestWriteFileDup(t *testing.T) {
func TestWriteFileAppend(t *testing.T) {
run.skipIfNoFUSE(t)
if run.vfs.Opt.CacheMode < vfscommon.CacheModeWrites {
if run.vfs.Opt.CacheMode < vfs.CacheModeWrites {
t.Skip("not supported on vfs-cache-mode < writes")
return
}
@@ -169,7 +169,7 @@ func TestWriteFileAppend(t *testing.T) {
err = fh.Close()
require.NoError(t, err)
info, err := run.os.Stat(filepath)
info, err := os.Stat(filepath)
require.NoError(t, err)
require.EqualValues(t, len(testData)+len(appendData), info.Size())

View File

@@ -1,6 +1,6 @@
// +build !linux,!darwin,!freebsd
package vfstest
package mounttest
import (
"runtime"

Some files were not shown because too many files have changed in this diff.