mirror of https://github.com/rclone/rclone.git
synced 2025-12-06 00:03:32 +00:00

Compare commits
51 Commits
fix-onedri...fix-5995-z
| Author | SHA1 | Date |
|---|---|---|
| | a2f55eccbc | |
| | 3933b1e7f5 | |
| | 151224d3f8 | |
| | 1f28f4d05d | |
| | 1c99661d8c | |
| | 04b54bbb1e | |
| | 90cda2d6c2 | |
| | dbd9ce78e6 | |
| | cbc18e2693 | |
| | 67c675d7ad | |
| | c080b39e47 | |
| | 8504da496b | |
| | 67240bd541 | |
| | 6ce0168ba5 | |
| | 67f5f04a77 | |
| | 91f8894285 | |
| | 655d63b4fd | |
| | d3d843a11d | |
| | 57803bee22 | |
| | be53dcc9c9 | |
| | bd787e8f45 | |
| | 3cb7734eac | |
| | d08ed7d1e9 | |
| | f279e4ab01 | |
| | 35349657cd | |
| | ce3b65e6dc | |
| | 0008cb4934 | |
| | 2ea5b4f0b8 | |
| | b5818454f7 | |
| | 555def2da7 | |
| | 02b7613104 | |
| | b342c6cf9c | |
| | 8a6857c295 | |
| | 21fd13f10d | |
| | 5cc7797f9e | |
| | 8bf2d6b6c8 | |
| | 85eb9776bd | |
| | 47539ec0e6 | |
| | 58b327a9f6 | |
| | 1107da7247 | |
| | 8d1fff9a82 | |
| | 2c5923ab1a | |
| | 1ad22b8881 | |
| | 0501773db1 | |
| | cb8842941b | |
| | 5439a2c5c6 | |
| | d347ac0154 | |
| | 9f33eb2e65 | |
| | fe801b8fef | |
| | 6b158f33a3 | |
| | 5a6d233924 | |
@@ -20,7 +20,7 @@ issues:
exclude-use-default: false

# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-per-linter: 0
max-issues-per-linter: 0

# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0
@@ -77,7 +77,7 @@ Make sure you
* Add [documentation](#writing-documentation) for a new feature.
* [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).

When you are done with that push your changes to Github:
When you are done with that push your changes to GitHub:

git push -u origin my-new-feature

@@ -88,7 +88,7 @@ Your changes will then get reviewed and you might get asked to fix some stuff. I

You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).

## Using Git and Github ##
## Using Git and GitHub ##

### Committing your changes ###
@@ -49,6 +49,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
@@ -53,6 +53,14 @@ doing that so it may be necessary to roll back dependencies to the
version specified by `make updatedirect` in order to get rclone to
build.

## Tidy beta

At some point after the release run

bin/tidy-beta v1.55

where the version number is that of a couple ago to remove old beta binaries.

## Making a point release

If rclone needs a point release due to some horrendous bug:
@@ -1,3 +1,4 @@
// Package alias implements a virtual provider to rename existing remotes.
package alias

import (

@@ -1,3 +1,4 @@
// Package all imports all the backends
package all

import (
@@ -1,8 +1,7 @@
// Package azureblob provides an interface to the Microsoft Azure blob object storage system

//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

// Package azureblob provides an interface to the Microsoft Azure blob object storage system
package azureblob

import (

@@ -1678,14 +1677,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
}

uploadParts := int64(maxUploadParts)
uploadParts := maxUploadParts
if uploadParts < 1 {
uploadParts = 1
} else if uploadParts > maxUploadParts {
uploadParts = maxUploadParts
}
// calculate size of parts/blocks
partSize := chunksize.Calculator(o, int(uploadParts), o.fs.opt.ChunkSize)
partSize := chunksize.Calculator(o, src.Size(), uploadParts, o.fs.opt.ChunkSize)

putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
BufferSize: int(partSize),
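The azureblob hunk above (and the matching b2 and s3 hunks below) change `chunksize.Calculator` to take the source size and the maximum part count explicitly, instead of a pre-cast part count. A minimal sketch of what such a calculator has to do, under the assumption that it simply grows the chunk size until the object fits in the allowed number of parts (the real logic lives in rclone's fs/chunksize package and may differ):

```go
package example

// calculateChunkSize is a sketch only - it illustrates the idea behind the
// new chunksize.Calculator(o, size, maxParts, defaultChunkSize) call sites
// shown in this changeset, not rclone's actual implementation.
func calculateChunkSize(size int64, maxParts int, defaultChunkSize int64) int64 {
	chunkSize := defaultChunkSize
	if size < 0 {
		// Unknown size (streaming upload): keep the configured default.
		return chunkSize
	}
	// Double the chunk size until maxParts chunks cover the whole object.
	for chunkSize*int64(maxParts) < size {
		chunkSize *= 2
	}
	return chunkSize
}
```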
@@ -1,3 +1,4 @@
// Package api provides types used by the Backblaze B2 API.
package api

import (

@@ -238,7 +239,7 @@ type GetFileInfoRequest struct {
// If the original source of the file being uploaded has a last
// modified time concept, Backblaze recommends using
// src_last_modified_millis as the name, and a string holding the base
// 10 number number of milliseconds since midnight, January 1, 1970
// 10 number of milliseconds since midnight, January 1, 1970
// UTC. This fits in a 64 bit integer such as the type "long" in the
// programming language Java. It is intended to be compatible with
// Java's time long. For example, it can be passed directly into the

@@ -1,4 +1,4 @@
// Package b2 provides an interface to the Backblaze B2 object storage system
// Package b2 provides an interface to the Backblaze B2 object storage system.
package b2

// FIXME should we remove sha1 checks from here as rclone now supports
@@ -97,7 +97,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
chunkSize = chunksize.Calculator(src, maxParts, defaultChunkSize)
chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts++
@@ -14,7 +14,7 @@ const (
timeFormat = `"` + time.RFC3339 + `"`
)

// Time represents represents date and time information for the
// Time represents date and time information for the
// box API, by using RFC3339
type Time time.Time
@@ -266,7 +266,7 @@ type Fs struct {
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
backend/cache/cache.go (vendored)

@@ -1,6 +1,7 @@
//go:build !plan9 && !js
// +build !plan9,!js

// Package cache implements a virtual provider to cache existing remotes.
package cache

import (
@@ -64,7 +64,7 @@ import (
// length of 13 decimals it makes a 7-digit base-36 number.
//
// When transactions is set to the norename style, data chunks will
// keep their temporary chunk names (with the transacion identifier
// keep their temporary chunk names (with the transaction identifier
// suffix). To distinguish them from temporary chunks, the txn field
// of the metadata file is set to match the transaction identifier of
// the data chunks.

@@ -1079,7 +1079,7 @@ func (o *Object) readMetadata(ctx context.Context) error {

// readXactID returns the transaction ID stored in the passed metadata object
func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
// if xactID has already been read and cahced return it now
// if xactID has already been read and cached return it now
if o.xIDCached {
return o.xactID, nil
}
@@ -1,4 +1,4 @@
// Package combine implents a backend to combine multipe remotes in a directory tree
// Package combine implents a backend to combine multiple remotes in a directory tree
package combine

/*

@@ -29,6 +29,7 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
)

@@ -90,7 +91,7 @@ Generally -1 (default, equivalent to 5) is recommended.
Levels 1 to 9 increase compression at the cost of speed. Going past 6
generally offers very little return.

Level -2 uses Huffmann encoding only. Only use if you know what you
Level -2 uses Huffman encoding only. Only use if you know what you
are doing.
Level 0 turns off compression.`,
Default: sgzip.DefaultCompression,
@@ -130,7 +131,7 @@ type Fs struct {
features *fs.Features // optional features
}

// NewFs contstructs an Fs from the path, container:path
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)

@@ -367,13 +368,16 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if err != nil {
return nil, err
}
meta := readMetadata(ctx, mo)
if meta == nil {
return nil, errors.New("error decoding metadata")
meta, err := readMetadata(ctx, mo)
if err != nil {
return nil, fmt.Errorf("error decoding metadata: %w", err)
}
// Create our Object
o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
return f.newObject(o, mo, meta), err
if err != nil {
return nil, err
}
return f.newObject(o, mo, meta), nil
}

// checkCompressAndType checks if an object is compressible and determines it's mime type
@@ -451,7 +455,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
return f.Fs.Put(ctx, bytes.NewBuffer(buf[:n]), src, options...)
}

// Need to include what we allready read
// Need to include what we already read
in = &ReadCloserWrapper{
Reader: io.MultiReader(bytes.NewReader(buf), in),
Closer: in,

@@ -677,7 +681,7 @@ func (f *Fs) putWithCustomFunctions(ctx context.Context, in io.Reader, src fs.Ob
}
return nil, err
}
return f.newObject(dataObject, mo, meta), err
return f.newObject(dataObject, mo, meta), nil
}

// Put in to the remote path with the modTime given of the given size

@@ -731,7 +735,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
}

// If our new object is compressed we have to rename it with the correct size.
// Uncompressed objects don't store the size in the name so we they'll allready have the correct name.
// Uncompressed objects don't store the size in the name so we they'll already have the correct name.
if compressible {
wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
if err != nil {

@@ -742,7 +746,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
return newObj, nil
}

// Temporarely disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
// Temporarily disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
// will break stuff. Right no I can't think of a way to make this work.

// PutUnchecked uploads the object
@@ -1040,24 +1044,19 @@ func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mim
}

// This function will read the metadata from a metadata object.
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata) {
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) {
// Open our meradata object
rc, err := mo.Open(ctx)
if err != nil {
return nil
return nil, err
}
defer func() {
err := rc.Close()
if err != nil {
fs.Errorf(mo, "Error closing object: %v", err)
}
}()
defer fs.CheckClose(rc, &err)
jr := json.NewDecoder(rc)
meta = new(ObjectMetadata)
if err = jr.Decode(meta); err != nil {
return nil
return nil, err
}
return meta
return meta, nil
}

// Remove removes this object
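The readMetadata rewrite above swaps a hand-rolled deferred close for fs.CheckClose and reports failures through the function's named err result instead of returning nil. A small sketch of that deferred-close idiom, with checkClose standing in for fs.CheckClose (the assumption here is that the helper records a close error only when no earlier error has been set):

```go
package example

import (
	"encoding/json"
	"io"
)

// checkClose closes c and stores any close error into *err unless an
// earlier error is already recorded. Stand-in for fs.CheckClose.
func checkClose(c io.Closer, err *error) {
	cerr := c.Close()
	if *err == nil {
		*err = cerr
	}
}

// decodeJSON shows the idiom used by readMetadata above: the deferred
// close can surface its error through the named return value.
func decodeJSON(rc io.ReadCloser, v interface{}) (err error) {
	defer checkClose(rc, &err)
	return json.NewDecoder(rc).Decode(v)
}
```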
@@ -1102,6 +1101,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
origName := o.Remote()
if o.meta.Mode != Uncompressed || compressible {
newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType)
if err != nil {
return err
}
if newObject.Object.Remote() != o.Object.Remote() {
if removeErr := o.Object.Remove(ctx); removeErr != nil {
return removeErr

@@ -1115,9 +1117,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// If we are, just update the object and metadata
newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType)
}
if err != nil {
return err
if err != nil {
return err
}
}
// Update object metadata and return
o.Object = newObject.Object

@@ -1128,6 +1130,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified.
func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object {
if o == nil {
log.Trace(nil, "newObject(%#v, %#v, %#v) called with nil o", o, mo, meta)
}
return &Object{
Object: o,
f: f,

@@ -1140,6 +1145,9 @@ func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object

// This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand.
func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object {
if o == nil {
log.Trace(nil, "newObjectSizeAndNameOnly(%#v, %#v, %#v) called with nil o", o, moName, size)
}
return &Object{
Object: o,
f: f,

@@ -1167,7 +1175,7 @@ func (o *Object) loadMetadataIfNotLoaded(ctx context.Context) (err error) {
return err
}
if o.meta == nil {
o.meta = readMetadata(ctx, o.mo)
o.meta, err = readMetadata(ctx, o.mo)
}
return err
}
@@ -131,7 +131,7 @@ type fileNameEncoding interface {
// - we strip the padding character `=`
type caseInsensitiveBase32Encoding struct{}

// EncodeToString encodes a strign using the modified version of
// EncodeToString encodes a string using the modified version of
// base32 encoding.
func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
encoded := base32.HexEncoding.EncodeToString(src)

@@ -125,7 +125,7 @@ names, or for debugging purposes.`,

This option could help with shortening the encrypted filename. The
suitable option would depend on the way your remote count the filename
length and if it's case sensitve.`,
length and if it's case sensitive.`,
Default: "base32",
Examples: []fs.OptionExample{
{
@@ -1210,6 +1210,7 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
WriteMimeType: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
FilterAware: true,
}).Fill(ctx, f)

// Create a new authorized Drive client.

@@ -3305,7 +3306,7 @@ drives found and a combined drive.

upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"

Adding this to the rclone config file will cause those team drives to
be accessible with the aliases shown. Any illegal charactes will be
be accessible with the aliases shown. Any illegal characters will be
substituted with "_" and duplicate names will have numbers suffixed.
It will also add a remote called AllDrives which shows all the shared
drives combined into one directory tree.
@@ -518,6 +518,9 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {

// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
func (f *Fs) InternalTestAgeQuery(t *testing.T) {
// Check set up for filtering
assert.True(t, f.Features().FilterAware)

opt := &filter.Opt{}
err := opt.MaxAge.Set("1h")
assert.NoError(t, err)
@@ -309,7 +309,7 @@ func (b *batcher) Shutdown() {
}
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Commiting uploads - please wait...")
fs.Infof(b.f, "Committing uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message

@@ -268,7 +268,7 @@ default based on the batch_mode in use.
Advanced: true,
}, {
Name: "batch_commit_timeout",
Help: `Max time to wait for a batch to finish comitting`,
Help: `Max time to wait for a batch to finish committing`,
Default: fs.Duration(10 * time.Minute),
Advanced: true,
}, {

@@ -1669,7 +1669,7 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset
delta := int64(correctOffset) - int64(cursor.Offset)
skip += delta
what := fmt.Sprintf("incorrect offset error receved: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
what := fmt.Sprintf("incorrect offset error received: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
if skip < 0 {
return false, fmt.Errorf("can't seek backwards to correct offset: %s", what)
} else if skip == chunkSize {
@@ -1,3 +1,4 @@
// Package fichier provides an interface to the 1Fichier storage system.
package fichier

import (

@@ -84,7 +84,7 @@ type CopyFileResponse struct {
URLs []FileCopy `json:"urls"`
}

// FileCopy is used in the the CopyFileResponse
// FileCopy is used in the CopyFileResponse
type FileCopy struct {
FromURL string `json:"from_url"`
ToURL string `json:"to_url"`
@@ -19,7 +19,7 @@ const (
timeFormatJSON = `"` + timeFormatParameters + `"`
)

// Time represents represents date and time information for the
// Time represents date and time information for the
// filefabric API
type Time time.Time

@@ -95,7 +95,7 @@ type Status struct {
// Warning string `json:"warning"` // obsolete
}

// Status statisfies the error interface
// Status satisfies the error interface
func (e *Status) Error() string {
return fmt.Sprintf("%s (%s)", e.Message, e.Code)
}

@@ -150,7 +150,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
m configmap.Mapper // to save config
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenMu sync.Mutex // hold when reading the token

@@ -843,7 +843,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
}

// Wait for the the background task to complete if necessary
// Wait for the background task to complete if necessary
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) {
if taskID == "" || taskID == "0" {
// No task to wait for
@@ -81,8 +81,22 @@ security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTP.`,
Default: false,
}, {
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited.",
Name: "concurrency",
Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.

Note that setting this is very likely to cause deadlocks so it should
be used with care.

If you are doing a sync or copy then make sure concurrency is one more
than the sum of |--transfers| and |--checkers|.

If you use |--check-first| then it just needs to be one more than the
maximum of |--checkers| and |--transfers|.

So for |concurrency 3| you'd use |--checkers 2 --transfers 2
--check-first| or |--checkers 1 --transfers 1|.

`, "|", "`", -1),
Default: 0,
Advanced: true,
}, {

@@ -110,6 +124,11 @@ to an encrypted one. Cannot be used in combination with implicit FTP.`,
Help: "Use MDTM to set modification time (VsFtpd quirk)",
Default: false,
Advanced: true,
}, {
Name: "force_list_hidden",
Help: "Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.",
Default: false,
Advanced: true,
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),

@@ -191,6 +210,7 @@ type Options struct {
DisableMLSD bool `config:"disable_mlsd"`
DisableUTF8 bool `config:"disable_utf8"`
WritingMDTM bool `config:"writing_mdtm"`
ForceListHidden bool `config:"force_list_hidden"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"`

@@ -353,6 +373,9 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.WritingMDTM {
ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true))
}
if f.opt.ForceListHidden {
ftpConfig = append(ftpConfig, ftp.DialWithForceListHidden(true))
}
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
}
@@ -311,7 +311,7 @@ rclone does if you know the bucket exists already.
Help: `If set this will decompress gzip encoded objects.

It is possible to upload objects to GCS with "Content-Encoding: gzip"
set. Normally rclone will download these files files as compressed objects.
set. Normally rclone will download these files as compressed objects.

If this flag is set then rclone will decompress these files with
"Content-Encoding: gzip" as they are received. This means that rclone

@@ -319,6 +319,10 @@ can't check the size and hash but the file contents will be decompressed.
`,
Advanced: true,
Default: false,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,

@@ -343,6 +347,7 @@ type Options struct {
StorageClass string `config:"storage_class"`
NoCheckBucket bool `config:"no_check_bucket"`
Decompress bool `config:"decompress"`
Endpoint string `config:"endpoint"`
Enc encoder.MultiEncoder `config:"encoding"`
}

@@ -523,7 +528,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

// Create a new authorized Drive client.
f.client = oAuthClient
f.svc, err = storage.NewService(context.Background(), option.WithHTTPClient(f.client))
gcsOpts := []option.ClientOption{option.WithHTTPClient(f.client)}
if opt.Endpoint != "" {
gcsOpts = append(gcsOpts, option.WithEndpoint(opt.Endpoint))
}
f.svc, err = storage.NewService(context.Background(), gcsOpts...)
if err != nil {
return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err)
}
@@ -1,3 +1,4 @@
// Package api provides types used by the Google Photos API.
package api

import (

@@ -178,7 +178,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
unAuth *rest.Client // unauthenticated http client
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
ts *oauthutil.TokenSource // token source for oauth2
pacer *fs.Pacer // To pace the API calls
startTime time.Time // time Fs was started - used for datestamps
@@ -1,6 +1,7 @@
//go:build !plan9
// +build !plan9

// Package hdfs provides an interface to the HDFS storage system.
package hdfs

import (
@@ -330,7 +330,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, transaction)
}

// Do not allow the root-prefix to be non-existent nor a directory,
// Do not allow the root-prefix to be nonexistent nor a directory,
// but it can be empty.
if f.opt.RootPrefix != "" {
item, err := f.fetchMetadataForPath(ctx, f.opt.RootPrefix, api.HiDriveObjectNoMetadataFields)

@@ -623,7 +623,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// should be retried after the parent-directories of the destination have been created.
// If so, it will create the parent-directories.
//
// If any errors arrise while finding the source or
// If any errors arise while finding the source or
// creating the parent-directory those will be returned.
// Otherwise returns the originalError.
func (f *Fs) shouldRetryAndCreateParents(ctx context.Context, destinationPath string, sourcePath string, originalError error) (bool, error) {

@@ -961,7 +961,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} else {
_, _, err = o.fs.uploadFileChunked(ctx, resolvedPath, in, modTime, int(o.fs.opt.UploadChunkSize), o.fs.opt.UploadConcurrency)
}
// Try to check if object was updated, eitherway.
// Try to check if object was updated, either way.
// Metadata should be updated even if the upload fails.
info, metaErr = o.fs.fetchMetadataForPath(ctx, resolvedPath, api.HiDriveObjectWithMetadataFields)
} else {
@@ -138,7 +138,7 @@ var testTable = []struct {
// pattern describes how to use data to construct the hash-input.
// For every entry n at even indices this repeats the data n times.
// For every entry m at odd indices this repeats a null-byte m times.
// The input-data is constructed by concatinating the results in order.
// The input-data is constructed by concatenating the results in order.
pattern []int64
out []byte
name string

@@ -1,3 +1,4 @@
// Package internal provides utilities for HiDrive.
package internal

import (
@@ -227,7 +227,7 @@ type Object struct {
rawData json.RawMessage
}

// IAFile reprensents a subset of object in MetadataResponse.Files
// IAFile represents a subset of object in MetadataResponse.Files
type IAFile struct {
Name string `json:"name"`
// Source string `json:"source"`

@@ -243,7 +243,7 @@ type IAFile struct {
rawData json.RawMessage
}

// MetadataResponse reprensents subset of the JSON object returned by (frontend)/metadata/
// MetadataResponse represents subset of the JSON object returned by (frontend)/metadata/
type MetadataResponse struct {
Files []IAFile `json:"files"`
ItemSize int64 `json:"item_size"`

@@ -1273,7 +1273,7 @@ func trimPathPrefix(s, prefix string, enc encoder.MultiEncoder) string {
return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/"))
}

// mimicks urllib.parse.quote() on Python; exclude / from url.PathEscape
// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape
func quotePath(s string) string {
seg := strings.Split(s, "/")
newValues := []string{}
@@ -1,3 +1,4 @@
// Package api provides types used by the Jottacloud API.
package api

import (

@@ -1,3 +1,4 @@
// Package jottacloud provides an interface to the Jottacloud storage system.
package jottacloud

import (

@@ -1417,7 +1418,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)

// if destination was a trashed file then after a successfull copy the copied file is still in trash (bug in api?)
// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?)
if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
fs.Debugf(src, "Server-side copied to trashed destination, restoring")
info, err = f.createOrUpdate(ctx, remote, srcObj.modTime, srcObj.size, srcObj.md5)
@@ -1,3 +1,4 @@
// Package koofr provides an interface to the Koofr storage system.
package koofr

import (

@@ -667,7 +668,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
//
// https://app.koofr.net/content/links/39a6cc01-3b23-477a-8059-c0fb3b0f15de/files/get?path=%2F
//
// I am not sure about meaning of "path" parameter; in my expriments
// I am not sure about meaning of "path" parameter; in my experiments
// it is always "%2F", and omitting it or putting any other value
// results in 404.
//
@@ -22,6 +22,7 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"

@@ -234,15 +235,16 @@ type Options struct {

// Fs represents a local filesystem rooted at root
type Fs struct {
name string // the name of the remote
root string // The root directory (OS path)
opt Options // parsed config options
features *fs.Features // optional features
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
name string // the name of the remote
root string // The root directory (OS path)
opt Options // parsed config options
features *fs.Features // optional features
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
xattrSupported int32 // whether xattrs are supported (atomic access)

// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
@@ -286,6 +288,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
dev: devUnset,
lstat: os.Lstat,
}
if xattrSupported {
f.xattrSupported = 1
}
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
f.features = (&fs.Features{
CaseInsensitive: f.caseInsensitive(),

@@ -295,6 +300,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
FilterAware: true,
}).Fill(ctx, f)
if opt.FollowSymlinks {
f.lstat = os.Stat

@@ -439,6 +445,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
filter, useFilter := filter.GetConfig(ctx), filter.GetUseFilter(ctx)

fsDirPath := f.localPath(dir)
_, err = os.Stat(fsDirPath)
if err != nil {
@@ -489,6 +497,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue
}
if fierr != nil {
// Don't report errors on any file names that are excluded
if useFilter {
newRemote := f.cleanRemote(dir, name)
if !filter.IncludeRemote(newRemote) {
continue
}
}
err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync

@@ -506,6 +521,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
name := fi.Name()
mode := fi.Mode()
newRemote := f.cleanRemote(dir, name)
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
continue
}
// Follow symlinks if required
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
localPath := filepath.Join(fsDirPath, name)
@@ -9,11 +9,13 @@ import (
"path"
"path/filepath"
"runtime"
"sort"
"testing"
"time"

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"

@@ -190,7 +192,7 @@ func TestHashOnUpdate(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)

// Reupload it with diferent contents but same size and timestamp
// Reupload it with different contents but same size and timestamp
var b = bytes.NewBufferString("CONTENT")
src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
err = o.Update(ctx, b, src)

@@ -366,3 +368,36 @@ func TestMetadata(t *testing.T) {
})

}

func TestFilter(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now()
r.WriteFile("included", "included file", when)
r.WriteFile("excluded", "excluded file", when)
f := r.Flocal.(*Fs)

// Check set up for filtering
assert.True(t, f.Features().FilterAware)

// Add a filter
ctx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ included"))
require.NoError(t, fi.AddRule("- *"))

// Check listing without use filter flag
entries, err := f.List(ctx, "")
require.NoError(t, err)
sort.Sort(entries)
require.Equal(t, "[excluded included]", fmt.Sprint(entries))

// Add user filter flag
ctx = filter.SetUseFilter(ctx, true)

// Check listing with use filter flag
entries, err = f.List(ctx, "")
require.NoError(t, err)
sort.Sort(entries)
require.Equal(t, "[included]", fmt.Sprint(entries))
}
@@ -9,7 +9,7 @@ import (

const haveSetBTime = false

// setBTime changes the the birth time of the file passed in
// setBTime changes the birth time of the file passed in
func setBTime(name string, btime time.Time) error {
// Does nothing
return nil

@@ -11,7 +11,7 @@ import (

const haveSetBTime = true

// setBTime sets the the birth time of the file passed in
// setBTime sets the birth time of the file passed in
func setBTime(name string, btime time.Time) (err error) {
h, err := syscall.Open(name, os.O_RDWR, 0755)
if err != nil {
@@ -6,6 +6,8 @@ package local
import (
"fmt"
"strings"
"sync/atomic"
"syscall"

"github.com/pkg/xattr"
"github.com/rclone/rclone/fs"

@@ -16,12 +18,30 @@ const (
xattrSupported = xattr.XATTR_SUPPORTED
)

// Check to see if the error supplied is a not supported error, and if
// so, disable xattrs
func (f *Fs) xattrIsNotSupported(err error) bool {
xattrErr, ok := err.(*xattr.Error)
if !ok {
return false
}
// Xattrs not supported can be ENOTSUP or ENOATTR or EINVAL (on Solaris)
if xattrErr.Err == syscall.EINVAL || xattrErr.Err == syscall.ENOTSUP || xattrErr.Err == xattr.ENOATTR {
// Show xattrs not supported
if atomic.CompareAndSwapInt32(&f.xattrSupported, 1, 0) {
fs.Errorf(f, "xattrs not supported - disabling: %v", err)
}
return true
}
return false
}

// getXattr returns the extended attributes for an object
//
// It doesn't return any attributes owned by this backend in
// metadataKeys
func (o *Object) getXattr() (metadata fs.Metadata, err error) {
if !xattrSupported {
if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
return nil, nil
}
var list []string

@@ -31,6 +51,9 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
list, err = xattr.LList(o.path)
}
if err != nil {
if o.fs.xattrIsNotSupported(err) {
return nil, nil
}
return nil, fmt.Errorf("failed to read xattr: %w", err)
}
if len(list) == 0 {

@@ -45,6 +68,9 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
v, err = xattr.LGet(o.path, k)
}
if err != nil {
if o.fs.xattrIsNotSupported(err) {
return nil, nil
}
return nil, fmt.Errorf("failed to read xattr key %q: %w", k, err)
}
k = strings.ToLower(k)

@@ -64,7 +90,7 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
//
// It doesn't set any attributes owned by this backend in metadataKeys
func (o *Object) setXattr(metadata fs.Metadata) (err error) {
if !xattrSupported {
if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
return nil
}
for k, value := range metadata {

@@ -80,6 +106,9 @@ func (o *Object) setXattr(metadata fs.Metadata) (err error) {
err = xattr.LSet(o.path, k, v)
}
if err != nil {
if o.fs.xattrIsNotSupported(err) {
return nil
}
return fmt.Errorf("failed to set xattr key %q: %w", k, err)
}
}
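The xattr hunks above add a run-time kill switch: the first "not supported" error atomically flips f.xattrSupported to 0, the failure is logged once, and every later get/set becomes a no-op. A generic sketch of that disable-once pattern (the names here are illustrative, not rclone's):

```go
package example

import (
	"log"
	"sync/atomic"
)

// featureGate turns a feature off after the first unsupported error,
// logging the reason exactly once. Sketch of the pattern used above.
type featureGate struct{ enabled int32 }

func newFeatureGate() *featureGate { return &featureGate{enabled: 1} }

// ok reports whether the feature is still enabled.
func (g *featureGate) ok() bool { return atomic.LoadInt32(&g.enabled) == 1 }

// disable switches the feature off; only the first caller logs.
func (g *featureGate) disable(reason error) {
	if atomic.CompareAndSwapInt32(&g.enabled, 1, 0) {
		log.Printf("feature disabled: %v", reason)
	}
}
```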
@@ -1,3 +1,4 @@
// Package api provides types used by the Mail.ru API.
package api

import (

@@ -1,3 +1,4 @@
// Package mailru provides an interface to the Mail.ru Cloud storage system.
package mailru

import (
@@ -347,7 +347,7 @@ func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *
}
}
if err != nil {
return nil, fmt.Errorf("internal error: mkdir called with non-existent root node: %w", err)
return nil, fmt.Errorf("internal error: mkdir called with nonexistent root node: %w", err)
}
// i is number of directories to create (may be 0)
// node is directory to create them from

@@ -387,7 +387,7 @@ func (f *Fs) findRoot(ctx context.Context, create bool) (*mega.Node, error) {
return f._rootNode, nil
}

// Check for pre-existing root
// Check for preexisting root
absRoot := f.srv.FS.GetRoot()
node, err := f.findDir(absRoot, f.root)
//log.Printf("findRoot findDir %p %v", node, err)
@@ -118,7 +118,7 @@ type Fs struct {
filetype string // dir, file or symlink
dirscreated map[string]bool // if implicit dir has been created already
dirscreatedMutex sync.Mutex // mutex to protect dirscreated
statcache map[string][]File // cache successfull stat requests
statcache map[string][]File // cache successful stat requests
statcacheMutex sync.RWMutex // RWMutex to protect statcache
}

@@ -424,7 +424,7 @@ func (f *Fs) getFileName(file *File) string {
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.filetype == "" {
// This happens in two scenarios.
// 1. NewFs is done on a non-existent object, then later rclone attempts to List/ListR this NewFs.
// 1. NewFs is done on a nonexistent object, then later rclone attempts to List/ListR this NewFs.
// 2. List/ListR is called from the context of test_all and not the regular rclone binary.
err := f.initFs(ctx, dir)
if err != nil {

@@ -488,7 +488,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
if f.filetype == "" {
// This happens in two scenarios.
// 1. NewFs is done on a non-existent object, then later rclone attempts to List/ListR this NewFs.
// 1. NewFs is done on a nonexistent object, then later rclone attempts to List/ListR this NewFs.
// 2. List/ListR is called from the context of test_all and not the regular rclone binary.
err := f.initFs(ctx, dir)
if err != nil {
@@ -1,5 +1,4 @@
// Types passed and returned to and from the API

// Package api provides types used by the OneDrive API.
package api

import (

@@ -14,7 +13,7 @@ const (
PackageTypeOneNote = "oneNote"
)

// Error is returned from one drive when things go wrong
// Error is returned from OneDrive when things go wrong
type Error struct {
ErrorInfo struct {
Code string `json:"code"`

@@ -71,7 +70,7 @@ type Drive struct {
Quota Quota `json:"quota"`
}

// Timestamp represents represents date and time information for the
// Timestamp represents date and time information for the
// OneDrive API, by using ISO 8601 and is always in UTC time.
type Timestamp time.Time
@@ -600,14 +600,14 @@ type Options struct {
Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote one drive
// Fs represents a remote OneDrive
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
ci *fs.ConfigInfo // global config
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the OneDrive server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry

@@ -615,7 +615,7 @@ type Fs struct {
driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
}

// Object describes a one drive object
// Object describes a OneDrive object
//
// Will definitely have info but maybe not meta
type Object struct {

@@ -645,7 +645,7 @@ func (f *Fs) Root() string {

// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("One drive root '%s'", f.root)
return fmt.Sprintf("OneDrive root '%s'", f.root)
}

// Features returns the optional features of this Fs

@@ -653,7 +653,7 @@ func (f *Fs) Features() *fs.Features {
return f.features
}

// parsePath parses a one drive 'url'
// parsePath parses a OneDrive 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return

@@ -891,6 +891,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler)

// Disable change polling in China region
// See: https://github.com/rclone/rclone/issues/6444
if f.opt.Region == regionCN {
f.features.ChangeNotify = nil
}

// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, _, err := f.readMetaDataForPath(ctx, "")
@@ -1,3 +1,4 @@
// Package opendrive provides an interface to the OpenDrive storage system.
package opendrive

import (

@@ -13,7 +13,7 @@ const (
timeFormat = `"` + time.RFC1123Z + `"`
)

// Time represents represents date and time information for the
// Time represents date and time information for the
// pcloud API, by using RFC1123Z
type Time time.Time
@@ -1,3 +1,4 @@
// Package putio provides an interface to the put.io storage system.
package putio

import (

@@ -1,9 +1,8 @@
// Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/

//go:build !plan9 && !js
// +build !plan9,!js

// Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/
package qingstor

import (
@@ -64,7 +64,7 @@ import (
func init() {
fs.Register(&fs.RegInfo{
Name: "s3",
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi",
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi",
NewFs: NewFs,
CommandHelp: commandHelp,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {

@@ -116,6 +116,9 @@ func init() {
}, {
Value: "IDrive",
Help: "IDrive e2",
}, {
Value: "IONOS",
Help: "IONOS Cloud",
}, {
Value: "LyveCloud",
Help: "Seagate Lyve Cloud",
@@ -384,10 +387,24 @@ func init() {
Value: "auto",
Help: "R2 buckets are automatically distributed across Cloudflare's data centers for low latency.",
}},
}, {
Name: "region",
Help: "Region where your bucket will be created and your data stored.\n",
Provider: "IONOS",
Examples: []fs.OptionExample{{
Value: "de",
Help: "Frankfurt, Germany",
}, {
Value: "eu-central-2",
Help: "Berlin, Germany",
}, {
Value: "eu-south-2",
Help: "Logrono, Spain",
}},
}, {
Name: "region",
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS,Alibaba,ChinaMobile,Cloudflare,ArvanCloud,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive",
Provider: "!AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive",
Examples: []fs.OptionExample{{
Value: "",
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -698,6 +715,20 @@ func init() {
Value: "s3.private.sng01.cloud-object-storage.appdomain.cloud",
Help: "Singapore Single Site Private Endpoint",
}},
}, {
Name: "endpoint",
Help: "Endpoint for IONOS S3 Object Storage.\n\nSpecify the endpoint from the same region.",
Provider: "IONOS",
Examples: []fs.OptionExample{{
Value: "s3-eu-central-1.ionoscloud.com",
Help: "Frankfurt, Germany",
}, {
Value: "s3-eu-central-2.ionoscloud.com",
Help: "Berlin, Germany",
}, {
Value: "s3-eu-south-2.ionoscloud.com",
Help: "Logrono, Spain",
}},
}, {
// oss endpoints: https://help.aliyun.com/document_detail/31837.html
Name: "endpoint",
@@ -1001,7 +1032,7 @@ func init() {
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
Provider: "!AWS,IBMCOS,IDrive,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,ArvanCloud,Scaleway,StackPath,Storj,RackCorp",
Provider: "!AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,ArvanCloud,Scaleway,StackPath,Storj,RackCorp",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",

@@ -1411,7 +1442,7 @@ func init() {
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,IBMCOS,IDrive,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,ArvanCloud,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
Provider: "!AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,ArvanCloud,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -2009,7 +2040,7 @@ See [the time option docs](/docs/#time-option) for valid formats.
Help: `If set this will decompress gzip encoded objects.

It is possible to upload objects to S3 with "Content-Encoding: gzip"
set. Normally rclone will download these files files as compressed objects.
set. Normally rclone will download these files as compressed objects.

If this flag is set then rclone will decompress these files with
"Content-Encoding: gzip" as they are received. This means that rclone

@@ -2116,7 +2147,7 @@ type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
MaxUploadParts int64 `config:"max_upload_parts"`
MaxUploadParts int `config:"max_upload_parts"`
DisableChecksum bool `config:"disable_checksum"`
SharedCredentialsFile string `config:"shared_credentials_file"`
Profile string `config:"profile"`

@@ -2537,6 +2568,10 @@ func setQuirks(opt *Options) {
useMultipartEtag = false // untested
case "IDrive":
virtualHostStyle = false
case "IONOS":
// listObjectsV2 supported - https://api.ionos.com/docs/s3/#Basic-Operations-get-Bucket-list-type-2
virtualHostStyle = false
urlEncodeListings = false
case "LyveCloud":
useMultipartEtag = false // LyveCloud seems to calculate multipart Etags differently from AWS
case "Minio":
@@ -4718,10 +4753,10 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
if size == -1 {
warnStreamUpload.Do(func() {
fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
f.opt.ChunkSize, fs.SizeSuffix(int64(partSize)*uploadParts))
f.opt.ChunkSize, fs.SizeSuffix(int64(partSize)*int64(uploadParts)))
})
} else {
partSize = chunksize.Calculator(o, int(uploadParts), f.opt.ChunkSize)
partSize = chunksize.Calculator(o, size, uploadParts, f.opt.ChunkSize)
}

memPool := f.getMemoryPool(int64(partSize))
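The warning in the hunk above computes the ceiling for a streamed upload as chunk size times part count. A worked example of that arithmetic, assuming the usual S3 limit of 10,000 multipart parts and rclone's default 5 MiB chunk size (neither number appears in this diff, so treat both as assumptions):

```go
package main

import "fmt"

func main() {
	const (
		uploadParts = 10000             // assumed S3 multipart part limit
		partSize    = 5 * 1024 * 1024   // assumed default chunk size, 5 MiB
	)
	// Maximum size of a streamed upload of unknown length: parts x chunk size.
	maxSize := int64(partSize) * int64(uploadParts)
	fmt.Printf("max streamed upload: %.1f GiB\n", float64(maxSize)/(1<<30)) // ~48.8 GiB
}
```

Raising chunk_size raises that ceiling proportionally, which is what the warning is telling the user.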
@@ -5199,7 +5234,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var head s3.HeadObjectOutput
//structs.SetFrom(&head, &req)
setFrom_s3HeadObjectOutput_s3PutObjectInput(&head, &req)
head.ETag = &md5sumHex // doesn't matter quotes are misssing
head.ETag = &md5sumHex // doesn't matter quotes are missing
head.ContentLength = &size
// If we have done a single part PUT request then we can read these
if gotEtag != "" {

@@ -78,7 +78,7 @@ func (f *Fs) InternalTestMetadata(t *testing.T) {
}

t.Run("GzipEncoding", func(t *testing.T) {
// Test that the gziped file we uploaded can be
// Test that the gzipped file we uploaded can be
// downloaded with and without decompression
checkDownload := func(wantContents string, wantSize int64, wantHash string) {
gotContents := fstests.ReadObject(ctx, t, o, -1)

@@ -116,7 +116,7 @@ func (f *Fs) InternalTestNoHead(t *testing.T) {
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()
// PutTestcontests checks the received object
// PutTestcontents checks the received object

}
@@ -1,3 +1,4 @@
// Package api provides types used by the Seafile API.
package api

// Some api objects are duplicated with only small differences,

@@ -1,3 +1,4 @@
// Package seafile provides an interface to the Seafile storage system.
package seafile

import (

@@ -136,7 +137,7 @@ type Fs struct {
features *fs.Features // optional features
endpoint *url.URL // URL of the host
endpointURL string // endpoint as a string
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
pacer *fs.Pacer // pacer for API calls
authMu sync.Mutex // Mutex to protect library decryption
createDirMutex sync.Mutex // Protect creation of directories
@@ -1,8 +1,7 @@
// Package sftp provides a filesystem interface using github.com/pkg/sftp

//go:build !plan9
// +build !plan9

// Package sftp provides a filesystem interface using github.com/pkg/sftp
package sftp

import (

@@ -276,19 +275,24 @@ Set to 0 to keep connections indefinitely.
Name: "chunk_size",
Help: `Upload and download chunk size.

This controls the maximum packet size used in the SFTP protocol. The
RFC limits this to 32768 bytes (32k), however a lot of servers
support larger sizes and setting it larger will increase transfer
speed dramatically on high latency links.
This controls the maximum size of payload in SFTP protocol packets.
The RFC limits this to 32768 bytes (32k), which is the default. However,
a lot of servers support larger sizes, typically limited to a maximum
total package size of 256k, and setting it larger will increase transfer
speed dramatically on high latency links. This includes OpenSSH, and,
for example, using the value of 255k works well, leaving plenty of room
for overhead while still being within a total packet size of 256k.

Only use a setting higher than 32k if you always connect to the same
server or after sufficiently broad testing.

For example using the value of 252k with OpenSSH works well with its
maximum packet size of 256k.

If you get the error "failed to send packet header: EOF" when copying
a large file, try lowering this number.
Make sure to test thoroughly before using a value higher than 32k,
and only use it if you always connect to the same server or after
sufficiently broad testing. If you get errors such as
"failed to send packet payload: EOF", lots of "connection lost",
or "corrupted on transfer", when copying a larger file, try lowering
the value. The server run by [rclone serve sftp](/commands/rclone_serve_sftp)
sends packets with standard 32k maximum payload so you must not
set a different chunk_size when downloading files, but it accepts
packets up to the 256k total size, so for uploads the chunk_size
can be set as for the OpenSSH example above.
`,
Default: 32 * fs.Kibi,
Advanced: true,
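A minimal sketch of how the revised guidance might be applied to an SFTP remote (remote name, host and user are placeholders; 255k matches the OpenSSH example in the help text above):

    [myserver]
    type = sftp
    host = sftp.example.com
    user = me
    chunk_size = 255k

As the help text says, keep the 32k default when downloading from a server run by `rclone serve sftp`.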
@@ -1167,6 +1171,10 @@ func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
err = c.sftpClient.Mkdir(dirPath)
f.putSftpConnection(&c, err)
if err != nil {
if os.IsExist(err) {
fs.Debugf(f, "directory %q exists after Mkdir is attempted", dirPath)
return nil
}
return fmt.Errorf("mkdir %q failed: %w", dirPath, err)
}
return nil

@@ -1,3 +1,4 @@
// Package api provides types used by the Sia API.
package api

import (

@@ -1,3 +1,4 @@
// Package sia provides an interface to the Sia storage system.
package sia

import (

@@ -200,7 +200,7 @@ type Fs struct {
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
m configmap.Mapper // config file access

@@ -16,8 +16,8 @@ func TestInternalUrlEncode(t *testing.T) {
want string
}{
{"", ""},
{"abcdefghijklmopqrstuvwxyz", "abcdefghijklmopqrstuvwxyz"},
{"ABCDEFGHIJKLMOPQRSTUVWXYZ", "ABCDEFGHIJKLMOPQRSTUVWXYZ"},
{"abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxyz"},
{"ABCDEFGHIJKLMNOPQRSTUVWXYZ", "ABCDEFGHIJKLMNOPQRSTUVWXYZ"},
{"0123456789", "0123456789"},
{"abc/ABC/123", "abc/ABC/123"},
{"   ", "%20%20%20"},

@@ -1,3 +1,4 @@
// Package policy provides utilities for the union implementation.
package policy

import (

@@ -1,3 +1,4 @@
// Package union implements a virtual provider to join existing remotes.
package union

import (

@@ -1,3 +1,4 @@
// Package upstream provides utility functionality to union.
package upstream

import (

@@ -1,3 +1,4 @@
// Package api provides types used by the Uptobox API.
package api

import "fmt"
@@ -79,7 +80,7 @@ type UploadInfo struct {
} `json:"data"`
}

// UploadResponse is the respnse to a successful upload
// UploadResponse is the response to a successful upload
type UploadResponse struct {
Files []struct {
Name string `json:"name"`

@@ -1,3 +1,4 @@
// Package uptobox provides an interface to the Uptobox storage system.
package uptobox

import (
@@ -162,7 +163,7 @@ func (f *Fs) splitPathFull(pth string) (string, string) {
return "//" + fullPath[:i], fullPath[i+1:]
}

// splitPath is modified splitPath version that doesn't include the seperator
// splitPath is modified splitPath version that doesn't include the separator
// in the base path
func (f *Fs) splitPath(pth string) (string, string) {
// chop of any leading or trailing '/'
@@ -478,7 +479,7 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
// yes it does take take 4 requests if we're uploading to root and 6+ if we're uploading to any subdir :(
// yes it does take 4 requests if we're uploading to root and 6+ if we're uploading to any subdir :(

// create upload request
opts := rest.Opts{
@@ -756,7 +757,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if err != nil {
return fmt.Errorf("dirmove: source not found: %w", err)
}
// check if the destination allready exists
// check if the destination already exists
dstPath := f.dirPath(dstRemote)
_, err = f.readMetaDataForPath(ctx, dstPath, &api.MetadataRequestOptions{Limit: 1})
if err == nil {
@@ -781,7 +782,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
needMove := srcBase != dstBase

// if we have to rename we'll have to use a temporary name since
// there could allready be a directory with the same name as the src directory
// there could already be a directory with the same name as the src directory
if needRename {
// rename to a temporary name
tmpName := "rcloneTemp" + random.String(8)

@@ -148,7 +148,7 @@ type Fs struct {
features *fs.Features // optional features
endpoint *url.URL // URL of the host
endpointURL string // endpoint as a string
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
pacer *fs.Pacer // pacer for API calls
precision time.Duration // mod time precision
canStream bool // set if can stream

@@ -1,3 +1,4 @@
// Package api provides types used by the Yandex API.
package api

import (

@@ -1,3 +1,4 @@
// Package yandex provides an interface to the Yandex storage system.
package yandex

import (
@@ -1,3 +1,4 @@
// Package api provides types used by the Zoho API.
package api

import (
@@ -5,7 +6,7 @@ import (
"time"
)

// Time represents represents date and time information for Zoho
// Time represents date and time information for Zoho
// Zoho uses milliseconds since unix epoch (Java currentTimeMillis)
type Time time.Time
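As the comment notes, Zoho timestamps are milliseconds since the Unix epoch. A minimal sketch of that conversion in Go, purely for illustration (this is not the backend's actual UnmarshalJSON, which is not shown in this diff):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        ms := int64(1257894000000) // example timestamp in milliseconds
        t := time.UnixMilli(ms)    // Go 1.17+: milliseconds since the epoch -> time.Time
        fmt.Println(t.UTC())       // 2009-11-10 23:00:00 +0000 UTC
    }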
@@ -150,8 +150,8 @@ func init() {
return workspace.ID, workspace.Attributes.Name
})
case "workspace_end":
worksspaceID := config.Result
m.Set(configRootID, worksspaceID)
workspaceID := config.Result
m.Set(configRootID, workspaceID)
return nil, nil
}
return nil, fmt.Errorf("unknown state %q", config.State)
@@ -206,7 +206,7 @@ type Fs struct {
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the one drive server
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
}
@@ -670,27 +670,28 @@ func isSimpleName(s string) bool {
}

func (f *Fs) upload(ctx context.Context, name string, parent string, size int64, in io.Reader, options ...fs.OpenOption) (*api.Item, error) {
params := url.Values{}
params.Set("filename", name)
params.Set("parent_id", parent)
params.Set("override-name-exist", strconv.FormatBool(true))
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name)
if err != nil {
return nil, fmt.Errorf("failed to make multipart upload: %w", err)
}

contentLength := overhead + size
uploadID := random.String(20) // random upload ID
opts := rest.Opts{
Method: "POST",
Path: "/upload",
Body: formReader,
ContentType: contentType,
ContentLength: &contentLength,
Options: options,
Parameters: params,
TransferEncoding: []string{"identity"},
Method: "POST",
//RootURL: "https://upload.zoho.com/workdrive-api/v1",
RootURL: "https://upload.zoho.eu/workdrive-api/v1",
Path: "/stream/upload",
Body: in,
ContentType: fs.MimeTypeFromName(name), // FIXME should read mime type of original object
ContentLength: &size,
Options: options,
ExtraHeaders: map[string]string{
"x-filename": name,
"x-parent_id": parent,
"upload-id": uploadID,
"x-streammode": "1",
},
}
if size < 0 {
opts.ContentLength = nil
}

var err error
var resp *http.Response
var uploadResponse *api.UploadResponse
err = f.pacer.CallNoRetry(func() (bool, error) {
@@ -1264,7 +1265,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}

// upload was successfull, need to delete old object before rename
// upload was successful, need to delete old object before rename
if err = o.Remove(ctx); err != nil {
return fmt.Errorf("failed to remove old object: %w", err)
}

@@ -15,6 +15,7 @@ else
fi

rclone ${dry_run} -vv -P --checkers 16 --transfers 16 delete \
--fast-list \
--include "/${version}**" \
--include "/branch/${version}**" \
--include "/branch/*/${version}**" \
memstore:beta-rclone-org
@@ -1,3 +1,4 @@
// Package about provides the about command.
package about

import (

@@ -1,3 +1,4 @@
// Package authorize provides the authorize command.
package authorize

import (

@@ -1,3 +1,4 @@
// Package backend provides the backend command.
package backend

import (

@@ -290,7 +290,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
return
}

// exccessDeletes checks whether number of deletes is within allowed range
// excessDeletes checks whether number of deletes is within allowed range
func (ds *deltaSet) excessDeletes() bool {
maxDelete := ds.opt.MaxDelete
maxRatio := float64(maxDelete) / 100.0
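As a rough worked example of the ratio above (illustrative numbers, not taken from this diff): with `--max-delete 50`, maxRatio is 0.5, so a run that wants to delete 30 of the 40 files seen in the prior listing (a ratio of 0.75) would be treated as an excessive delete, while deleting 10 of 40 (0.25) would pass the safety check.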
@@ -15,7 +15,7 @@ func makeHelp(help string) string {
return replacer.Replace(help)
}

var shortHelp = `Perform bidirectonal synchronization between two paths.`
var shortHelp = `Perform bidirectional synchronization between two paths.`

var rcHelp = makeHelp(`This takes the following parameters

@@ -1,6 +1,7 @@
//go:build !plan9 && !js
// +build !plan9,!js

// Package cachestats provides the cachestats command.
package cachestats

import (

@@ -1,3 +1,4 @@
// Package cat provides the cat command.
package cat

import (

@@ -1,3 +1,4 @@
// Package check provides the check command.
package check

import (

@@ -1,3 +1,4 @@
// Package checksum provides the checksum command.
package checksum

import (

@@ -1,3 +1,4 @@
// Package cleanup provides the cleanup command.
package cleanup

import (

@@ -1,11 +1,10 @@
// Package cmount implements a FUSE mounting system for rclone remotes.
//
// This uses the cgo based cgofuse library

//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows)
// +build cmount
// +build linux,cgo darwin,cgo freebsd,cgo windows

// Package cmount implements a FUSE mounting system for rclone remotes.
//
// This uses the cgo based cgofuse library
package cmount

import (

@@ -1,9 +1,10 @@
// Build for macos with the brew tag to handle the absence
// of fuse and print an appropriate error message

//go:build brew && darwin
// +build brew,darwin

// Package cmount implements a FUSE mounting system for rclone remotes.
//
// Build for macos with the brew tag to handle the absence
// of fuse and print an appropriate error message
package cmount

import (

@@ -3,6 +3,8 @@
// +build linux,cgo darwin,cgo freebsd,cgo windows
// +build !race !windows

// Package cmount implements a FUSE mounting system for rclone remotes.
//
// FIXME this doesn't work with the race detector under Windows either
// hanging or producing lots of differences.

@@ -1,10 +1,11 @@
// Build for cmount for unsupported platforms to stop go complaining
// about "no buildable Go source files "

//go:build !((linux && cgo && cmount) || (darwin && cgo && cmount) || (freebsd && cgo && cmount) || (windows && cmount))
// +build !linux !cgo !cmount
// +build !darwin !cgo !cmount
// +build !freebsd !cgo !cmount
// +build !windows !cmount

// Package cmount implements a FUSE mounting system for rclone remotes.
//
// Build for cmount for unsupported platforms to stop go complaining
// about "no buildable Go source files".
package cmount

@@ -80,7 +80,7 @@ func handleDefaultMountpath() (string, error) {
func handleNetworkShareMountpath(mountpath string, opt *mountlib.Options) (string, error) {
// Assuming mount path is a valid network share path (UNC format, "\\Server\Share").
// Always mount as network drive, regardless of the NetworkMode option.
// Find an unused drive letter to use as mountpoint, the the supplied path can
// Find an unused drive letter to use as mountpoint, the supplied path can
// be used as volume prefix (network share path) instead of mountpoint.
if !opt.NetworkMode {
fs.Debugf(nil, "Forcing --network-mode because mountpoint path is network share UNC format")
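A sketch of the behaviour described above (the remote and share path are placeholders, shown for illustration only):

    rclone mount remote:path \\server\share

Because the mountpoint is a UNC network share path, --network-mode is forced on and rclone picks an unused drive letter to mount on, using the supplied path as the volume prefix.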
@@ -1,3 +1,4 @@
// Package config provides the config command.
package config

import (
@@ -139,7 +140,7 @@ are 100% certain you are already passing obscured passwords then use
|rclone config password| command.

The flag |--non-interactive| is for use by applications that wish to
configure rclone themeselves, rather than using rclone's text based
configure rclone themselves, rather than using rclone's text based
configuration questions. If this flag is set, and rclone needs to ask
the user a question, a JSON blob will be returned with the question in
it.
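For example, an application might create a remote non-interactively with something like the following (a sketch; the remote name and backend are placeholders, and the exact JSON returned is not shown in this diff):

    rclone config create mydrive drive --non-interactive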
@@ -1,3 +1,4 @@
// Package copy provides the copy command.
package copy

import (

@@ -1,3 +1,4 @@
// Package copyto provides the copyto command.
package copyto

import (

@@ -1,3 +1,4 @@
// Package copyurl provides the copyurl command.
package copyurl

import (

@@ -1,3 +1,4 @@
// Package cryptcheck provides the cryptcheck command.
package cryptcheck

import (

@@ -1,3 +1,4 @@
// Package cryptdecode provides the cryptdecode command.
package cryptdecode

import (

@@ -1,3 +1,4 @@
// Package dedupe provides the dedupe command.
package dedupe

import (

@@ -1,3 +1,4 @@
// Package delete provides the delete command.
package delete

import (

@@ -1,3 +1,4 @@
// Package deletefile provides the deletefile command.
package deletefile

import (

@@ -1,3 +1,4 @@
// Package genautocomplete provides the genautocomplete command.
package genautocomplete

import (

@@ -1,3 +1,4 @@
// Package gendocs provides the gendocs command.
package gendocs

import (

@@ -1,3 +1,4 @@
// Package hashsum provides the hashsum command.
package hashsum

import (
@@ -98,7 +99,7 @@ For the MD5 and SHA1 algorithms there are also dedicated commands,

This command can also hash data received on standard input (stdin),
by not passing a remote:path, or by passing a hyphen as remote:path
when there is data to read (if not, the hypen will be treated literaly,
when there is data to read (if not, the hyphen will be treated literally,
as a relative path).

Run without a hash to see the list of all supported hashes, e.g.
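As described above, passing a hyphen makes hashsum read from stdin, for example:

    echo "hello" | rclone hashsum MD5 -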
Some files were not shown because too many files have changed in this diff