Mirror of https://github.com/rclone/rclone.git (synced 2026-01-31 00:33:42 +00:00)

Compare commits: v1.59.0...fix-union- (25 commits)
Commits:

1d1d847f18
7a24c173f6
fb60aeddae
695736d1e4
f0396070eb
f1166757ba
9b76434ad5
440d0cd179
a047d30eca
03d0f331f7
049674aeab
50f053cada
140af43c26
f467188876
4a4379b312
8c02fe7b89
11be920e90
8c19b355a5
67fd60275a
b310490fa5
0ee0812a2b
55bbff6346
9c6cfc1ff0
f753d7cd42
f5be1d6b65
.github/workflows/build.yml (vendored) | 9
@@ -25,7 +25,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.16', 'go1.17']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.16', 'go1.17']
 
         include:
           - job_name: linux
@@ -39,6 +39,13 @@ jobs:
             librclonetest: true
             deploy: true
 
+          - job_name: linux_386
+            os: ubuntu-latest
+            go: '1.18.x'
+            goarch: 386
+            gotags: cmount
+            quicktest: true
+
           - job_name: mac_amd64
             os: macos-11
             go: '1.18.x'
@@ -5,7 +5,7 @@ linters:
     - deadcode
     - errcheck
     - goimports
-    - revive
+    #- revive
     - ineffassign
     - structcheck
     - varcheck
@@ -206,9 +206,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
 			return err
 		}
 		mu.Lock()
-		f.upstreams[dir] = u
+		if _, found := f.upstreams[dir]; found {
+			err = fmt.Errorf("duplicate directory name %q", dir)
+		} else {
+			f.upstreams[dir] = u
+		}
 		mu.Unlock()
-		return nil
+		return err
 	})
 }
 err = g.Wait()
@@ -758,6 +758,9 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
 		} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
 			fs.Errorf(f, "Received download limit error: %v", err)
 			return false, fserrors.FatalError(err)
+		} else if f.opt.StopOnUploadLimit && reason == "quotaExceeded" {
+			fs.Errorf(f, "Received upload limit error: %v", err)
+			return false, fserrors.FatalError(err)
 		} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
 			fs.Errorf(f, "Received Shared Drive file limit error: %v", err)
 			return false, fserrors.FatalError(err)
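With this change, a `quotaExceeded` reason becomes fatal when the existing `--drive-stop-on-upload-limit` flag (which sets `StopOnUploadLimit`) is in use, so the transfer aborts instead of retrying. For example (remote and path names hypothetical):

    rclone copy /local/media gdrive:media --drive-stop-on-upload-limit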
@@ -3299,7 +3302,7 @@ drives found and a combined drive.
 
     [AllDrives]
     type = combine
-    remote = "My Drive=My Drive:" "Test Drive=Test Drive:"
+    upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
 
 Adding this to the rclone config file will cause those team drives to
 be accessible with the aliases shown. Any illegal characters will be
@@ -19,6 +19,7 @@ import (
 	_ "github.com/rclone/rclone/backend/local"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/filter"
+	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/fs/sync"
@@ -28,6 +29,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"google.golang.org/api/drive/v3"
+	"google.golang.org/api/googleapi"
 )
 
 func TestDriveScopes(t *testing.T) {
@@ -190,6 +192,60 @@ func TestExtensionsForImportFormats(t *testing.T) {
 	}
 }
 
+func (f *Fs) InternalTestShouldRetry(t *testing.T) {
+	ctx := context.Background()
+	gatewayTimeout := googleapi.Error{
+		Code: 503,
+	}
+	timeoutRetry, timeoutError := f.shouldRetry(ctx, &gatewayTimeout)
+	assert.True(t, timeoutRetry)
+	assert.Equal(t, &gatewayTimeout, timeoutError)
+	generic403 := googleapi.Error{
+		Code: 403,
+	}
+	rLEItem := googleapi.ErrorItem{
+		Reason:  "rateLimitExceeded",
+		Message: "User rate limit exceeded.",
+	}
+	generic403.Errors = append(generic403.Errors, rLEItem)
+	oldStopUpload := f.opt.StopOnUploadLimit
+	oldStopDownload := f.opt.StopOnDownloadLimit
+	f.opt.StopOnUploadLimit = true
+	f.opt.StopOnDownloadLimit = true
+	defer func() {
+		f.opt.StopOnUploadLimit = oldStopUpload
+		f.opt.StopOnDownloadLimit = oldStopDownload
+	}()
+	expectedRLError := fserrors.FatalError(&generic403)
+	rateLimitRetry, rateLimitErr := f.shouldRetry(ctx, &generic403)
+	assert.False(t, rateLimitRetry)
+	assert.Equal(t, rateLimitErr, expectedRLError)
+	dQEItem := googleapi.ErrorItem{
+		Reason: "downloadQuotaExceeded",
+	}
+	generic403.Errors[0] = dQEItem
+	expectedDQError := fserrors.FatalError(&generic403)
+	downloadQuotaRetry, downloadQuotaError := f.shouldRetry(ctx, &generic403)
+	assert.False(t, downloadQuotaRetry)
+	assert.Equal(t, downloadQuotaError, expectedDQError)
+	tDFLEItem := googleapi.ErrorItem{
+		Reason: "teamDriveFileLimitExceeded",
+	}
+	generic403.Errors[0] = tDFLEItem
+	expectedTDFLError := fserrors.FatalError(&generic403)
+	teamDriveFileLimitRetry, teamDriveFileLimitError := f.shouldRetry(ctx, &generic403)
+	assert.False(t, teamDriveFileLimitRetry)
+	assert.Equal(t, teamDriveFileLimitError, expectedTDFLError)
+	qEItem := googleapi.ErrorItem{
+		Reason: "quotaExceeded",
+	}
+	generic403.Errors[0] = qEItem
+	expectedQuotaError := fserrors.FatalError(&generic403)
+	quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
+	assert.False(t, quotaExceededRetry)
+	assert.Equal(t, quotaExceededError, expectedQuotaError)
+}
+
 func (f *Fs) InternalTestDocumentImport(t *testing.T) {
 	oldAllow := f.opt.AllowImportNameChange
 	f.opt.AllowImportNameChange = true
@@ -545,6 +601,7 @@ func (f *Fs) InternalTest(t *testing.T) {
 	t.Run("UnTrash", f.InternalTestUnTrash)
 	t.Run("CopyID", f.InternalTestCopyID)
 	t.Run("AgeQuery", f.InternalTestAgeQuery)
+	t.Run("ShouldRetry", f.InternalTestShouldRetry)
 }
 
 var _ fstests.InternalTester = (*Fs)(nil)
@@ -304,6 +304,9 @@ outer:
 //
 // Can be called from atexit handler
 func (b *batcher) Shutdown() {
+	if !b.Batching() {
+		return
+	}
 	b.shutOnce.Do(func() {
 		atexit.Unregister(b.atexit)
 		fs.Infof(b.f, "Committing uploads - please wait...")
@@ -42,54 +42,70 @@ func init() {
 		MetadataInfo: &fs.MetadataInfo{
 			System: map[string]fs.MetadataHelp{
 				"name": {
-					Help:    "Full file path, without the bucket part",
-					Type:    "filename",
-					Example: "backend/internetarchive/internetarchive.go",
+					Help:     "Full file path, without the bucket part",
+					Type:     "filename",
+					Example:  "backend/internetarchive/internetarchive.go",
+					ReadOnly: true,
 				},
 				"source": {
-					Help:    "The source of the file",
-					Type:    "string",
-					Example: "original",
+					Help:     "The source of the file",
+					Type:     "string",
+					Example:  "original",
+					ReadOnly: true,
 				},
 				"mtime": {
-					Help:    "Time of last modification, managed by Rclone",
-					Type:    "RFC 3339",
-					Example: "2006-01-02T15:04:05.999999999Z",
+					Help:     "Time of last modification, managed by Rclone",
+					Type:     "RFC 3339",
+					Example:  "2006-01-02T15:04:05.999999999Z",
+					ReadOnly: true,
 				},
 				"size": {
-					Help:    "File size in bytes",
-					Type:    "decimal number",
-					Example: "123456",
+					Help:     "File size in bytes",
+					Type:     "decimal number",
+					Example:  "123456",
+					ReadOnly: true,
 				},
 				"md5": {
-					Help:    "MD5 hash calculated by Internet Archive",
-					Type:    "string",
-					Example: "01234567012345670123456701234567",
+					Help:     "MD5 hash calculated by Internet Archive",
+					Type:     "string",
+					Example:  "01234567012345670123456701234567",
+					ReadOnly: true,
 				},
 				"crc32": {
-					Help:    "CRC32 calculated by Internet Archive",
-					Type:    "string",
-					Example: "01234567",
+					Help:     "CRC32 calculated by Internet Archive",
+					Type:     "string",
+					Example:  "01234567",
+					ReadOnly: true,
 				},
 				"sha1": {
-					Help:    "SHA1 hash calculated by Internet Archive",
-					Type:    "string",
-					Example: "0123456701234567012345670123456701234567",
+					Help:     "SHA1 hash calculated by Internet Archive",
+					Type:     "string",
+					Example:  "0123456701234567012345670123456701234567",
+					ReadOnly: true,
 				},
 				"format": {
-					Help:    "Name of format identified by Internet Archive",
-					Type:    "string",
-					Example: "Comma-Separated Values",
+					Help:     "Name of format identified by Internet Archive",
+					Type:     "string",
+					Example:  "Comma-Separated Values",
+					ReadOnly: true,
 				},
 				"old_version": {
-					Help:    "Whether the file was replaced and moved by keep-old-version flag",
-					Type:    "boolean",
-					Example: "true",
+					Help:     "Whether the file was replaced and moved by keep-old-version flag",
+					Type:     "boolean",
+					Example:  "true",
+					ReadOnly: true,
 				},
 				"viruscheck": {
-					Help:    "The last time viruscheck process was run for the file (?)",
-					Type:    "unixtime",
-					Example: "1654191352",
+					Help:     "The last time viruscheck process was run for the file (?)",
+					Type:     "unixtime",
+					Example:  "1654191352",
+					ReadOnly: true,
 				},
+				"summation": {
+					Help:     "Check https://forum.rclone.org/t/31922 for how it is used",
+					Type:     "string",
+					Example:  "md5",
+					ReadOnly: true,
+				},
 
 				"rclone-ia-mtime": {
@@ -173,7 +189,7 @@ var roMetadataKey = map[string]interface{}{
 	// do not add mtime here, it's a documented exception
 	"name": nil, "source": nil, "size": nil, "md5": nil,
 	"crc32": nil, "sha1": nil, "format": nil, "old_version": nil,
-	"viruscheck": nil,
+	"viruscheck": nil, "summation": nil,
 }
 
 // Options defines the configuration for this backend
// Options defines the configuration for this backend
|
||||
@@ -222,6 +238,7 @@ type IAFile struct {
|
||||
Md5 string `json:"md5"`
|
||||
Crc32 string `json:"crc32"`
|
||||
Sha1 string `json:"sha1"`
|
||||
Summation string `json:"summation"`
|
||||
|
||||
rawData json.RawMessage
|
||||
}
|
||||
@@ -1135,16 +1152,21 @@ func (f *Fs) waitDelete(ctx context.Context, bucket, bucketPath string) (err err
 }
 
 func makeValidObject(f *Fs, remote string, file IAFile, mtime time.Time, size int64) *Object {
-	return &Object{
+	ret := &Object{
 		fs:      f,
 		remote:  remote,
 		modTime: mtime,
 		size:    size,
-		md5:     file.Md5,
-		crc32:   file.Crc32,
-		sha1:    file.Sha1,
 		rawData: file.rawData,
 	}
+	// hashes from _files.xml (where summation != "") are different from those in other files
+	// https://forum.rclone.org/t/internet-archive-md5-tag-in-id-files-xml-interpreted-incorrectly/31922
+	if file.Summation == "" {
+		ret.md5 = file.Md5
+		ret.crc32 = file.Crc32
+		ret.sha1 = file.Sha1
+	}
+	return ret
 }
 
 func makeValidObject2(f *Fs, file IAFile, bucket string) *Object {
@@ -118,7 +118,7 @@ func init() {
 			Help: "Microsoft Cloud Germany",
 		}, {
 			Value: regionCN,
-			Help:  "Azure and Office 365 operated by 21Vianet in China",
+			Help:  "Azure and Office 365 operated by Vnet Group in China",
 		},
 	},
 }, {
@@ -2184,7 +2184,7 @@ func (o *Object) ID() string {
  * 3. To avoid region-related issues, please don't manually build rest.Opts from scratch.
  *    Instead, use these helper functions, and customize the URL afterwards if needed.
  *
- * currently, the 21ViaNet's API differs in the following places:
+ * currently, the Vnet Group's API differs in the following places:
  * - https://{Endpoint}/drives/{driveID}/items/{leaf}:/{route}
  *   - this API doesn't work (gives invalid request)
  *   - can be replaced with the following API:
@@ -2233,7 +2233,7 @@ func escapeSingleQuote(str string) string {
 // newOptsCallWithIDPath build the rest.Opts structure with *a normalizedID (driveID#fileID, or simply fileID) and leaf*
 // using url template https://{Endpoint}/drives/{driveID}/items/{leaf}:/{route} (for international OneDrive)
 // or https://{Endpoint}/drives/{driveID}/items/children('{leaf}')/{route}
-// and https://{Endpoint}/drives/{driveID}/items/children('@a1')/{route}?@a1=URLEncode("'{leaf}'") (for 21ViaNet)
+// and https://{Endpoint}/drives/{driveID}/items/children('@a1')/{route}?@a1=URLEncode("'{leaf}'") (for Vnet Group)
 // if isPath is false, this function will only work when the leaf is "" or a child name (i.e. it doesn't accept multi-level leaf)
 // if isPath is true, multi-level leaf like a/b/c can be passed
 func (f *Fs) newOptsCallWithIDPath(normalizedID string, leaf string, isPath bool, method string, route string) (opts rest.Opts, ok bool) {
@@ -4570,7 +4570,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	// uploaded properly. If size < 0 then we need to do the HEAD.
 	if o.fs.opt.NoHead && size >= 0 {
 		var head s3.HeadObjectOutput
-		structs.SetFrom(&head, req)
+		structs.SetFrom(&head, &req)
 		head.ETag = &md5sumHex // doesn't matter quotes are missing
 		head.ContentLength = &size
 		// If we have done a single part PUT request then we can read these
@@ -67,8 +67,26 @@ func (f *Fs) InternalTestMetadata(t *testing.T) {
 	}
 }
 
+func (f *Fs) InternalTestNoHead(t *testing.T) {
+	ctx := context.Background()
+	// Set NoHead for this test
+	f.opt.NoHead = true
+	defer func() {
+		f.opt.NoHead = false
+	}()
+	contents := random.String(1000)
+	item := fstest.NewItem("test-no-head", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
+	obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
+	defer func() {
+		assert.NoError(t, obj.Remove(ctx))
+	}()
+	// PutTestContents checks the received object
+
+}
+
 func (f *Fs) InternalTest(t *testing.T) {
 	t.Run("Metadata", f.InternalTestMetadata)
+	t.Run("NoHead", f.InternalTestNoHead)
 }
 
 var _ fstests.InternalTester = (*Fs)(nil)
@@ -935,11 +935,22 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
 	// It appears that WS FTP doesn't like relative paths,
 	// and the openssh sftp tool also uses absolute paths.
 	if !path.IsAbs(f.root) {
-		path, err := c.sftpClient.RealPath(f.root)
+		// Trying RealPath first, to perform proper server-side canonicalize.
+		// It may fail (SSH_FX_FAILURE reported on WS FTP) and will then resort
+		// to simple path join with current directory from Getwd (which can work
+		// on WS FTP, even though it is also based on RealPath).
+		absRoot, err := c.sftpClient.RealPath(f.root)
 		if err != nil {
-			fs.Debugf(f, "Failed to resolve path - using relative paths: %v", err)
+			fs.Debugf(f, "Failed to resolve path using RealPath: %v", err)
+			cwd, err := c.sftpClient.Getwd()
+			if err != nil {
+				fs.Debugf(f, "Failed to read current directory - using relative paths: %v", err)
+			} else {
+				f.absRoot = path.Join(cwd, f.root)
+				fs.Debugf(f, "Relative path joined with current directory to get absolute path %q", f.absRoot)
+			}
 		} else {
-			f.absRoot = path
+			f.absRoot = absRoot
 			fs.Debugf(f, "Relative path resolved to %q", f.absRoot)
 		}
 	}
@@ -16,11 +16,14 @@ func init() {
 // Given the order of the candidates, act on the first one found where the relative path exists.
 type EpFF struct{}
 
-func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath string) (*upstream.Fs, error) {
+func (p *EpFF) epffIsLocal(ctx context.Context, upstreams []*upstream.Fs, filePath string, isLocal bool) (*upstream.Fs, error) {
 	ch := make(chan *upstream.Fs, len(upstreams))
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 	for _, u := range upstreams {
+		if u.IsLocal() != isLocal {
+			continue
+		}
 		u := u // Closure
 		go func() {
 			rfs := u.RootFs
@@ -32,7 +35,10 @@ func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath stri
 		}()
 	}
 	var u *upstream.Fs
-	for range upstreams {
+	for _, upstream := range upstreams {
+		if upstream.IsLocal() != isLocal {
+			continue
+		}
 		u = <-ch
 		if u != nil {
 			break
@@ -44,6 +50,15 @@ func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath stri
 	return u, nil
 }
 
+func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath string) (*upstream.Fs, error) {
+	// search local disks first
+	u, err := p.epffIsLocal(ctx, upstreams, filePath, true)
+	if err == fs.ErrorObjectNotFound {
+		u, err = p.epffIsLocal(ctx, upstreams, filePath, false)
+	}
+	return u, err
+}
+
 // Action category policy, governing the modification of files and directories
 func (p *EpFF) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
 	if len(upstreams) == 0 {
@@ -169,7 +169,11 @@ func (f *Fs) mkdir(ctx context.Context, dir string) ([]*upstream.Fs, error) {
 	if err != nil {
 		return nil, err
 	}
-	return upstreams, nil
+	// If created roots then choose one
+	if dir == "" {
+		upstreams, err = f.create(ctx, dir)
+	}
+	return upstreams, err
 }
 
 // Mkdir makes the root directory of the Fs object
@@ -834,6 +838,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		}
 	}
 
+	root = strings.Trim(root, "/")
 	upstreams := make([]*upstream.Fs, len(opt.Upstreams))
 	errs := Errors(make([]error, len(opt.Upstreams)))
 	multithread(len(opt.Upstreams), func(i int) {
@@ -24,15 +24,19 @@ var (
 
 // Fs is a wrap of any fs and its configs
 type Fs struct {
+	// In order to ensure memory alignment on 32-bit architectures
+	// when this field is accessed through sync/atomic functions,
+	// it must be the first entry in the struct
+	cacheExpiry int64 // usage cache expiry time
 	fs.Fs
 	RootFs      fs.Fs
 	RootPath    string
 	Opt         *common.Options
 	writable    bool
 	creatable   bool
+	isLocal     bool
 	usage       *fs.Usage     // Cache the usage
 	cacheTime   time.Duration // cache duration
-	cacheExpiry int64         // usage cache expiry time
 	cacheMutex  sync.RWMutex
 	cacheOnce   sync.Once
 	cacheUpdate bool // if the cache is updating
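The comment added above reflects a documented sync/atomic constraint: on 32-bit platforms, 64-bit atomic operations require 8-byte alignment, which Go only guarantees for the first word of an allocated struct. A minimal standalone sketch of the rule (type and field names hypothetical, not part of this PR):

    package main

    import "sync/atomic"

    type unaligned struct {
    	flag   bool  // may leave expiry only 4-byte aligned on 386/ARM
    	expiry int64 // atomic.LoadInt64(&u.expiry) can panic on 32-bit builds
    }

    type aligned struct {
    	expiry int64 // first field: 64-bit aligned even on 32-bit platforms
    	flag   bool
    }

    func main() {
    	a := &aligned{}
    	atomic.StoreInt64(&a.expiry, 42) // safe on all supported platforms
    	_ = atomic.LoadInt64(&a.expiry)
    }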
@@ -92,6 +96,7 @@ func New(ctx context.Context, remote, root string, opt *common.Options) (*Fs, er
 		return nil, err
 	}
 	f.RootFs = rFs
+	f.isLocal = rFs.Features().IsLocal
 	rootString := fspath.JoinRootPath(remote, root)
 	myFs, err := cache.Get(ctx, rootString)
 	if err != nil && err != fs.ErrorIsFile {
@@ -139,6 +144,11 @@ func (f *Fs) WrapEntry(e fs.DirEntry) (Entry, error) {
 	}
 }
 
+// IsLocal returns true if the upstream Fs is a local disk
+func (f *Fs) IsLocal() bool {
+	return f.isLocal
+}
+
 // UpstreamFs get the upstream Fs the entry is stored in
 func (e *Directory) UpstreamFs() *Fs {
 	return e.f
@@ -49,6 +49,11 @@ extended explanation in the [copy](/commands/rclone_copy/) command if unsure.
 If dest:path doesn't exist, it is created and the source:path contents
 go there.
 
+It is not possible to sync overlapping remotes. However, you may exclude
+the destination from the sync with a filter rule or by putting an
+exclude-if-present file inside the destination directory and sync to a
+destination that is inside the source directory.
+
 **Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics
 
 **Note**: Use the ` + "`rclone dedupe`" + ` command to deal with "Duplicate object/directory found in source/destination - ignoring" errors.
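For example (remote and directory names hypothetical), the filter-rule escape hatch described above allows a destination nested inside the source:

    rclone sync remote:dir remote:dir/backup --exclude "/backup/**"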
@@ -626,3 +626,7 @@ put them back in again.` >}}
 * Lorenzo Maiorfi <maiorfi@gmail.com>
 * Claudio Maradonna <penguyman@stronzi.org>
 * Ovidiu Victor Tatar <ovi.tatar@googlemail.com>
+* Evan Spensley <epspensley@gmail.com>
+* Yen Hu <61753151+0x59656e@users.noreply.github.com>
+* Steve Kowalik <steven@wedontsleep.org>
+* Jordi Gonzalez Muñoz <jordigonzm@gmail.com>
@@ -116,7 +116,7 @@ This would produce something like this:
 
     [AllDrives]
     type = combine
-    remote = "My Drive=My Drive:" "Test Drive=Test Drive:"
+    upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
 
 If you then add that config to your config file (find it with `rclone
 config file`) then you can access all the shared drives in one place
@@ -582,7 +582,8 @@ been added) in DIR, then it will be overwritten.
 
 The remote in use must support server-side move or copy and you must
 use the same remote as the destination of the sync. The backup
-directory must not overlap the destination directory.
+directory must not overlap the destination directory without it being
+excluded by a filter rule.
 
 For example
 
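For example (remote and directory names hypothetical), the backup directory may now live inside the destination as long as a filter rule excludes it:

    rclone sync /path/to/local remote:current --backup-dir remote:current/old --exclude "/old/**"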
@@ -1643,6 +1644,18 @@ This sets the interval between each retry specified by `--retries`
 
 The default is `0`. Use `0` to disable.
 
+### --server-side-across-configs ###
+
+Allow server-side operations (e.g. copy or move) to work across
+different configurations.
+
+This can be useful if you wish to do a server-side copy or move
+between two remotes which use the same backend but are configured
+differently.
+
+Note that this isn't enabled by default because it isn't easy for
+rclone to tell if it will work between any two configurations.
+
 ### --size-only ###
 
 Normally rclone will look at modification time and size of files to
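For example (remote names hypothetical), a server-side copy between two differently configured remotes of the same backend:

    rclone copy --server-side-across-configs drive-alice:backup drive-bob:backup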
@@ -1332,7 +1332,7 @@ drives found and a combined drive.
 
     [AllDrives]
     type = combine
-    remote = "My Drive=My Drive:" "Test Drive=Test Drive:"
+    upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
 
 Adding this to the rclone config file will cause those team drives to
 be accessible with the aliases shown. Any illegal characters will be
@@ -318,7 +318,7 @@ such as a regular [sync](https://rclone.org/commands/rclone_sync/), you will pro
 to configure your rclone command in your operating system's scheduler. If you need to
 expose *service*-like features, such as [remote control](https://rclone.org/rc/),
 [GUI](https://rclone.org/gui/), [serve](https://rclone.org/commands/rclone_serve/)
-or [mount](https://rclone.org/commands/rclone_move/), you will often want an rclone
+or [mount](https://rclone.org/commands/rclone_mount/), you will often want an rclone
 command always running in the background, and configuring it to run in a service infrastructure
 may be a better option. Below are some alternatives on how to achieve this on
 different operating systems.
@@ -351,7 +351,7 @@ c:\rclone\rclone.exe sync c:\files remote:/files --no-console --log-file c:\rclo
 
 #### User account
 
-As mentioned in the [mount](https://rclone.org/commands/rclone_move/) documentation,
+As mentioned in the [mount](https://rclone.org/commands/rclone_mount/) documentation,
 mounted drives created as Administrator are not visible to other accounts, not even the
 account that was elevated as Administrator. By running the mount command as the
 built-in `SYSTEM` user account, it will create drives accessible for everyone on
@@ -12,11 +12,10 @@ Refer to [IAS3 API documentation](https://archive.org/services/docs/api/ias3.htm
 Paths are specified as `remote:bucket` (or `remote:` for the `lsd`
 command.) You may put subdirectories in too, e.g. `remote:item/path/to/dir`.
 
-Once you have made a remote (see the provider specific section above)
-you can use it like this:
-
 Unlike S3, listing all the items you have uploaded is not supported.
 
+Once you have made a remote, you can use it like this:
+
 Make a new item
 
     rclone mkdir remote:item
@@ -53,6 +52,7 @@ The following are reserved by Internet Archive:
 - `format`
 - `old_version`
 - `viruscheck`
+- `summation`
 
 Trying to set values to these keys is ignored with a warning.
 Only setting `mtime` is an exception. Doing so makes it behave identically to setting ModTime.
@@ -258,19 +258,20 @@ Here are the possible system metadata items for the internetarchive backend.
 
 | Name | Help | Type | Example | Read Only |
 |------|------|------|---------|-----------|
-| crc32 | CRC32 calculated by Internet Archive | string | 01234567 | N |
-| format | Name of format identified by Internet Archive | string | Comma-Separated Values | N |
-| md5 | MD5 hash calculated by Internet Archive | string | 01234567012345670123456701234567 | N |
-| mtime | Time of last modification, managed by Rclone | RFC 3339 | 2006-01-02T15:04:05.999999999Z | N |
-| name | Full file path, without the bucket part | filename | backend/internetarchive/internetarchive.go | N |
-| old_version | Whether the file was replaced and moved by keep-old-version flag | boolean | true | N |
+| crc32 | CRC32 calculated by Internet Archive | string | 01234567 | **Y** |
+| format | Name of format identified by Internet Archive | string | Comma-Separated Values | **Y** |
+| md5 | MD5 hash calculated by Internet Archive | string | 01234567012345670123456701234567 | **Y** |
+| mtime | Time of last modification, managed by Rclone | RFC 3339 | 2006-01-02T15:04:05.999999999Z | **Y** |
+| name | Full file path, without the bucket part | filename | backend/internetarchive/internetarchive.go | **Y** |
+| old_version | Whether the file was replaced and moved by keep-old-version flag | boolean | true | **Y** |
 | rclone-ia-mtime | Time of last modification, managed by Internet Archive | RFC 3339 | 2006-01-02T15:04:05.999999999Z | N |
 | rclone-mtime | Time of last modification, managed by Rclone | RFC 3339 | 2006-01-02T15:04:05.999999999Z | N |
 | rclone-update-track | Random value used by Rclone for tracking changes inside Internet Archive | string | aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | N |
-| sha1 | SHA1 hash calculated by Internet Archive | string | 0123456701234567012345670123456701234567 | N |
-| size | File size in bytes | decimal number | 123456 | N |
-| source | The source of the file | string | original | N |
-| viruscheck | The last time viruscheck process was run for the file (?) | unixtime | 1654191352 | N |
+| sha1 | SHA1 hash calculated by Internet Archive | string | 0123456701234567012345670123456701234567 | **Y** |
+| size | File size in bytes | decimal number | 123456 | **Y** |
+| source | The source of the file | string | original | **Y** |
+| summation | Check https://forum.rclone.org/t/31922 for how it is used | string | md5 | **Y** |
+| viruscheck | The last time viruscheck process was run for the file (?) | unixtime | 1654191352 | **Y** |
 
 See the [metadata](/docs/#metadata) docs for more info.
@@ -263,7 +263,7 @@ Properties:
     - "de"
         - Microsoft Cloud Germany
     - "cn"
-        - Azure and Office 365 operated by 21Vianet in China
+        - Azure and Office 365 operated by Vnet Group in China
 
 ### Advanced options
@@ -1 +1 @@
-v1.59.0
+v1.60.0
fs/config.go | 181
@@ -45,96 +45,97 @@ var (
 
 // ConfigInfo is filesystem config options
 type ConfigInfo struct {
 	LogLevel                LogLevel
 	StatsLogLevel           LogLevel
 	UseJSONLog              bool
 	DryRun                  bool
 	Interactive             bool
 	CheckSum                bool
 	SizeOnly                bool
 	IgnoreTimes             bool
 	IgnoreExisting          bool
 	IgnoreErrors            bool
 	ModifyWindow            time.Duration
 	Checkers                int
 	Transfers               int
 	ConnectTimeout          time.Duration // Connect timeout
 	Timeout                 time.Duration // Data channel timeout
 	ExpectContinueTimeout   time.Duration
 	Dump                    DumpFlags
 	InsecureSkipVerify      bool // Skip server certificate verification
 	DeleteMode              DeleteMode
 	MaxDelete               int64
 	TrackRenames            bool   // Track file renames.
 	TrackRenamesStrategy    string // Comma separated list of strategies used to track renames
 	LowLevelRetries         int
 	UpdateOlder             bool // Skip files that are newer on the destination
 	NoGzip                  bool // Disable compression
 	MaxDepth                int
 	IgnoreSize              bool
 	IgnoreChecksum          bool
 	IgnoreCaseSync          bool
 	NoTraverse              bool
 	CheckFirst              bool
 	NoCheckDest             bool
 	NoUnicodeNormalization  bool
 	NoUpdateModTime         bool
 	DataRateUnit            string
 	CompareDest             []string
 	CopyDest                []string
 	BackupDir               string
 	Suffix                  string
 	SuffixKeepExtension     bool
 	UseListR                bool
 	BufferSize              SizeSuffix
 	BwLimit                 BwTimetable
 	BwLimitFile             BwTimetable
 	TPSLimit                float64
 	TPSLimitBurst           int
 	BindAddr                net.IP
 	DisableFeatures         []string
 	UserAgent               string
 	Immutable               bool
 	AutoConfirm             bool
 	StreamingUploadCutoff   SizeSuffix
 	StatsFileNameLength     int
 	AskPassword             bool
 	PasswordCommand         SpaceSepList
 	UseServerModTime        bool
 	MaxTransfer             SizeSuffix
 	MaxDuration             time.Duration
 	CutoffMode              CutoffMode
 	MaxBacklog              int
 	MaxStatsGroups          int
 	StatsOneLine            bool
 	StatsOneLineDate        bool   // If we want a date prefix at all
 	StatsOneLineDateFormat  string // If we want to customize the prefix
 	ErrorOnNoTransfer       bool   // Set appropriate exit code if no files transferred
 	Progress                bool
 	ProgressTerminalTitle   bool
 	Cookie                  bool
 	UseMmap                 bool
 	CaCert                  string // Client Side CA
 	ClientCert              string // Client Side Cert
 	ClientKey               string // Client Side Key
 	MultiThreadCutoff       SizeSuffix
 	MultiThreadStreams      int
 	MultiThreadSet          bool   // whether MultiThreadStreams was set (set in fs/config/configflags)
 	OrderBy                 string // instructions on how to order the transfer
 	UploadHeaders           []*HTTPOption
 	DownloadHeaders         []*HTTPOption
 	Headers                 []*HTTPOption
 	MetadataSet             Metadata // extra metadata to write when uploading
 	RefreshTimes            bool
 	NoConsole               bool
 	TrafficClass            uint8
 	FsCacheExpireDuration   time.Duration
 	FsCacheExpireInterval   time.Duration
 	DisableHTTP2            bool
 	HumanReadable           bool
 	KvLockTime              time.Duration // maximum time to keep key-value database locked by process
 	DisableHTTPKeepAlives   bool
 	Metadata                bool
+	ServerSideAcrossConfigs bool
 }
 
 // NewConfig creates a new config with everything set to the default
@@ -141,6 +141,7 @@ func AddFlags(ci *fs.ConfigInfo, flagSet *pflag.FlagSet) {
 	flags.DurationVarP(flagSet, &ci.KvLockTime, "kv-lock-time", "", ci.KvLockTime, "Maximum time to keep key-value database locked by process")
 	flags.BoolVarP(flagSet, &ci.DisableHTTPKeepAlives, "disable-http-keep-alives", "", ci.DisableHTTPKeepAlives, "Disable HTTP keep-alives and use each connection once.")
 	flags.BoolVarP(flagSet, &ci.Metadata, "metadata", "M", ci.Metadata, "If set, preserve metadata when copying objects")
+	flags.BoolVarP(flagSet, &ci.ServerSideAcrossConfigs, "server-side-across-configs", "", ci.ServerSideAcrossConfigs, "Allow server-side operations (e.g. copy) to work across different configs")
 }
 
 // ParseHeaders converts the strings passed in via the header flags into HTTPOptions
fs/fs.go | 2
@@ -40,7 +40,7 @@ var (
 	ErrorNotAFile            = errors.New("is not a regular file")
 	ErrorNotDeleting         = errors.New("not deleting files as there were IO errors")
 	ErrorNotDeletingDirs     = errors.New("not deleting directories as there were IO errors")
-	ErrorOverlapping         = errors.New("can't sync or move files on overlapping remotes")
+	ErrorOverlapping         = errors.New("can't sync or move files on overlapping remotes (try excluding the destination with a filter rule)")
 	ErrorDirectoryNotEmpty   = errors.New("directory not empty")
 	ErrorImmutableModified   = errors.New("immutable file modified")
 	ErrorPermissionDenied    = errors.New("permission denied")
@@ -424,7 +424,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
 			return nil, accounting.ErrorMaxTransferLimitReachedGraceful
 		}
 	}
-	if doCopy := f.Features().Copy; doCopy != nil && (SameConfig(src.Fs(), f) || (SameRemoteType(src.Fs(), f) && f.Features().ServerSideAcrossConfigs)) {
+	if doCopy := f.Features().Copy; doCopy != nil && (SameConfig(src.Fs(), f) || (SameRemoteType(src.Fs(), f) && (f.Features().ServerSideAcrossConfigs || ci.ServerSideAcrossConfigs))) {
 		in := tr.Account(ctx, nil) // account the transfer
 		in.ServerSideCopyStart()
 		newDst, err = doCopy(ctx, src, remote)
@@ -604,6 +604,7 @@ func SameObject(src, dst fs.Object) bool {
 // It returns the destination object if possible. Note that this may
 // be nil.
 func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
+	ci := fs.GetConfig(ctx)
 	tr := accounting.Stats(ctx).NewCheckingTransfer(src)
 	defer func() {
 		if err == nil {
@@ -618,7 +619,7 @@ func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.
 		return newDst, nil
 	}
 	// See if we have Move available
-	if doMove := fdst.Features().Move; doMove != nil && (SameConfig(src.Fs(), fdst) || (SameRemoteType(src.Fs(), fdst) && fdst.Features().ServerSideAcrossConfigs)) {
+	if doMove := fdst.Features().Move; doMove != nil && (SameConfig(src.Fs(), fdst) || (SameRemoteType(src.Fs(), fdst) && (fdst.Features().ServerSideAcrossConfigs || ci.ServerSideAcrossConfigs))) {
 		// Delete destination if it exists and is not the same file as src (could be same file while seemingly different if the remote is case insensitive)
 		if dst != nil && !SameObject(src, dst) {
 			err = DeleteFile(ctx, dst)
@@ -814,17 +815,6 @@ func fixRoot(f fs.Info) string {
 	return s
 }
 
-// Overlapping returns true if fdst and fsrc point to the same
-// underlying Fs and they overlap.
-func Overlapping(fdst, fsrc fs.Info) bool {
-	if !SameConfig(fdst, fsrc) {
-		return false
-	}
-	fdstRoot := fixRoot(fdst)
-	fsrcRoot := fixRoot(fsrc)
-	return strings.HasPrefix(fdstRoot, fsrcRoot) || strings.HasPrefix(fsrcRoot, fdstRoot)
-}
-
 // OverlappingFilterCheck returns true if fdst and fsrc point to the same
 // underlying Fs and they overlap without fdst being excluded by any filter rule.
 func OverlappingFilterCheck(ctx context.Context, fdst fs.Fs, fsrc fs.Fs) bool {
@@ -1848,10 +1838,10 @@ func BackupDir(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, srcFileName string)
 			return nil, fserrors.FatalError(errors.New("parameter to --backup-dir has to be on the same remote as destination"))
 		}
 		if srcFileName == "" {
-			if Overlapping(fdst, backupDir) {
+			if OverlappingFilterCheck(ctx, backupDir, fdst) {
 				return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't overlap"))
 			}
-			if Overlapping(fsrc, backupDir) {
+			if OverlappingFilterCheck(ctx, backupDir, fsrc) {
 				return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't overlap"))
 			}
 		} else {
@@ -1243,35 +1243,6 @@ func TestSame(t *testing.T) {
 	}
 }
 
-func TestOverlapping(t *testing.T) {
-	a := &testFsInfo{name: "name", root: "root"}
-	slash := string(os.PathSeparator) // native path separator
-	for _, test := range []struct {
-		name     string
-		root     string
-		expected bool
-	}{
-		{"name", "root", true},
-		{"namey", "root", false},
-		{"name", "rooty", false},
-		{"namey", "rooty", false},
-		{"name", "roo", false},
-		{"name", "root/toot", true},
-		{"name", "root/toot/", true},
-		{"name", "root" + slash + "toot", true},
-		{"name", "root" + slash + "toot" + slash, true},
-		{"name", "", true},
-		{"name", "/", true},
-	} {
-		b := &testFsInfo{name: test.name, root: test.root}
-		what := fmt.Sprintf("(%q,%q) vs (%q,%q)", a.name, a.root, b.name, b.root)
-		actual := operations.Overlapping(a, b)
-		assert.Equal(t, test.expected, actual, what)
-		actual = operations.Overlapping(b, a)
-		assert.Equal(t, test.expected, actual, what)
-	}
-}
-
 // testFs is for unit testing fs.Fs
 type testFs struct {
 	testFsInfo
@@ -406,3 +406,34 @@ func rcJobStop(ctx context.Context, in rc.Params) (out rc.Params, err error) {
 	job.Stop()
 	return out, nil
 }
+
+func init() {
+	rc.Add(rc.Call{
+		Path:  "job/stopgroup",
+		Fn:    rcGroupStop,
+		Title: "Stop all running jobs in a group",
+		Help: `Parameters:
+
+- group - name of the group (string).
+`,
+	})
+}
+
+// Stops all running jobs in a group
+func rcGroupStop(ctx context.Context, in rc.Params) (out rc.Params, err error) {
+	group, err := in.GetString("group")
+	if err != nil {
+		return nil, err
+	}
+	running.mu.RLock()
+	defer running.mu.RUnlock()
+	for _, job := range running.jobs {
+		if job.Group == group {
+			job.mu.Lock()
+			job.Stop()
+			job.mu.Unlock()
+		}
+	}
+	out = make(rc.Params)
+	return out, nil
+}
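Once registered, the new call can be exercised through the remote control interface (group name hypothetical):

    rclone rc job/stopgroup group=myparty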
@@ -452,6 +452,48 @@ func TestRcSyncJobStop(t *testing.T) {
 	assert.Equal(t, false, out["success"])
 }
 
+func TestRcJobStopGroup(t *testing.T) {
+	ctx := context.Background()
+	jobID = 0
+	_, _, err := NewJob(ctx, ctxFn, rc.Params{
+		"_async": true,
+		"_group": "myparty",
+	})
+	require.NoError(t, err)
+	_, _, err = NewJob(ctx, ctxFn, rc.Params{
+		"_async": true,
+		"_group": "myparty",
+	})
+	require.NoError(t, err)
+
+	call := rc.Calls.Get("job/stopgroup")
+	assert.NotNil(t, call)
+	in := rc.Params{"group": "myparty"}
+	out, err := call.Fn(context.Background(), in)
+	require.NoError(t, err)
+	require.Empty(t, out)
+
+	in = rc.Params{}
+	_, err = call.Fn(context.Background(), in)
+	require.Error(t, err)
+	assert.Contains(t, err.Error(), "Didn't find key")
+
+	time.Sleep(10 * time.Millisecond)
+
+	call = rc.Calls.Get("job/status")
+	assert.NotNil(t, call)
+	for i := 1; i <= 2; i++ {
+		in = rc.Params{"jobid": i}
+		out, err = call.Fn(context.Background(), in)
+		require.NoError(t, err)
+		require.NotNil(t, out)
+		assert.Equal(t, "myparty", out["group"])
+		assert.Equal(t, "context canceled", out["error"])
+		assert.Equal(t, true, out["finished"])
+		assert.Equal(t, false, out["success"])
+	}
+}
+
 func TestOnFinish(t *testing.T) {
 	jobID = 0
 	done := make(chan struct{})
@@ -1,4 +1,4 @@
 package fs
 
 // VersionTag of rclone
-var VersionTag = "v1.59.0"
+var VersionTag = "v1.60.0"
@@ -217,6 +217,14 @@ This needs expanding and submitting to pypi...
 
 Rust bindings are available in the `librclone` crate: https://crates.io/crates/librclone
 
+## PHP
+
+The `php` subdirectory shows how to use the C library librclone from PHP through its foreign
+function interface (FFI).
+
+Useful docs:
+- [PHP / FFI](https://www.php.net/manual/en/book.ffi.php)
+
 ## TODO
 
 - Async jobs must currently be cancelled manually - RcloneFinalize doesn't do it.
librclone/php/rclone.php (new file) | 53
@@ -0,0 +1,53 @@
+<?php
+/*
+PHP interface to librclone.so, using FFI ( Foreign Function Interface )
+
+Create an rclone object
+
+    $rc = new Rclone( __DIR__ . '/librclone.so' );
+
+Then call rpc calls on it
+
+    $rc->rpc( "config/listremotes", "{}" );
+
+When finished, close it
+
+    $rc->close();
+*/
+
+class Rclone {
+
+    protected $rclone;
+    private $out;
+
+    public function __construct( $libshared )
+    {
+        $this->rclone = \FFI::cdef("
+            struct RcloneRPCResult {
+                char* Output;
+                int Status;
+            };
+            extern void RcloneInitialize();
+            extern void RcloneFinalize();
+            extern struct RcloneRPCResult RcloneRPC(char* method, char* input);
+            extern void RcloneFreeString(char* str);
+        ", $libshared);
+        $this->rclone->RcloneInitialize();
+    }
+
+    public function rpc( $method, $input ): array
+    {
+        $this->out = $this->rclone->RcloneRPC( $method, $input );
+        $response = [
+            'output' => \FFI::string( $this->out->Output ),
+            'status' => $this->out->Status
+        ];
+        $this->rclone->RcloneFreeString( $this->out->Output );
+        return $response;
+    }
+
+    public function close( ): void
+    {
+        $this->rclone->RcloneFinalize();
+    }
+}
librclone/php/test.php (new file) | 55
@@ -0,0 +1,55 @@
+<?php
+/*
+Test program for librclone
+*/
+
+include_once ( "rclone.php" );
+
+const REMOTE = 'gdrive:/';
+const FOLDER = "rcloneTest";
+const FILE = "testFile.txt";
+
+$rc = new Rclone( __DIR__ . '/librclone.so' );
+
+$response = $rc->rpc( "config/listremotes", "{}" );
+print_r( $response );
+
+$response = $rc->rpc("operations/mkdir",
+    json_encode( [
+        'fs' => REMOTE,
+        'remote'=> FOLDER
+    ]));
+print_r( $response );
+
+$response = $rc->rpc("operations/list",
+    json_encode( [
+        'fs' => REMOTE,
+        'remote'=> ''
+    ]));
+print_r( $response );
+
+file_put_contents("./" . FILE, "Success!!!");
+$response = $rc->rpc("operations/copyfile",
+    json_encode( [
+        'srcFs' => getcwd(),
+        'srcRemote'=> FILE,
+        'dstFs' => REMOTE . FOLDER,
+        'dstRemote' => FILE
+    ]));
+print_r( $response );
+
+$response = $rc->rpc("operations/list",
+    json_encode( [
+        'fs' => REMOTE . FOLDER,
+        'remote'=> ''
+    ]));
+print_r( $response );
+if ( $response['output'] ) {
+    $array = @json_decode( $response['output'], true );
+    if ( $response['status'] == 200 && $array['list'] ?? 0 ) {
+        $valid = $array['list'][0]['Name'] == FILE ? "SUCCESS" : "FAIL";
+        print_r("The test seems: " . $valid . "\n");
+    }
+}
+
+$rc->close();