Mirror of https://github.com/rclone/rclone.git (synced 2026-02-01 17:23:39 +00:00)

Compare commits: v1.51.0...vfs-refact (69 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 40b9e312c6 | |
| | 2b268f9724 | |
| | 7a5a74cecb | |
| | 54a0c6b8ad | |
| | 1ad23c4dc8 | |
| | 7586a345ff | |
| | 393b94bb70 | |
| | e3c11c9ca1 | |
| | 3c91abce74 | |
| | 87d856d71b | |
| | 3855c003ce | |
| | abb9f89f65 | |
| | 17b4058ee9 | |
| | 9663f9b2ab | |
| | d6e10dba33 | |
| | da5cbc194a | |
| | e8eb658ba5 | |
| | 28f69f25a0 | |
| | 07e4b9bb7f | |
| | 708b967f15 | |
| | 7e2568a312 | |
| | bde0334bd8 | |
| | 5470d34740 | |
| | ac9cb50fdb | |
| | 4a8b548add | |
| | 481c8a40ea | |
| | 25ef3a281b | |
| | 219bd97e8a | |
| | 8b14cd24aa | |
| | 3893c14889 | |
| | c41fbc0f90 | |
| | f45425e5a9 | |
| | bd9fd629bc | |
| | 3b19f48929 | |
| | 4996edc030 | |
| | 964f1f6a7e | |
| | e75c1f70bb | |
| | 19a4d74ee7 | |
| | 55b5eded23 | |
| | 3dbcf0af2d | |
| | 4e1a511f88 | |
| | b71e1a16b1 | |
| | ec1271818f | |
| | 8318020387 | |
| | c38d7be373 | |
| | dc31212c3d | |
| | ac60b36e77 | |
| | 1d73f071f6 | |
| | 5c869d5bd3 | |
| | a54210a2e4 | |
| | 040d226028 | |
| | 8b664c3ec5 | |
| | 102a38bb95 | |
| | 7a54e13110 | |
| | feee92c790 | |
| | de93852512 | |
| | dfb710eab7 | |
| | 25cfeb2a64 | |
| | 90377f5e65 | |
| | f1d9bd5eab | |
| | 4ee3c21a9d | |
| | fe6f4135b4 | |
| | 3dfa63b85c | |
| | ff2343475a | |
| | bffd7f0f14 | |
| | 7c55fafe33 | |
| | 2e7fe06beb | |
| | 8ff91ff31b | |
| | 4d1c616e97 | |
.github/workflows/build.yml (vendored, 26 changes)

@@ -19,12 +19,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'modules_race', 'go1.10', 'go1.11', 'go1.12']
+        job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'modules_race', 'go1.11', 'go1.12', 'go1.13']

         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.13.x'
+            go: '1.14.x'
             modules: 'off'
             gotags: cmount
             build_flags: '-include "^linux/"'
@@ -34,7 +34,7 @@ jobs:

           - job_name: mac
             os: macOS-latest
-            go: '1.13.x'
+            go: '1.14.x'
             modules: 'off'
             gotags: '' # cmount doesn't work on osx travis for some reason
             build_flags: '-include "^darwin/amd64" -cgo'
@@ -44,7 +44,7 @@ jobs:

           - job_name: windows_amd64
             os: windows-latest
-            go: '1.13.x'
+            go: '1.14.x'
             modules: 'off'
             gotags: cmount
             build_flags: '-include "^windows/amd64" -cgo'
@@ -54,7 +54,7 @@ jobs:

           - job_name: windows_386
             os: windows-latest
-            go: '1.13.x'
+            go: '1.14.x'
             modules: 'off'
             gotags: cmount
             goarch: '386'
@@ -65,7 +65,7 @@ jobs:

           - job_name: other_os
             os: ubuntu-latest
-            go: '1.13.x'
+            go: '1.14.x'
             modules: 'off'
             build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
             compile_all: true
@@ -73,17 +73,11 @@ jobs:

           - job_name: modules_race
             os: ubuntu-latest
-            go: '1.13.x'
+            go: '1.14.x'
             modules: 'on'
             quicktest: true
             racequicktest: true

-          - job_name: go1.10
-            os: ubuntu-latest
-            go: '1.10.x'
-            modules: 'off'
-            quicktest: true
-
           - job_name: go1.11
             os: ubuntu-latest
             go: '1.11.x'
@@ -96,6 +90,12 @@ jobs:
             modules: 'off'
             quicktest: true

+          - job_name: go1.13
+            os: ubuntu-latest
+            go: '1.13.x'
+            modules: 'off'
+            quicktest: true
+
     name: ${{ matrix.job_name }}

     runs-on: ${{ matrix.os }}
@@ -148,6 +148,7 @@ with modules beneath.
  * ...commands
* docs - the documentation and website
+  * content - adjust these docs only - everything else is autogenerated
  * command - these are auto generated - edit the corresponding .go file
* fs - main rclone definitions - minimal amount of code
  * accounting - bandwidth limiting and statistics
  * asyncreader - an io.Reader which reads ahead
@@ -203,6 +204,9 @@ don't need to run these when adding a feature.
Documentation for rclone sub commands is with their code, eg
`cmd/ls/ls.go`.

+Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
+for small changes in the docs which makes it very easy.
+
## Making a release ##

There are separate instructions for making a release in the RELEASE.md

@@ -8,10 +8,7 @@
[Installation](https://rclone.org/install/) |
[Forum](https://forum.rclone.org/)

-[](https://travis-ci.org/rclone/rclone)
-[](https://ci.appveyor.com/project/rclone/rclone)
-[](https://dev.azure.com/rclone/rclone/_build/latest?definitionId=2&branchName=master)
-[](https://circleci.com/gh/rclone/rclone/tree/master)
+[](https://github.com/rclone/rclone/actions?query=workflow%3Abuild)
[](https://goreportcard.com/report/github.com/rclone/rclone)
[](https://godoc.org/github.com/rclone/rclone)
[](https://hub.docker.com/r/rclone/rclone)

@@ -22,7 +22,7 @@ This file describes how to make the various kinds of releases
* git commit -a -v -m "Version v1.XX.0"
* make retag
* git push --tags origin master
-* # Wait for the appveyor and travis builds to complete then...
+* # Wait for the GitHub builds to complete then...
* make fetch_binaries
* make tarball
* make sign_upload
@@ -16,7 +16,6 @@ import (
	"net/http"
	"net/url"
	"path"
-	"strconv"
	"strings"
	"sync"
	"time"
@@ -199,7 +198,7 @@ func (f *Fs) Root() string {
// String converts this Fs to a string
func (f *Fs) String() string {
	if f.rootContainer == "" {
-		return fmt.Sprintf("Azure root")
+		return "Azure root"
	}
	if f.rootDirectory == "" {
		return fmt.Sprintf("Azure container %s", f.rootContainer)
@@ -1121,22 +1120,6 @@ func (o *Object) readMetaData() (err error) {
	return o.decodeMetaDataFromPropertiesResponse(blobProperties)
}

-// parseTimeString converts a decimal string number of milliseconds
-// elapsed since January 1, 1970 UTC into a time.Time and stores it in
-// the modTime variable.
-func (o *Object) parseTimeString(timeString string) (err error) {
-	if timeString == "" {
-		return nil
-	}
-	unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
-	if err != nil {
-		fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
-		return err
-	}
-	o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
-	return nil
-}
-
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
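The removed helper above converts a decimal string of epoch milliseconds into a `time.Time`. A minimal standalone sketch of that same conversion, with an illustrative name (`parseMsEpoch` is not rclone's API): the value is split into whole seconds and a nanosecond remainder for `time.Unix`.

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// parseMsEpoch converts milliseconds since 1970-01-01 UTC to time.Time.
func parseMsEpoch(s string) (time.Time, error) {
	ms, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return time.Time{}, err
	}
	// 1e3 ms per second; the ms remainder scales to ns by 1e6.
	return time.Unix(ms/1e3, (ms%1e3)*1e6).UTC(), nil
}

func main() {
	t, _ := parseMsEpoch("1583020800123")
	fmt.Println(t) // 2020-03-01 00:00:00.123 +0000 UTC
}
```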
backend/cache/cache_internal_test.go (vendored, 39 changes)

@@ -1,4 +1,5 @@
 // +build !plan9
+// +build !race

 package cache_test

@@ -17,7 +18,6 @@ import (
	"path/filepath"
	"runtime"
	"runtime/debug"
-	"strconv"
	"strings"
	"testing"
	"time"
@@ -1139,23 +1139,6 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
	return f
}

-func (r *run) writeRemoteRandomBytes(t *testing.T, f fs.Fs, p string, size int64) string {
-	remote := path.Join(p, strconv.Itoa(rand.Int())+".bin")
-	// create some rand test data
-	testData := randStringBytes(int(size))
-
-	r.writeRemoteBytes(t, f, remote, testData)
-	return remote
-}
-
-func (r *run) writeObjectRandomBytes(t *testing.T, f fs.Fs, p string, size int64) fs.Object {
-	remote := path.Join(p, strconv.Itoa(rand.Int())+".bin")
-	// create some rand test data
-	testData := randStringBytes(int(size))
-
-	return r.writeObjectBytes(t, f, remote, testData)
-}
-
func (r *run) writeRemoteString(t *testing.T, f fs.Fs, remote, content string) {
	r.writeRemoteBytes(t, f, remote, []byte(content))
}
@@ -1344,26 +1327,6 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
	return l, err
}

-func (r *run) listPath(t *testing.T, f fs.Fs, remote string) []string {
-	var err error
-	var l []string
-	if r.useMount {
-		var list []os.FileInfo
-		list, err = ioutil.ReadDir(path.Join(r.mntDir, remote))
-		for _, ll := range list {
-			l = append(l, ll.Name())
-		}
-	} else {
-		var list fs.DirEntries
-		list, err = f.List(context.Background(), remote)
-		for _, ll := range list {
-			l = append(l, ll.Remote())
-		}
-	}
-	require.NoError(t, err)
-	return l
-}
-
func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
	in, err := os.Open(src)
	if err != nil {
backend/cache/cache_mount_other_test.go (vendored, 7 changes)

@@ -1,7 +1,8 @@
-// +build !linux !go1.11
-// +build !darwin !go1.11
-// +build !freebsd !go1.11
+// +build !linux !go1.13
+// +build !darwin !go1.13
+// +build !freebsd !go1.13
 // +build !windows
+// +build !race

 package cache_test
backend/cache/cache_mount_unix_test.go (vendored, 3 changes)

@@ -1,4 +1,5 @@
-// +build linux,go1.11 darwin,go1.11 freebsd,go1.11
+// +build linux,go1.13 darwin,go1.13 freebsd,go1.13
+// +build !race

 package cache_test
backend/cache/cache_mount_windows_test.go (vendored, 1 change)

@@ -1,4 +1,5 @@
 // +build windows
+// +build !race

 package cache_test
backend/cache/cache_test.go (vendored, 1 change)

@@ -1,6 +1,7 @@
 // Test Cache filesystem interface

 // +build !plan9
+// +build !race

 package cache_test
backend/cache/cache_upload_test.go (vendored, 1 change)

@@ -1,4 +1,5 @@
 // +build !plan9
+// +build !race

 package cache_test
backend/cache/directory.go (vendored, 9 changes)

@@ -101,15 +101,6 @@ func (d *Directory) abs() string {
	return cleanPath(path.Join(d.Dir, d.Name))
}

-// parentRemote returns the absolute path parent remote
-func (d *Directory) parentRemote() string {
-	absPath := d.abs()
-	if absPath == "" {
-		return ""
-	}
-	return cleanPath(path.Dir(absPath))
-}
-
// ModTime returns the cached ModTime
func (d *Directory) ModTime(ctx context.Context) time.Time {
	return time.Unix(0, d.CacheModTime)
backend/cache/storage_persistent.go (vendored, 25 changes)

@@ -767,31 +767,6 @@ func (b *Persistent) iterateBuckets(buk *bolt.Bucket, bucketFn func(name string)
	return err
}

-func (b *Persistent) dumpRoot() string {
-	var itBuckets func(buk *bolt.Bucket) map[string]interface{}
-
-	itBuckets = func(buk *bolt.Bucket) map[string]interface{} {
-		m := make(map[string]interface{})
-		c := buk.Cursor()
-		for k, v := c.First(); k != nil; k, v = c.Next() {
-			if v == nil {
-				buk2 := buk.Bucket(k)
-				m[string(k)] = itBuckets(buk2)
-			} else {
-				m[string(k)] = "-"
-			}
-		}
-		return m
-	}
-	var mm map[string]interface{}
-	_ = b.db.View(func(tx *bolt.Tx) error {
-		mm = itBuckets(tx.Bucket([]byte(RootBucket)))
-		return nil
-	})
-	raw, _ := json.MarshalIndent(mm, "", " ")
-	return string(raw)
-}
-
// addPendingUpload adds a new file to the pending queue of uploads
func (b *Persistent) addPendingUpload(destPath string, started bool) error {
	return b.db.Update(func(tx *bolt.Tx) error {
@@ -47,8 +47,10 @@ func init() {
			},
		},
	}, {
-		Name: "directory_name_encryption",
-		Help: "Option to either encrypt directory names or leave them intact.",
+		Name: "directory_name_encryption",
+		Help: `Option to either encrypt directory names or leave them intact.
+
+NB If filename_encryption is "off" then this option will do nothing.`,
		Default: true,
		Examples: []fs.OptionExample{
			{
@@ -190,7 +190,11 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
	if c != nil {
		return c, nil
	}
-	return f.ftpConnection()
+	c, err = f.ftpConnection()
+	if err != nil && f.opt.Concurrency > 0 {
+		f.tokens.Put()
+	}
+	return c, err
}

// Return an FTP connection to the pool
@@ -203,7 +207,13 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
	if f.opt.Concurrency > 0 {
		defer f.tokens.Put()
	}
+	if pc == nil {
+		return
+	}
	c := *pc
+	if c == nil {
+		return
+	}
	*pc = nil
	if err != nil {
		// If not a regular FTP error code then check the connection
@@ -778,11 +788,13 @@ func (f *ftpReadCloser) Close() error {
	case <-timer.C:
		// if timer fired assume no error but connection dead
		fs.Errorf(f.f, "Timeout when waiting for connection Close")
+		f.f.putFtpConnection(nil, nil)
		return nil
	}
	// if errors while reading or closing, dump the connection
	if err != nil || f.err != nil {
		_ = f.c.Quit()
+		f.f.putFtpConnection(nil, nil)
	} else {
		f.f.putFtpConnection(&f.c, nil)
	}
@@ -857,6 +869,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	if err != nil {
		_ = c.Quit() // toss this connection to avoid sync errors
		remove()
		o.fs.putFtpConnection(nil, err)
		return errors.Wrap(err, "update stor")
	}
	o.fs.putFtpConnection(&c, nil)
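The FTP fix above closes a leak in a token-based connection pool: a token taken before dialling must be returned if the dial fails. A minimal sketch of that pattern under stated assumptions — `dial` and `Conn` are illustrative stand-ins, not rclone's FTP API, and a buffered channel plays the role of rclone's token dispenser:

```go
package main

import "errors"

type Conn struct{}

type pool struct {
	tokens chan struct{} // buffered channel acting as a semaphore
}

func newPool(concurrency int) *pool {
	p := &pool{tokens: make(chan struct{}, concurrency)}
	for i := 0; i < concurrency; i++ {
		p.tokens <- struct{}{}
	}
	return p
}

func dial() (*Conn, error) { return nil, errors.New("dial failed") }

// get takes a token, then dials; on failure the token is put back so
// later callers aren't starved - the bug fixed in the diff above.
func (p *pool) get() (*Conn, error) {
	<-p.tokens
	c, err := dial()
	if err != nil {
		p.tokens <- struct{}{} // return the token on failure
	}
	return c, err
}

func main() {
	p := newPool(2)
	if _, err := p.get(); err != nil {
		println("dial failed, token returned; free tokens:", len(p.tokens)) // 2
	}
}
```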
@@ -253,7 +253,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {

// doAuth runs the actual token request
func doAuth(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
-	loginTokenBytes, err := base64.StdEncoding.DecodeString(loginTokenBase64)
+	loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
	if err != nil {
		return token, err
	}
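A small illustration of why this one-line change matters: URL-safe base64 swaps `+` and `/` for `-` and `_`, and the `Raw` variants drop `=` padding, so `StdEncoding` rejects tokens that `RawURLEncoding` accepts.

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Encode bytes whose URL-safe form contains '-' and '_'.
	token := base64.RawURLEncoding.EncodeToString([]byte{0xfb, 0xff}) // "-_8"
	_, err := base64.StdEncoding.DecodeString(token)
	fmt.Println("StdEncoding:", err) // illegal base64 data at input byte 0
	b, err := base64.RawURLEncoding.DecodeString(token)
	fmt.Println("RawURLEncoding:", b, err) // [251 255] <nil>
}
```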
@@ -1540,21 +1540,74 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
	return response, err
}

+// getPosition gets the current position in a multipart upload
+func (o *Object) getPosition(ctx context.Context, url string) (pos int64, err error) {
+	opts := rest.Opts{
+		Method:  "GET",
+		RootURL: url,
+	}
+	var info api.UploadFragmentResponse
+	var resp *http.Response
+	err = o.fs.pacer.Call(func() (bool, error) {
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
+		return shouldRetry(resp, err)
+	})
+	if err != nil {
+		return 0, err
+	}
+	if len(info.NextExpectedRanges) != 1 {
+		return 0, errors.Errorf("bad number of ranges in upload position: %v", info.NextExpectedRanges)
+	}
+	position := info.NextExpectedRanges[0]
+	i := strings.IndexByte(position, '-')
+	if i < 0 {
+		return 0, errors.Errorf("no '-' in next expected range: %q", position)
+	}
+	position = position[:i]
+	pos, err = strconv.ParseInt(position, 10, 64)
+	if err != nil {
+		return 0, errors.Wrapf(err, "bad expected range: %q", position)
+	}
+	return pos, nil
+}
+
// uploadFragment uploads a part
func (o *Object) uploadFragment(ctx context.Context, url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64) (info *api.Item, err error) {
-	opts := rest.Opts{
-		Method:        "PUT",
-		RootURL:       url,
-		ContentLength: &chunkSize,
-		ContentRange:  fmt.Sprintf("bytes %d-%d/%d", start, start+chunkSize-1, totalSize),
-		Body:          chunk,
-	}
	// var response api.UploadFragmentResponse
	var resp *http.Response
	var body []byte
+	var skip = int64(0)
	err = o.fs.pacer.Call(func() (bool, error) {
-		_, _ = chunk.Seek(0, io.SeekStart)
+		toSend := chunkSize - skip
+		opts := rest.Opts{
+			Method:        "PUT",
+			RootURL:       url,
+			ContentLength: &toSend,
+			ContentRange:  fmt.Sprintf("bytes %d-%d/%d", start+skip, start+chunkSize-1, totalSize),
+			Body:          chunk,
+		}
+		_, _ = chunk.Seek(skip, io.SeekStart)
		resp, err = o.fs.srv.Call(ctx, &opts)
+		if err != nil && resp != nil && resp.StatusCode == http.StatusRequestedRangeNotSatisfiable {
+			fs.Debugf(o, "Received 416 error - reading current position from server: %v", err)
+			pos, posErr := o.getPosition(ctx, url)
+			if posErr != nil {
+				fs.Debugf(o, "Failed to read position: %v", posErr)
+				return false, posErr
+			}
+			skip = pos - start
+			fs.Debugf(o, "Read position %d, chunk is %d..%d, bytes to skip = %d", pos, start, start+chunkSize, skip)
+			switch {
+			case skip < 0:
+				return false, errors.Wrapf(err, "sent block already (skip %d < 0), can't rewind", skip)
+			case skip > chunkSize:
+				return false, errors.Wrapf(err, "position is in the future (skip %d > chunkSize %d), can't skip forward", skip, chunkSize)
+			case skip == chunkSize:
+				fs.Debugf(o, "Skipping chunk as already sent (skip %d == chunkSize %d)", skip, chunkSize)
+				return false, nil
+			}
+			return true, errors.Wrapf(err, "retry this chunk skipping %d bytes", skip)
+		}
		if err != nil {
			return shouldRetry(resp, err)
		}
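The heart of the 416-recovery above is a small piece of arithmetic: compare where the server says the next byte should start (`pos`) with where the chunk begins (`start`). A compact sketch of just that decision, with illustrative names (`resumeAction` is not rclone's API):

```go
package main

import "fmt"

// resumeAction decides what to do after the server reports its
// current upload position following a 416 response.
func resumeAction(pos, start, chunkSize int64) string {
	skip := pos - start
	switch {
	case skip < 0:
		return "error: server is behind this chunk, cannot rewind"
	case skip > chunkSize:
		return "error: server is ahead of this chunk, cannot skip forward"
	case skip == chunkSize:
		return "chunk already fully received - skip it"
	default:
		return fmt.Sprintf("resend last %d bytes of the chunk", chunkSize-skip)
	}
}

func main() {
	fmt.Println(resumeAction(1536, 1024, 1024)) // resend last 512 bytes of the chunk
	fmt.Println(resumeAction(2048, 1024, 1024)) // chunk already fully received - skip it
}
```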
@@ -315,14 +315,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	return f, nil
}

-// rootSlash returns root with a slash on if it is empty, otherwise empty string
-func (f *Fs) rootSlash() string {
-	if f.root == "" {
-		return f.root
-	}
-	return f.root + "/"
-}
-
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
@@ -898,11 +890,6 @@ func (o *Object) Remote() string {
	return o.remote
}

-// srvPath returns a path for use in server
-func (o *Object) srvPath() string {
-	return o.fs.opt.Enc.FromStandardPath(o.fs.rootSlash() + o.remote)
-}
-
// Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	return "", hash.ErrUnsupported
@@ -993,14 +980,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
	return resp.Body, err
}

-// metaHash returns a rough hash of metadata to detect if object has been updated
-func (o *Object) metaHash() string {
-	if !o.hasMetaData {
-		return ""
-	}
-	return fmt.Sprintf("remote=%q, size=%d, modTime=%v, id=%q, mimeType=%q", o.remote, o.size, o.modTime, o.id, o.mimeType)
-}
-
// Update the object with the contents of the io.Reader, modTime and size
//
// If existing is set then it updates the object rather than creating a new one
@@ -392,7 +392,7 @@ func (f *Fs) Root() string {
// String converts this Fs to a string
func (f *Fs) String() string {
	if f.rootBucket == "" {
-		return fmt.Sprintf("QingStor root")
+		return "QingStor root"
	}
	if f.rootDirectory == "" {
		return fmt.Sprintf("QingStor bucket %s", f.rootBucket)
@@ -297,21 +297,6 @@ func (mu *multiUploader) send(c chunk) error {
	return err
}

-// list list the ObjectParts of an multipart upload
-func (mu *multiUploader) list() error {
-	bucketInit, _ := mu.bucketInit()
-
-	req := qs.ListMultipartInput{
-		UploadID: mu.uploadID,
-	}
-	fs.Debugf(mu, "Reading multi-part details")
-	rsp, err := bucketInit.ListMultipart(mu.cfg.key, &req)
-	if err == nil {
-		mu.objectParts = rsp.ObjectParts
-	}
-	return err
-}
-
// complete complete an multipart upload
func (mu *multiUploader) complete() error {
	var err error
backend/s3/s3.go (118 changes)

@@ -56,6 +56,7 @@ import (
	"github.com/rclone/rclone/lib/bucket"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/pacer"
+	"github.com/rclone/rclone/lib/pool"
	"github.com/rclone/rclone/lib/readers"
	"github.com/rclone/rclone/lib/rest"
	"golang.org/x/sync/errgroup"
@@ -841,24 +842,38 @@ In Ceph, this can be increased with the "rgw list buckets max chunk" option.
		// - doubled / encoding
		// - trailing / encoding
		// so that AWS keys are always valid file names
-		Default: (encoder.EncodeInvalidUtf8 |
+		Default: encoder.EncodeInvalidUtf8 |
			encoder.EncodeSlash |
-			encoder.EncodeDot),
-	}},
-})
+			encoder.EncodeDot,
+	}, {
+		Name:     "memory_pool_flush_time",
+		Default:  memoryPoolFlushTime,
+		Advanced: true,
+		Help: `How often internal memory buffer pools will be flushed.
+Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
+This option controls how often unused buffers will be removed from the pool.`,
+	}, {
+		Name:     "memory_pool_use_mmap",
+		Default:  memoryPoolUseMmap,
+		Advanced: true,
+		Help:     `Whether to use mmap buffers in internal memory pool.`,
+	},
+	}})
}

// Constants
const (
	metaMtime   = "Mtime"     // the meta key to store mtime in - eg X-Amz-Meta-Mtime
	metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
-	maxRetries  = 10          // number of retries to make of operations
	maxSizeForCopy      = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
	maxUploadParts      = 10000 // maximum allowed number of parts in a multi-part upload
	minChunkSize        = fs.SizeSuffix(1024 * 1024 * 5)
	defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
	maxUploadCutoff     = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
	minSleep            = 10 * time.Millisecond // In case of error, start at 10ms sleep.
+
+	memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
+	memoryPoolUseMmap   = false
)

// Options defines the configuration for this backend
@@ -887,21 +902,25 @@ type Options struct {
	LeavePartsOnError bool                 `config:"leave_parts_on_error"`
	ListChunk         int64                `config:"list_chunk"`
	Enc               encoder.MultiEncoder `config:"encoding"`
+	MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
+	MemoryPoolUseMmap   bool        `config:"memory_pool_use_mmap"`
}

// Fs represents a remote s3 server
type Fs struct {
-	name          string           // the name of the remote
-	root          string           // root of the bucket - ignore all objects above this
-	opt           Options          // parsed options
-	features      *fs.Features     // optional features
-	c             *s3.S3           // the connection to the s3 server
-	ses           *session.Session // the s3 session
-	rootBucket    string           // bucket part of root (if any)
-	rootDirectory string           // directory part of root (if any)
-	cache         *bucket.Cache    // cache for bucket creation status
-	pacer         *fs.Pacer        // To pace the API calls
-	srv           *http.Client     // a plain http client
+	name          string               // the name of the remote
+	root          string               // root of the bucket - ignore all objects above this
+	opt           Options              // parsed options
+	features      *fs.Features         // optional features
+	c             *s3.S3               // the connection to the s3 server
+	ses           *session.Session     // the s3 session
+	rootBucket    string               // bucket part of root (if any)
+	rootDirectory string               // directory part of root (if any)
+	cache         *bucket.Cache        // cache for bucket creation status
+	pacer         *fs.Pacer            // To pace the API calls
+	srv           *http.Client         // a plain http client
+	poolMu        sync.Mutex           // mutex protecting memory pools map
+	pools         map[int64]*pool.Pool // memory pools
}

// Object describes a s3 object
@@ -951,7 +970,7 @@ func (f *Fs) Features() *fs.Features {
// retryErrorCodes is a slice of error codes that we will retry
// See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
var retryErrorCodes = []int{
	// 409, // Conflict - various states that could be resolved on a retry
	500, // Internal Server Error - "We encountered an internal error. Please try again."
	503, // Service Unavailable/Slow Down - "Reduce your request rate"
}

@@ -1074,7 +1093,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
		opt.ForcePathStyle = false
	}
	awsConfig := aws.NewConfig().
-		WithMaxRetries(maxRetries).
+		WithMaxRetries(fs.Config.LowLevelRetries).
		WithCredentials(cred).
		WithHTTPClient(fshttp.NewClient(fs.Config)).
		WithS3ForcePathStyle(opt.ForcePathStyle).
@@ -1180,15 +1199,23 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	if err != nil {
		return nil, err
	}
+
+	pc := fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep)))
+	// Set pacer retries to 0 because we are relying on SDK retry mechanism.
+	// Setting it to 1 because in context of pacer it means 1 attempt.
+	pc.SetRetries(1)
+
	f := &Fs{
		name:  name,
		opt:   *opt,
		c:     c,
		ses:   ses,
-		pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
+		pacer: pc,
		cache: bucket.NewCache(),
		srv:   fshttp.NewClient(fs.Config),
+		pools: make(map[int64]*pool.Pool),
	}

	f.setRoot(root)
	f.features = (&fs.Features{
		ReadMimeType: true,
@@ -1338,7 +1365,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
	//
	// So we enable only on providers we know supports it properly, all others can retry when a
	// XML Syntax error is detected.
-	var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba")
+	var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio")
	for {
		// FIXME need to implement ALL loop
		req := s3.ListObjectsInput{
@@ -1772,8 +1799,9 @@ func (f *Fs) copyMultipart(ctx context.Context, req *s3.CopyObjectInput, dstBuck
	defer func() {
		if err != nil {
			// We can try to abort the upload, but ignore the error.
+			fs.Debugf(nil, "Cancelling multipart copy")
			_ = f.pacer.Call(func() (bool, error) {
-				_, err := f.c.AbortMultipartUploadWithContext(ctx, &s3.AbortMultipartUploadInput{
+				_, err := f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{
					Bucket:   &dstBucket,
					Key:      &dstPath,
					UploadId: uid,
@@ -1875,6 +1903,22 @@ func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}

+func (f *Fs) getMemoryPool(size int64) *pool.Pool {
+	f.poolMu.Lock()
+	defer f.poolMu.Unlock()
+
+	_, ok := f.pools[size]
+	if !ok {
+		f.pools[size] = pool.New(
+			time.Duration(f.opt.MemoryPoolFlushTime),
+			int(f.opt.ChunkSize),
+			f.opt.UploadConcurrency*fs.Config.Transfers,
+			f.opt.MemoryPoolUseMmap,
+		)
+	}
+	return f.pools[size]
+}
+
// ------------------------------------------------------------

// Fs returns the parent Fs
@@ -2078,16 +2122,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
	if concurrency < 1 {
		concurrency = 1
	}
-	bufs := make(chan []byte, concurrency)
-	defer func() {
-		// empty the channel on exit
-		close(bufs)
-		for range bufs {
-		}
-	}()
-	for i := 0; i < concurrency; i++ {
-		bufs <- nil
-	}
+	tokens := pacer.NewTokenDispenser(concurrency)

	// calculate size of parts
	partSize := int(f.opt.ChunkSize)
@@ -2108,6 +2143,8 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
		}
	}

+	memPool := f.getMemoryPool(int64(partSize))
+
	var cout *s3.CreateMultipartUploadOutput
	err = f.pacer.Call(func() (bool, error) {
		var err error
@@ -2136,7 +2173,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
		// We can try to abort the upload, but ignore the error.
		fs.Debugf(o, "Cancelling multipart upload")
		errCancel := f.pacer.Call(func() (bool, error) {
-			_, err := f.c.AbortMultipartUploadWithContext(ctx, &s3.AbortMultipartUploadInput{
+			_, err := f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{
				Bucket:   req.Bucket,
				Key:      req.Key,
				UploadId: uid,
@@ -2159,10 +2196,14 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
	)

	for partNum := int64(1); !finished; partNum++ {
-		// Get a block of memory from the channel (which limits concurrency)
-		buf := <-bufs
-		if buf == nil {
-			buf = make([]byte, partSize)
-		}
+		// Get a block of memory from the pool and token which limits concurrency.
+		tokens.Get()
+		buf := memPool.Get()
+
+		// Fail fast, in case an errgroup managed function returns an error
+		// gCtx is cancelled. There is no point in uploading all the other parts.
+		if gCtx.Err() != nil {
+			break
+		}

		// Read the chunk
@@ -2220,8 +2261,9 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
			return false, nil
		})

-		// return the memory
-		bufs <- buf[:partSize]
+		// return the memory and token
+		memPool.Put(buf[:partSize])
+		tokens.Put()

		if err != nil {
			return errors.Wrap(err, "multipart upload failed to upload part")
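The s3 changes above combine two ideas: a lazily created, size-keyed memory pool guarded by a mutex, and a token dispenser that bounds how many parts are in flight. A minimal sketch of the same shape under stated assumptions — it substitutes the standard library's `sync.Pool` and a channel for rclone's `lib/pool` and `lib/pacer` utilities, so every name here is illustrative:

```go
package main

import "sync"

type uploader struct {
	mu     sync.Mutex
	pools  map[int64]*sync.Pool // one pool per buffer size
	tokens chan struct{}        // limits concurrent parts
}

func newUploader(concurrency int) *uploader {
	u := &uploader{
		pools:  make(map[int64]*sync.Pool),
		tokens: make(chan struct{}, concurrency),
	}
	for i := 0; i < concurrency; i++ {
		u.tokens <- struct{}{}
	}
	return u
}

// getPool mirrors getMemoryPool: create the pool for this size once,
// under the lock, then hand out the shared instance.
func (u *uploader) getPool(size int64) *sync.Pool {
	u.mu.Lock()
	defer u.mu.Unlock()
	if _, ok := u.pools[size]; !ok {
		u.pools[size] = &sync.Pool{
			New: func() interface{} { return make([]byte, size) },
		}
	}
	return u.pools[size]
}

func (u *uploader) uploadPart(size int64, send func([]byte)) {
	<-u.tokens              // acquire a concurrency token
	p := u.getPool(size)
	buf := p.Get().([]byte) // reuse a buffer instead of allocating
	send(buf)
	p.Put(buf)              // return the buffer...
	u.tokens <- struct{}{}  // ...and the token
}

func main() {
	u := newUploader(4)
	u.uploadPart(1<<20, func(b []byte) { _ = b })
}
```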
@@ -838,7 +838,7 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
		},
	}
	if f.useOCMtime {
-		opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1e9)
+		opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.Call(ctx, &opts)
@@ -1138,7 +1138,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	if o.fs.useOCMtime || o.fs.hasMD5 || o.fs.hasSHA1 {
		opts.ExtraHeaders = map[string]string{}
		if o.fs.useOCMtime {
-			opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1e9)
+			opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
		}
		// Set one upload checksum
		// Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5
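A quick illustration of what the `X-OC-Mtime` change produces on the wire: `%f` renders the mtime as fixed six-decimal fractional seconds, while `%d` with `Unix()` sends plain integer seconds.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Unix(1583020800, 123456789)
	fmt.Printf("%f\n", float64(t.UnixNano())/1e9) // 1583020800.123457
	fmt.Printf("%d\n", t.Unix())                  // 1583020800
}
```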
@@ -36,6 +36,7 @@ import (
	_ "github.com/rclone/rclone/cmd/memtest"
	_ "github.com/rclone/rclone/cmd/mkdir"
	_ "github.com/rclone/rclone/cmd/mount"
+	_ "github.com/rclone/rclone/cmd/mount2"
	_ "github.com/rclone/rclone/cmd/move"
	_ "github.com/rclone/rclone/cmd/moveto"
	_ "github.com/rclone/rclone/cmd/ncdu"
@@ -42,11 +42,11 @@ You can use it like this to output a single file

    rclone cat remote:path/to/file

-Or like this to output any file in dir or subdirectories.
+Or like this to output any file in dir or its subdirectories.

    rclone cat remote:path/to/dir

-Or like this to output any .txt files in dir or subdirectories.
+Or like this to output any .txt files in dir or its subdirectories.

    rclone --include "*.txt" cat remote:path/to/dir
cmd/cmd.go (34 changes)

@@ -226,7 +226,7 @@ func ShowStats() bool {

// Run the function with stats and retries if required
func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
-	var err error
+	var cmdErr error
	stopStats := func() {}
	if !showStats && ShowStats() {
		showStats = true
@@ -238,11 +238,11 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
	}
	SigInfoHandler()
	for try := 1; try <= *retries; try++ {
-		err = f()
-		err = fs.CountError(err)
+		cmdErr = f()
+		cmdErr = fs.CountError(cmdErr)
		lastErr := accounting.GlobalStats().GetLastError()
-		if err == nil {
-			err = lastErr
+		if cmdErr == nil {
+			cmdErr = lastErr
		}
		if !Retry || !accounting.GlobalStats().Errored() {
			if try > 1 {
@@ -278,15 +278,6 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
		}
	}
	stopStats()
-	if err != nil {
-		nerrs := accounting.GlobalStats().GetErrors()
-		if nerrs <= 1 {
-			log.Printf("Failed to %s: %v", cmd.Name(), err)
-		} else {
-			log.Printf("Failed to %s with %d errors: last error was: %v", cmd.Name(), nerrs, err)
-		}
-		resolveExitCode(err)
-	}
	if showStats && (accounting.GlobalStats().Errored() || *statsInterval > 0) {
		accounting.GlobalStats().Log()
	}
@@ -294,7 +285,7 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {

	// dump all running go-routines
	if fs.Config.Dump&fs.DumpGoRoutines != 0 {
-		err = pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
+		err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
		if err != nil {
			fs.Errorf(nil, "Failed to dump goroutines: %v", err)
		}
@@ -305,15 +296,22 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
		c := exec.Command("lsof", "-p", strconv.Itoa(os.Getpid()))
		c.Stdout = os.Stdout
		c.Stderr = os.Stderr
-		err = c.Run()
+		err := c.Run()
		if err != nil {
			fs.Errorf(nil, "Failed to list open files: %v", err)
		}
	}

-	if accounting.GlobalStats().Errored() {
-		resolveExitCode(accounting.GlobalStats().GetLastError())
-	}
+	// Log the final error message and exit
+	if cmdErr != nil {
+		nerrs := accounting.GlobalStats().GetErrors()
+		if nerrs <= 1 {
+			log.Printf("Failed to %s: %v", cmd.Name(), cmdErr)
+		} else {
+			log.Printf("Failed to %s with %d errors: last error was: %v", cmd.Name(), nerrs, cmdErr)
+		}
+	}
+	resolveExitCode(cmdErr)
}

// CheckArgs checks there are enough arguments and prints a message if not
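The refactor above tracks the command's error in a single variable, logs once at the end, and funnels everything through one exit-code resolution. A distilled sketch of that control flow — `runOnce` and this `resolveExitCode` are simplified stand-ins, not rclone's implementations:

```go
package main

import (
	"errors"
	"log"
	"os"
)

func runOnce() error { return errors.New("boom") }

func resolveExitCode(err error) {
	if err != nil {
		os.Exit(1)
	}
	os.Exit(0)
}

func main() {
	var cmdErr error
	const retries = 3
	for try := 1; try <= retries; try++ {
		cmdErr = runOnce()
		if cmdErr == nil {
			break // success - no retry needed
		}
		log.Printf("attempt %d/%d failed: %v", try, retries, cmdErr)
	}
	if cmdErr != nil {
		log.Printf("failed after retries: %v", cmdErr)
	}
	resolveExitCode(cmdErr) // exactly one exit path
}
```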
@@ -371,12 +371,7 @@ func (fsys *FS) Write(path string, buff []byte, ofst int64, fh uint64) (n int) {
	if errc != 0 {
		return errc
	}
-	var err error
-	if fsys.VFS.Opt.CacheMode < vfs.CacheModeWrites || handle.Node().Mode()&os.ModeAppend == 0 {
-		n, err = handle.WriteAt(buff, ofst)
-	} else {
-		n, err = handle.Write(buff)
-	}
+	n, err := handle.WriteAt(buff, ofst)
	if err != nil {
		return translateError(err)
	}
@@ -31,7 +31,7 @@ func init() {
	if runtime.GOOS == "windows" {
		name = "mount"
	}
-	mountlib.NewMountCommand(name, Mount)
+	mountlib.NewMountCommand(name, false, Mount)
}

// mountOptions configures the options from the command line flags
@@ -146,7 +146,7 @@ you would do:
If any of the parameters passed is a password field, then rclone will
automatically obscure them before putting them in the config file.

-If the remote uses oauth the token will be updated, if you don't
+If the remote uses OAuth the token will be updated, if you don't
require this add an extra parameter thus:

    rclone config update myremote swift env_auth true config_refresh_token false
@@ -25,6 +25,7 @@ date: %s
title: "%s"
slug: %s
url: %s
+# autogenerated - DO NOT EDIT, instead edit the source code in %s and as part of making a release run "make commanddocs"
---
`

@@ -67,7 +68,8 @@ rclone.org website.`,
			name := filepath.Base(filename)
			base := strings.TrimSuffix(name, path.Ext(name))
			url := "/commands/" + strings.ToLower(base) + "/"
-			return fmt.Sprintf(gendocFrontmatterTemplate, now, strings.Replace(base, "_", " ", -1), base, url)
+			source := strings.Replace(strings.Replace(base, "rclone", "cmd", -1), "_", "/", -1) + "/"
+			return fmt.Sprintf(gendocFrontmatterTemplate, now, strings.Replace(base, "_", " ", -1), base, url, source)
		}
		linkHandler := func(name string) string {
			base := strings.TrimSuffix(name, path.Ext(name))
@@ -1,4 +1,4 @@
-// +build linux,go1.11 darwin,go1.11 freebsd,go1.11
+// +build linux,go1.13 darwin,go1.13 freebsd,go1.13

 package mount

@@ -1,4 +1,4 @@
-// +build linux,go1.11 darwin,go1.11 freebsd,go1.11
+// +build linux,go1.13 darwin,go1.13 freebsd,go1.13

 package mount
@@ -1,6 +1,6 @@
 // FUSE main Fs

-// +build linux,go1.11 darwin,go1.11 freebsd,go1.11
+// +build linux,go1.13 darwin,go1.13 freebsd,go1.13

 package mount
@@ -1,11 +1,10 @@
-// +build linux,go1.11 darwin,go1.11 freebsd,go1.11
+// +build linux,go1.13 darwin,go1.13 freebsd,go1.13

 package mount

 import (
	"context"
	"io"
-	"os"

	"bazil.org/fuse"
	fusefs "bazil.org/fuse/fs"
@@ -42,12 +41,7 @@ var _ fusefs.HandleWriter = (*FileHandle)(nil)
// Write data to the file handle
func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
	defer log.Trace(fh, "len=%d, offset=%d", len(req.Data), req.Offset)("written=%d, err=%v", &resp.Size, &err)
-	var n int
-	if fh.Handle.Node().VFS().Opt.CacheMode < vfs.CacheModeWrites || fh.Handle.Node().Mode()&os.ModeAppend == 0 {
-		n, err = fh.Handle.WriteAt(req.Data, req.Offset)
-	} else {
-		n, err = fh.Handle.Write(req.Data)
-	}
+	n, err := fh.Handle.WriteAt(req.Data, req.Offset)
	if err != nil {
		return translateError(err)
	}
@@ -1,6 +1,6 @@
 // Package mount implents a FUSE mounting system for rclone remotes.

-// +build linux,go1.11 darwin,go1.11 freebsd,go1.11
+// +build linux,go1.13 darwin,go1.13 freebsd,go1.13

 package mount
@@ -22,7 +22,7 @@ import (
)

func init() {
-	mountlib.NewMountCommand("mount", Mount)
+	mountlib.NewMountCommand("mount", false, Mount)
}

// mountOptions configures the options from the command line flags
@@ -32,12 +32,14 @@ func mountOptions(device string) (options []fuse.MountOption) {
		fuse.Subtype("rclone"),
		fuse.FSName(device),
		fuse.VolumeName(mountlib.VolumeName),
-		fuse.AsyncRead(),

		// Options from benchmarking in the fuse module
		//fuse.MaxReadahead(64 * 1024 * 1024),
		//fuse.WritebackCache(),
	}
+	if mountlib.AsyncRead {
+		options = append(options, fuse.AsyncRead())
+	}
	if mountlib.NoAppleDouble {
		options = append(options, fuse.NoAppleDouble())
	}
@@ -51,7 +53,8 @@ func mountOptions(device string) (options []fuse.MountOption) {
		options = append(options, fuse.AllowOther())
	}
	if mountlib.AllowRoot {
-		options = append(options, fuse.AllowRoot())
+		// options = append(options, fuse.AllowRoot())
+		fs.Errorf(nil, "Ignoring --allow-root. Support has been removed upstream - see https://github.com/bazil/fuse/issues/144 for more info")
	}
	if mountlib.DefaultPermissions {
		options = append(options, fuse.DefaultPermissions())
@@ -1,4 +1,4 @@
-// +build linux,go1.11 darwin,go1.11 freebsd,go1.11
+// +build linux,go1.13 darwin,go1.13 freebsd,go1.13

 package mount
@@ -1,14 +1,14 @@
 // Build for mount for unsupported platforms to stop go complaining
 // about "no buildable Go source files "

-// Invert the build constraint: linux,go1.11 darwin,go1.11 freebsd,go1.11
+// Invert the build constraint: linux,go1.13 darwin,go1.13 freebsd,go1.13
 //
-// !((linux&&go1.11) || (darwin&&go1.11) || (freebsd&&go1.11))
-// == !(linux&&go1.11) && !(darwin&&go1.11) && !(freebsd&&go1.11))
-// == (!linux || !go1.11) && (!darwin || !go1.11) && (!freebsd || !go1.11))
+// !((linux&&go1.13) || (darwin&&go1.13) || (freebsd&&go1.13))
+// == !(linux&&go1.13) && !(darwin&&go1.13) && !(freebsd&&go1.13))
+// == (!linux || !go1.13) && (!darwin || !go1.13) && (!freebsd || !go1.13))

-// +build !linux !go1.11
-// +build !darwin !go1.11
-// +build !freebsd !go1.11
+// +build !linux !go1.13
+// +build !darwin !go1.13
+// +build !freebsd !go1.13

 package mount
cmd/mount2/file.go (new file, 149 lines)

@@ -0,0 +1,149 @@
// +build linux darwin,amd64

package mount2

import (
	"context"
	"fmt"
	"io"
	"syscall"

	fusefs "github.com/hanwen/go-fuse/v2/fs"
	"github.com/hanwen/go-fuse/v2/fuse"
	"github.com/rclone/rclone/fs/log"
	"github.com/rclone/rclone/vfs"
)

// FileHandle is a resource identifier for opened files. Usually, a
// FileHandle should implement some of the FileXxxx interfaces.
//
// All of the FileXxxx operations can also be implemented at the
// InodeEmbedder level, for example, one can implement NodeReader
// instead of FileReader.
//
// FileHandles are useful in two cases: First, if the underlying
// storage systems needs a handle for reading/writing. This is the
// case with Unix system calls, which need a file descriptor (See also
// the function `NewLoopbackFile`). Second, it is useful for
// implementing files whose contents are not tied to an inode. For
// example, a file like `/proc/interrupts` has no fixed content, but
// changes on each open call. This means that each file handle must
// have its own view of the content; this view can be tied to a
// FileHandle. Files that have such dynamic content should return the
// FOPEN_DIRECT_IO flag from their `Open` method. See directio_test.go
// for an example.
type FileHandle struct {
	h vfs.Handle
}

// Create a new FileHandle
func newFileHandle(h vfs.Handle) *FileHandle {
	return &FileHandle{
		h: h,
	}
}

// Check interface satistfied
var _ fusefs.FileHandle = (*FileHandle)(nil)

// The String method is for debug printing.
func (f *FileHandle) String() string {
	return fmt.Sprintf("fh=%p(%s)", f, f.h.Node().Path())
}

// Read data from a file. The data should be returned as
// ReadResult, which may be constructed from the incoming
// `dest` buffer.
func (f *FileHandle) Read(ctx context.Context, dest []byte, off int64) (res fuse.ReadResult, errno syscall.Errno) {
	var n int
	var err error
	defer log.Trace(f, "off=%d", off)("n=%d, off=%d, errno=%v", &n, &off, &errno)
	n, err = f.h.ReadAt(dest, off)
	if err == io.EOF {
		err = nil
	}
	return fuse.ReadResultData(dest[:n]), translateError(err)
}

var _ fusefs.FileReader = (*FileHandle)(nil)

// Write the data into the file handle at given offset. After
// returning, the data will be reused and may not referenced.
func (f *FileHandle) Write(ctx context.Context, data []byte, off int64) (written uint32, errno syscall.Errno) {
	var n int
	var err error
	defer log.Trace(f, "off=%d", off)("n=%d, off=%d, errno=%v", &n, &off, &errno)
	n, err = f.h.WriteAt(data, off)
	return uint32(n), translateError(err)
}

var _ fusefs.FileWriter = (*FileHandle)(nil)

// Flush is called for the close(2) call on a file descriptor. In case
// of a descriptor that was duplicated using dup(2), it may be called
// more than once for the same FileHandle.
func (f *FileHandle) Flush(ctx context.Context) syscall.Errno {
	return translateError(f.h.Flush())
}

var _ fusefs.FileFlusher = (*FileHandle)(nil)

// Release is called to before a FileHandle is forgotten. The
// kernel ignores the return value of this method,
// so any cleanup that requires specific synchronization or
// could fail with I/O errors should happen in Flush instead.
func (f *FileHandle) Release(ctx context.Context) syscall.Errno {
	return translateError(f.h.Release())
}

var _ fusefs.FileReleaser = (*FileHandle)(nil)

// Fsync is a signal to ensure writes to the Inode are flushed
// to stable storage.
func (f *FileHandle) Fsync(ctx context.Context, flags uint32) (errno syscall.Errno) {
	return translateError(f.h.Sync())
}

var _ fusefs.FileFsyncer = (*FileHandle)(nil)

// Getattr reads attributes for an Inode. The library will ensure that
// Mode and Ino are set correctly. For files that are not opened with
// FOPEN_DIRECTIO, Size should be set so it can be read correctly. If
// returning zeroed permissions, the default behavior is to change the
// mode of 0755 (directory) or 0644 (files). This can be switched off
// with the Options.NullPermissions setting. If blksize is unset, 4096
// is assumed, and the 'blocks' field is set accordingly.
func (f *FileHandle) Getattr(ctx context.Context, out *fuse.AttrOut) (errno syscall.Errno) {
	defer log.Trace(f, "")("attr=%v, errno=%v", &out, &errno)
	setAttrOut(f.h.Node(), out)
	return 0
}

var _ fusefs.FileGetattrer = (*FileHandle)(nil)

// Setattr sets attributes for an Inode.
func (f *FileHandle) Setattr(ctx context.Context, in *fuse.SetAttrIn, out *fuse.AttrOut) (errno syscall.Errno) {
	defer log.Trace(f, "in=%v", in)("attr=%v, errno=%v", &out, &errno)
	var err error
	setAttrOut(f.h.Node(), out)
	size, ok := in.GetSize()
	if ok {
		err = f.h.Truncate(int64(size))
		if err != nil {
			return translateError(err)
		}
		out.Attr.Size = size
	}
	mtime, ok := in.GetMTime()
	if ok {
		err = f.h.Node().SetModTime(mtime)
		if err != nil {
			return translateError(err)
		}
		out.Attr.Mtime = uint64(mtime.Unix())
		out.Attr.Mtimensec = uint32(mtime.Nanosecond())
	}
	return 0
}

var _ fusefs.FileSetattrer = (*FileHandle)(nil)
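The `var _ fusefs.FileReader = (*FileHandle)(nil)` lines sprinkled through the new file are a standard Go idiom: they make the compiler verify, at build time, that the type satisfies each interface. A minimal self-contained sketch of the idiom:

```go
package main

import "fmt"

type Reader interface {
	Read(p []byte) (int, error)
}

type MyFile struct{}

func (f *MyFile) Read(p []byte) (int, error) { return 0, nil }

// Compile-time check: the build fails if *MyFile stops satisfying Reader.
var _ Reader = (*MyFile)(nil)

func main() { fmt.Println("builds, so *MyFile implements Reader") }
```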
cmd/mount2/fs.go (new file, 131 lines)

@@ -0,0 +1,131 @@
// FUSE main Fs

// +build linux darwin,amd64

package mount2

import (
	"os"
	"syscall"

	"github.com/hanwen/go-fuse/v2/fuse"
	"github.com/pkg/errors"
	"github.com/rclone/rclone/cmd/mountlib"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/log"
	"github.com/rclone/rclone/vfs"
	"github.com/rclone/rclone/vfs/vfsflags"
)

// FS represents the top level filing system
type FS struct {
	VFS *vfs.VFS
	f   fs.Fs
}

// NewFS creates a pathfs.FileSystem from the fs.Fs passed in
func NewFS(f fs.Fs) *FS {
	fsys := &FS{
		VFS: vfs.New(f, &vfsflags.Opt),
		f:   f,
	}
	return fsys
}

// Root returns the root node
func (f *FS) Root() (node *Node, err error) {
	defer log.Trace("", "")("node=%+v, err=%v", &node, &err)
	root, err := f.VFS.Root()
	if err != nil {
		return nil, err
	}
	return newNode(f, root), nil
}

// SetDebug if called, provide debug output through the log package.
func (f *FS) SetDebug(debug bool) {
	fs.Debugf(f.f, "SetDebug %v", debug)
}

// get the Mode from a vfs Node
func getMode(node os.FileInfo) uint32 {
	Mode := node.Mode().Perm()
	if node.IsDir() {
		Mode |= fuse.S_IFDIR
	} else {
		Mode |= fuse.S_IFREG
	}
	return uint32(Mode)
}

// fill in attr from node
func setAttr(node vfs.Node, attr *fuse.Attr) {
	Size := uint64(node.Size())
	const BlockSize = 512
	Blocks := (Size + BlockSize - 1) / BlockSize
	modTime := node.ModTime()
	// set attributes
	vfs := node.VFS()
	attr.Owner.Gid = vfs.Opt.UID
	attr.Owner.Uid = vfs.Opt.GID
	attr.Mode = getMode(node)
	attr.Size = Size
	attr.Nlink = 1
	attr.Blocks = Blocks
	// attr.Blksize = BlockSize // not supported in freebsd/darwin, defaults to 4k if not set
	s := uint64(modTime.Unix())
	ns := uint32(modTime.Nanosecond())
	attr.Atime = s
	attr.Atimensec = ns
	attr.Mtime = s
	attr.Mtimensec = ns
	attr.Ctime = s
	attr.Ctimensec = ns
	//attr.Rdev
}

// fill in AttrOut from node
func setAttrOut(node vfs.Node, out *fuse.AttrOut) {
	setAttr(node, &out.Attr)
	out.SetTimeout(mountlib.AttrTimeout)
}

// fill in EntryOut from node
func setEntryOut(node vfs.Node, out *fuse.EntryOut) {
	setAttr(node, &out.Attr)
	out.SetEntryTimeout(mountlib.AttrTimeout)
	out.SetAttrTimeout(mountlib.AttrTimeout)
}

// Translate errors from mountlib into Syscall error numbers
func translateError(err error) syscall.Errno {
	if err == nil {
		return 0
	}
	switch errors.Cause(err) {
	case vfs.OK:
		return 0
	case vfs.ENOENT:
		return syscall.ENOENT
	case vfs.EEXIST:
		return syscall.EEXIST
	case vfs.EPERM:
		return syscall.EPERM
	case vfs.ECLOSED:
		return syscall.EBADF
	case vfs.ENOTEMPTY:
		return syscall.ENOTEMPTY
	case vfs.ESPIPE:
		return syscall.ESPIPE
	case vfs.EBADF:
		return syscall.EBADF
	case vfs.EROFS:
		return syscall.EROFS
	case vfs.ENOSYS:
		return syscall.ENOSYS
	case vfs.EINVAL:
		return syscall.EINVAL
	}
	fs.Errorf(nil, "IO error: %v", err)
	return syscall.EIO
}
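`translateError` above uses `errors.Cause` from github.com/pkg/errors to unwrap a chain of wrapped errors back to its sentinel before the switch. A minimal sketch of the same pattern using the standard library's `errors.Is`, which handles wrapping in modern Go — the sentinel and function names here are illustrative:

```go
package main

import (
	"errors"
	"fmt"
	"syscall"
)

var errNotFound = errors.New("not found") // sentinel, like vfs.ENOENT

// toErrno maps an (possibly wrapped) error onto a syscall errno.
func toErrno(err error) syscall.Errno {
	if err == nil {
		return 0
	}
	if errors.Is(err, errNotFound) { // matches even through wrapping
		return syscall.ENOENT
	}
	return syscall.EIO // unknown errors collapse to EIO
}

func main() {
	wrapped := fmt.Errorf("open config: %w", errNotFound)
	fmt.Println(toErrno(wrapped)) // prints "no such file or directory" (ENOENT)
}
```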
277
cmd/mount2/mount.go
Normal file
277
cmd/mount2/mount.go
Normal file
@@ -0,0 +1,277 @@
|
||||
// Package mount implents a FUSE mounting system for rclone remotes.
|
||||
|
||||
// +build linux darwin,amd64
|
||||
|
||||
package mount2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime"
|
||||
"syscall"
|
||||
|
||||
fusefs "github.com/hanwen/go-fuse/v2/fs"
|
||||
"github.com/hanwen/go-fuse/v2/fuse"
|
||||
"github.com/okzk/sdnotify"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/cmd/mountlib"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
"github.com/rclone/rclone/vfs"
|
||||
)
|
||||
|
||||
func init() {
|
||||
mountlib.NewMountCommand("mount2", true, Mount)
|
||||
}
|
||||
// mountOptions configures the options from the command line flags
//
// man mount.fuse for more info and note the -o flag for other options
func mountOptions(fsys *FS, f fs.Fs) (mountOpts *fuse.MountOptions) {
	device := f.Name() + ":" + f.Root()
	mountOpts = &fuse.MountOptions{
		AllowOther:    mountlib.AllowOther,
		FsName:        device,
		Name:          "rclone",
		DisableXAttrs: true,
		Debug:         mountlib.DebugFUSE,
		MaxReadAhead:  int(mountlib.MaxReadAhead),

		// RememberInodes: true,
		// SingleThreaded: true,

		/*
			AllowOther bool

			// Options are passed as -o string to fusermount.
			Options []string

			// Default is _DEFAULT_BACKGROUND_TASKS, 12. This number
			// controls the allowed number of requests that relate to
			// async I/O. Concurrency for synchronous I/O is not limited.
			MaxBackground int

			// Write size to use. If 0, use default. This number is
			// capped at the kernel maximum.
			MaxWrite int

			// Max read ahead to use. If 0, use default. This number is
			// capped at the kernel maximum.
			MaxReadAhead int

			// If IgnoreSecurityLabels is set, all security related xattr
			// requests will return NO_DATA without passing through the
			// user defined filesystem. You should only set this if your
			// file system implements extended attributes, and you are not
			// interested in security labels.
			IgnoreSecurityLabels bool // ignoring labels should be provided as a fusermount mount option.

			// If RememberInodes is set, we will never forget inodes.
			// This may be useful for NFS.
			RememberInodes bool

			// Values shown in "df -T" and friends
			// First column, "Filesystem"
			FsName string

			// Second column, "Type", will be shown as "fuse." + Name
			Name string

			// If set, wrap the file system in a single-threaded locking wrapper.
			SingleThreaded bool

			// If set, return ENOSYS for Getxattr calls, so the kernel does not issue any
			// Xattr operations at all.
			DisableXAttrs bool

			// If set, print debugging information.
			Debug bool

			// If set, ask kernel to forward file locks to FUSE. If using,
			// you must implement the GetLk/SetLk/SetLkw methods.
			EnableLocks bool

			// If set, ask kernel not to do automatic data cache invalidation.
			// The filesystem is fully responsible for invalidating data cache.
			ExplicitDataCacheControl bool
		*/
	}
	var opts []string
	// FIXME doesn't work opts = append(opts, fmt.Sprintf("max_readahead=%d", maxReadAhead))
	if mountlib.AllowNonEmpty {
		opts = append(opts, "nonempty")
	}
	if mountlib.AllowOther {
		opts = append(opts, "allow_other")
	}
	if mountlib.AllowRoot {
		opts = append(opts, "allow_root")
	}
	if mountlib.DefaultPermissions {
		opts = append(opts, "default_permissions")
	}
	if fsys.VFS.Opt.ReadOnly {
		opts = append(opts, "ro")
	}
	if mountlib.WritebackCache {
		log.Printf("FIXME --write-back-cache not supported")
		// FIXME opts = append(opts, fuse.WritebackCache())
	}
	// Some OS X only options
	if runtime.GOOS == "darwin" {
		opts = append(opts,
			// VolumeName sets the volume name shown in Finder.
			fmt.Sprintf("volname=%s", device),

			// NoAppleXattr makes OSXFUSE disallow extended attributes with the
			// prefix "com.apple.". This disables persistent Finder state and
			// other such information.
			"noapplexattr",

			// NoAppleDouble makes OSXFUSE disallow files with names used by OS X
			// to store extended attributes on file systems that do not support
			// them natively.
			//
			// Such file names are:
			//
			//     ._*
			//     .DS_Store
			"noappledouble",
		)
	}
	mountOpts.Options = opts
	return mountOpts
}
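
For orientation, go-fuse hands the collected Options slice to fusermount as a single comma-joined -o argument, so the flags built above end up as one mount option string. A minimal sketch of that translation (the optionString helper is hypothetical, not part of rclone or go-fuse):

package main

import (
	"fmt"
	"strings"
)

// optionString is a hypothetical helper showing roughly how the
// Options slice built by mountOptions appears on the fusermount
// command line.
func optionString(opts []string) string {
	if len(opts) == 0 {
		return ""
	}
	return "-o " + strings.Join(opts, ",")
}

func main() {
	// e.g. a read-only mount shared with other users
	fmt.Println(optionString([]string{"allow_other", "ro"})) // -o allow_other,ro
}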

// mount the file system
//
// The mount point will be ready when this returns.
//
// It returns a VFS, an error channel on which the serve goroutine
// reports errors, an unmount function, and any error encountered
// while mounting.
func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, error) {
	fs.Debugf(f, "Mounting on %q", mountpoint)

	fsys := NewFS(f)
	// nodeFsOpts := &fusefs.PathNodeFsOptions{
	// 	ClientInodes: false,
	// 	Debug:        mountlib.DebugFUSE,
	// }
	// nodeFs := fusefs.NewPathNodeFs(fsys, nodeFsOpts)

	// mOpts := fusefs.NewOptions() // default options
	// FIXME
	// mOpts.EntryTimeout = 10 * time.Second
	// mOpts.AttrTimeout = 10 * time.Second
	// mOpts.NegativeTimeout = 10 * time.Second
	// mOpts.Debug = mountlib.DebugFUSE

	// conn := fusefs.NewFileSystemConnector(nodeFs.Root(), mOpts)
	mountOpts := mountOptions(fsys, f)

	// FIXME fill out
	opts := fusefs.Options{
		MountOptions: *mountOpts,
		EntryTimeout: &mountlib.AttrTimeout,
		AttrTimeout:  &mountlib.AttrTimeout,
		// UID
		// GID
	}

	root, err := fsys.Root()
	if err != nil {
		return nil, nil, nil, err
	}

	rawFS := fusefs.NewNodeFS(root, &opts)
	server, err := fuse.NewServer(rawFS, mountpoint, &opts.MountOptions)
	if err != nil {
		return nil, nil, nil, err
	}

	// mountOpts := &fuse.MountOptions{}
	// server, err := fusefs.Mount(mountpoint, fsys, &opts)
	// server, err := fusefs.Mount(mountpoint, root, &opts)
	// if err != nil {
	// 	return nil, nil, nil, err
	// }

	umount := func() error {
		// Shutdown the VFS
		fsys.VFS.Shutdown()
		return server.Unmount()
	}

	// serverSettings := server.KernelSettings()
	// fs.Debugf(f, "Server settings %+v", serverSettings)

	// Serve the mount point in the background returning error to errs
	errs := make(chan error, 1)
	go func() {
		server.Serve()
		errs <- nil
	}()

	fs.Debugf(f, "Waiting for the mount to start...")
	err = server.WaitMount()
	if err != nil {
		return nil, nil, nil, err
	}

	fs.Debugf(f, "Mount started")
	return fsys.VFS, errs, umount, nil
}

// Mount mounts the remote at mountpoint.
func Mount(f fs.Fs, mountpoint string) error {
	// Mount it
	vfs, errChan, unmount, err := mount(f, mountpoint)
	if err != nil {
		return errors.Wrap(err, "failed to mount FUSE fs")
	}

	sigInt := make(chan os.Signal, 1)
	signal.Notify(sigInt, syscall.SIGINT, syscall.SIGTERM)
	sigHup := make(chan os.Signal, 1)
	signal.Notify(sigHup, syscall.SIGHUP)
	atexit.Register(func() {
		_ = unmount()
	})

	if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
		return errors.Wrap(err, "failed to notify systemd")
	}

waitloop:
	for {
		select {
		// umount triggered outside the app
		case err = <-errChan:
			break waitloop
		// Program abort: umount
		case <-sigInt:
			err = unmount()
			break waitloop
		// user sent SIGHUP to clear the cache
		case <-sigHup:
			root, err := vfs.Root()
			if err != nil {
				fs.Errorf(f, "Error reading root: %v", err)
			} else {
				root.ForgetAll()
			}
		}
	}

	_ = sdnotify.Stopping()
	if err != nil {
		return errors.Wrap(err, "failed to umount FUSE fs")
	}

	return nil
}
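
A plausible sketch of how this Mount function would be registered as a cobra command, using the NewMountCommand signature (with its new hidden parameter) shown later in this changeset; the init wiring below is illustrative rather than a quote of the actual mount2 source:

package mount2

import (
	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/cmd/mountlib"
)

func init() {
	// Register "rclone mount2" as a hidden command while the
	// go-fuse/v2 based implementation is experimental.
	cmd.Root.AddCommand(mountlib.NewMountCommand("mount2", true, Mount))
}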

13
cmd/mount2/mount_test.go
Normal file
@@ -0,0 +1,13 @@

// +build linux darwin,amd64

package mount2

import (
	"testing"

	"github.com/rclone/rclone/cmd/mountlib/mounttest"
)

func TestMount(t *testing.T) {
	mounttest.RunTests(t, mount)
}

7
cmd/mount2/mount_unsupported.go
Normal file
@@ -0,0 +1,7 @@

// Build for mount for unsupported platforms to stop go complaining
// about "no buildable Go source files"

// +build !linux
// +build !darwin !amd64

package mount2
400
cmd/mount2/node.go
Normal file
@@ -0,0 +1,400 @@

// +build linux darwin,amd64

package mount2

import (
	"context"
	"os"
	"path"
	"syscall"

	fusefs "github.com/hanwen/go-fuse/v2/fs"
	"github.com/hanwen/go-fuse/v2/fuse"
	"github.com/rclone/rclone/cmd/mountlib"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/log"
	"github.com/rclone/rclone/vfs"
)

// Node represents a directory or file
type Node struct {
	fusefs.Inode
	node vfs.Node
	fsys *FS
}

// Node types must be InodeEmbedders
var _ fusefs.InodeEmbedder = (*Node)(nil)

// newNode creates a new fusefs.Node from a vfs Node
func newNode(fsys *FS, node vfs.Node) *Node {
	return &Node{
		node: node,
		fsys: fsys,
	}
}

// String used for pretty printing.
func (n *Node) String() string {
	return n.node.Path()
}

// lookupVfsNodeInDir looks up a Node in a directory
func (n *Node) lookupVfsNodeInDir(leaf string) (vfsNode vfs.Node, errno syscall.Errno) {
	dir, ok := n.node.(*vfs.Dir)
	if !ok {
		return nil, syscall.ENOTDIR
	}
	vfsNode, err := dir.Stat(leaf)
	return vfsNode, translateError(err)
}

// // lookupDir looks up a Dir given a path
// func (n *Node) lookupDir(path string) (dir *vfs.Dir, code fuse.Status) {
// 	node, code := fsys.lookupVfsNodeInDir(path)
// 	if !code.Ok() {
// 		return nil, code
// 	}
// 	dir, ok := n.(*vfs.Dir)
// 	if !ok {
// 		return nil, fuse.ENOTDIR
// 	}
// 	return dir, fuse.OK
// }

// // lookupParentDir looks up a parent Dir given a path, returning the dir and the leaf
// func (n *Node) lookupParentDir(filePath string) (leaf string, dir *vfs.Dir, code fuse.Status) {
// 	parentDir, leaf := path.Split(filePath)
// 	dir, code = fsys.lookupDir(parentDir)
// 	return leaf, dir, code
// }

// Statfs implements statistics for the filesystem that holds this
// Inode. If not defined, the `out` argument will be zeroed with an OK
// result. This is because OSX filesystems must implement Statfs, or
// the mount will not work.
func (n *Node) Statfs(ctx context.Context, out *fuse.StatfsOut) syscall.Errno {
	defer log.Trace(n, "")("out=%+v", &out)
	out = new(fuse.StatfsOut)
	const blockSize = 4096
	const fsBlocks = (1 << 50) / blockSize
	out.Blocks = fsBlocks  // Total data blocks in file system.
	out.Bfree = fsBlocks   // Free blocks in file system.
	out.Bavail = fsBlocks  // Free blocks in file system if you're not root.
	out.Files = 1e9        // Total files in file system.
	out.Ffree = 1e9        // Free files in file system.
	out.Bsize = blockSize  // Block size
	out.NameLen = 255      // Maximum file name length?
	out.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.
	mountlib.ClipBlocks(&out.Blocks)
	mountlib.ClipBlocks(&out.Bfree)
	mountlib.ClipBlocks(&out.Bavail)
	return 0
}

var _ = (fusefs.NodeStatfser)((*Node)(nil))

// Getattr reads attributes for an Inode. The library will ensure that
// Mode and Ino are set correctly. For files that are not opened with
// FOPEN_DIRECTIO, Size should be set so it can be read correctly. If
// returning zeroed permissions, the default behavior is to change the
// mode of 0755 (directory) or 0644 (files). This can be switched off
// with the Options.NullPermissions setting. If blksize is unset, 4096
// is assumed, and the 'blocks' field is set accordingly.
func (n *Node) Getattr(ctx context.Context, f fusefs.FileHandle, out *fuse.AttrOut) syscall.Errno {
	setAttrOut(n.node, out)
	return 0
}

var _ = (fusefs.NodeGetattrer)((*Node)(nil))

// Setattr sets attributes for an Inode.
func (n *Node) Setattr(ctx context.Context, f fusefs.FileHandle, in *fuse.SetAttrIn, out *fuse.AttrOut) (errno syscall.Errno) {
	defer log.Trace(n, "in=%v", in)("out=%#v, errno=%v", &out, &errno)
	var err error
	setAttrOut(n.node, out)
	size, ok := in.GetSize()
	if ok {
		err = n.node.Truncate(int64(size))
		if err != nil {
			return translateError(err)
		}
		out.Attr.Size = size
	}
	mtime, ok := in.GetMTime()
	if ok {
		err = n.node.SetModTime(mtime)
		if err != nil {
			return translateError(err)
		}
		out.Attr.Mtime = uint64(mtime.Unix())
		out.Attr.Mtimensec = uint32(mtime.Nanosecond())
	}
	return 0
}

var _ = (fusefs.NodeSetattrer)((*Node)(nil))

// Open opens an Inode (of regular file type) for reading. It
// is optional but recommended to return a FileHandle.
func (n *Node) Open(ctx context.Context, flags uint32) (fh fusefs.FileHandle, fuseFlags uint32, errno syscall.Errno) {
	defer log.Trace(n, "flags=%#o", flags)("errno=%v", &errno)
	// fuse flags are based off syscall flags as are os flags, so
	// should be compatible
	handle, err := n.node.Open(int(flags))
	if err != nil {
		return nil, 0, translateError(err)
	}
	// If size unknown then use direct io to read
	if entry := n.node.DirEntry(); entry != nil && entry.Size() < 0 {
		fuseFlags |= fuse.FOPEN_DIRECT_IO
	}
	return newFileHandle(handle), fuseFlags, 0
}

var _ = (fusefs.NodeOpener)((*Node)(nil))

// Lookup should find a direct child of a directory by the child's name. If
// the entry does not exist, it should return ENOENT and optionally
// set a NegativeTimeout in `out`. If it does exist, it should return
// attribute data in `out` and return the Inode for the child. A new
// inode can be created using `Inode.NewInode`. The new Inode will be
// added to the FS tree automatically if the return status is OK.
//
// If a directory does not implement NodeLookuper, the library looks
// for an existing child with the given name.
//
// The input to a Lookup is {parent directory, name string}.
//
// Lookup, if successful, must return an *Inode. Once the Inode is
// returned to the kernel, the kernel can issue further operations,
// such as Open or Getxattr on that node.
//
// A successful Lookup also returns an EntryOut. Among others, this
// contains file attributes (mode, size, mtime, etc.).
//
// FUSE supports other operations that modify the namespace. For
// example, the Symlink, Create, Mknod, Link methods all create new
// children in directories. Hence, they also return *Inode and must
// populate their fuse.EntryOut arguments.
func (n *Node) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (inode *fusefs.Inode, errno syscall.Errno) {
	defer log.Trace(n, "name=%q", name)("inode=%v, attr=%v, errno=%v", &inode, &out, &errno)
	vfsNode, errno := n.lookupVfsNodeInDir(name)
	if errno != 0 {
		return nil, errno
	}
	newNode := &Node{
		node: vfsNode,
		fsys: n.fsys,
	}

	// FIXME
	// out.SetEntryTimeout(dt time.Duration)
	// out.SetAttrTimeout(dt time.Duration)
	setEntryOut(vfsNode, out)

	return n.NewInode(ctx, newNode, fusefs.StableAttr{Mode: out.Attr.Mode}), 0
}

var _ = (fusefs.NodeLookuper)((*Node)(nil))

// Opendir opens a directory Inode for reading its
// contents. The actual reading is driven from Readdir, so
// this method is just for performing sanity/permission
// checks. The default is to return success.
func (n *Node) Opendir(ctx context.Context) syscall.Errno {
	if !n.node.IsDir() {
		return syscall.ENOTDIR
	}
	return 0
}

var _ = (fusefs.NodeOpendirer)((*Node)(nil))

type dirStream struct {
	nodes []os.FileInfo
	i     int
}

// HasNext indicates if there are further entries. HasNext
// might be called on already closed streams.
func (ds *dirStream) HasNext() bool {
	return ds.i < len(ds.nodes)
}

// Next retrieves the next entry. It is only called if HasNext
// has previously returned true. The Errno return may be used to
// indicate I/O errors
func (ds *dirStream) Next() (de fuse.DirEntry, errno syscall.Errno) {
	// defer log.Trace(nil, "")("de=%+v, errno=%v", &de, &errno)
	fi := ds.nodes[ds.i]
	de = fuse.DirEntry{
		// Mode is the file's mode. Only the high bits (eg. S_IFDIR)
		// are considered.
		Mode: getMode(fi),

		// Name is the basename of the file in the directory.
		Name: path.Base(fi.Name()),

		// Ino is the inode number.
		Ino: 0, // FIXME
	}
	ds.i++
	return de, 0
}

// Close releases resources related to this directory
// stream.
func (ds *dirStream) Close() {
}

var _ fusefs.DirStream = (*dirStream)(nil)
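
To make the DirStream contract concrete, this is roughly how such a stream gets drained. The drainDirStream helper below is purely illustrative; the real loop lives inside go-fuse's readdir plumbing:

// drainDirStream demonstrates the HasNext/Next/Close contract that the
// dirStream type above must honour. Illustrative caller, not go-fuse code.
func drainDirStream(ds fusefs.DirStream) ([]fuse.DirEntry, syscall.Errno) {
	defer ds.Close()
	var entries []fuse.DirEntry
	for ds.HasNext() {
		de, errno := ds.Next()
		if errno != 0 {
			return nil, errno
		}
		entries = append(entries, de)
	}
	return entries, 0
}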

// Readdir opens a stream of directory entries.
//
// Readdir essentially returns a list of strings, and it is allowed
// for Readdir to return different results from Lookup. For example,
// you can return nothing for Readdir ("ls my-fuse-mount" is empty),
// while still implementing Lookup ("ls my-fuse-mount/a-specific-file"
// shows a single file).
//
// If a directory does not implement NodeReaddirer, a list of
// currently known children from the tree is returned. This means that
// static in-memory file systems need not implement NodeReaddirer.
func (n *Node) Readdir(ctx context.Context) (ds fusefs.DirStream, errno syscall.Errno) {
	defer log.Trace(n, "")("ds=%v, errno=%v", &ds, &errno)
	if !n.node.IsDir() {
		return nil, syscall.ENOTDIR
	}
	fh, err := n.node.Open(os.O_RDONLY)
	if err != nil {
		return nil, translateError(err)
	}
	defer func() {
		closeErr := fh.Close()
		if errno == 0 && closeErr != nil {
			errno = translateError(closeErr)
		}
	}()
	items, err := fh.Readdir(-1)
	if err != nil {
		return nil, translateError(err)
	}
	return &dirStream{
		nodes: items,
	}, 0
}

var _ = (fusefs.NodeReaddirer)((*Node)(nil))

// Mkdir is similar to Lookup, but must create a directory entry and Inode.
// Default is to return EROFS.
func (n *Node) Mkdir(ctx context.Context, name string, mode uint32, out *fuse.EntryOut) (inode *fusefs.Inode, errno syscall.Errno) {
	defer log.Trace(name, "mode=0%o", mode)("inode=%v, errno=%v", &inode, &errno)
	dir, ok := n.node.(*vfs.Dir)
	if !ok {
		return nil, syscall.ENOTDIR
	}
	newDir, err := dir.Mkdir(name)
	if err != nil {
		return nil, translateError(err)
	}
	newNode := newNode(n.fsys, newDir)
	setEntryOut(newNode.node, out)
	newInode := n.NewInode(ctx, newNode, fusefs.StableAttr{Mode: out.Attr.Mode})
	return newInode, 0
}

var _ = (fusefs.NodeMkdirer)((*Node)(nil))

// Create is similar to Lookup, but should create a new
// child. It typically also returns a FileHandle as a
// reference for future reads/writes.
// Default is to return EROFS.
func (n *Node) Create(ctx context.Context, name string, flags uint32, mode uint32, out *fuse.EntryOut) (node *fusefs.Inode, fh fusefs.FileHandle, fuseFlags uint32, errno syscall.Errno) {
	defer log.Trace(n, "name=%q, flags=%#o, mode=%#o", name, flags, mode)("node=%v, fh=%v, flags=%#o, errno=%v", &node, &fh, &fuseFlags, &errno)
	dir, ok := n.node.(*vfs.Dir)
	if !ok {
		return nil, nil, 0, syscall.ENOTDIR
	}
	// translate the fuse flags to os flags
	osFlags := int(flags) | os.O_CREATE
	file, err := dir.Create(name, osFlags)
	if err != nil {
		return nil, nil, 0, translateError(err)
	}
	handle, err := file.Open(osFlags)
	if err != nil {
		return nil, nil, 0, translateError(err)
	}
	fh = newFileHandle(handle)
	// FIXME
	// fh = &fusefs.WithFlags{
	// 	File: fh,
	// 	//FuseFlags: fuse.FOPEN_NONSEEKABLE,
	// 	OpenFlags: flags,
	// }

	// Find the created node
	vfsNode, errno := n.lookupVfsNodeInDir(name)
	if errno != 0 {
		return nil, nil, 0, errno
	}
	setEntryOut(vfsNode, out)
	newNode := newNode(n.fsys, vfsNode)
	fs.Debugf(nil, "attr=%#v", out.Attr)
	newInode := n.NewInode(ctx, newNode, fusefs.StableAttr{Mode: out.Attr.Mode})
	return newInode, fh, 0, 0
}

var _ = (fusefs.NodeCreater)((*Node)(nil))

// Unlink should remove a child from this directory. If the
// return status is OK, the Inode is removed as child in the
// FS tree automatically. Default is to return EROFS.
func (n *Node) Unlink(ctx context.Context, name string) (errno syscall.Errno) {
	defer log.Trace(n, "name=%q", name)("errno=%v", &errno)
	vfsNode, errno := n.lookupVfsNodeInDir(name)
	if errno != 0 {
		return errno
	}
	return translateError(vfsNode.Remove())
}

var _ = (fusefs.NodeUnlinker)((*Node)(nil))

// Rmdir is like Unlink but for directories.
// Default is to return EROFS.
func (n *Node) Rmdir(ctx context.Context, name string) (errno syscall.Errno) {
	defer log.Trace(n, "name=%q", name)("errno=%v", &errno)
	vfsNode, errno := n.lookupVfsNodeInDir(name)
	if errno != 0 {
		return errno
	}
	return translateError(vfsNode.Remove())
}

var _ = (fusefs.NodeRmdirer)((*Node)(nil))

// Rename should move a child from one directory to a different
// one. The change is effected in the FS tree if the return status is
// OK. Default is to return EROFS.
func (n *Node) Rename(ctx context.Context, oldName string, newParent fusefs.InodeEmbedder, newName string, flags uint32) (errno syscall.Errno) {
	defer log.Trace(n, "oldName=%q, newParent=%v, newName=%q", oldName, newParent, newName)("errno=%v", &errno)
	oldDir, ok := n.node.(*vfs.Dir)
	if !ok {
		return syscall.ENOTDIR
	}
	newParentNode, ok := newParent.(*Node)
	if !ok {
		fs.Errorf(n, "newParent was not a *Node")
		return syscall.EIO
	}
	newDir, ok := newParentNode.node.(*vfs.Dir)
	if !ok {
		return syscall.ENOTDIR
	}
	return translateError(oldDir.Rename(oldName, newName, newDir))
}

var _ = (fusefs.NodeRenamer)((*Node)(nil))
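
The translateError helper used throughout these methods is not shown in this hunk. As a hedged sketch of its likely shape, assuming the error values exported by the vfs package and errors.Cause from github.com/pkg/errors (the exact mapping is an assumption, not the real implementation):

// translateErrorSketch maps vfs layer errors onto syscall.Errno values
// for go-fuse. Hypothetical stand-in for the real translateError.
func translateErrorSketch(err error) syscall.Errno {
	if err == nil {
		return 0
	}
	switch errors.Cause(err) {
	case vfs.OK:
		return 0
	case vfs.ENOENT:
		return syscall.ENOENT
	case vfs.EEXIST:
		return syscall.EEXIST
	case vfs.EBADF:
		return syscall.EBADF
	case vfs.EROFS:
		return syscall.EROFS
	case vfs.ENOSYS:
		return syscall.ENOSYS
	case vfs.EINVAL:
		return syscall.EINVAL
	}
	return syscall.EIO // fall back to a generic I/O error
}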

@@ -36,6 +36,7 @@ var (
	NoAppleDouble = true  // use noappledouble by default
	NoAppleXattr  = false // do not use noapplexattr by default
	DaemonTimeout time.Duration // OSXFUSE only
	AsyncRead     = true  // do async reads by default
)

// Global constants
@@ -98,10 +99,11 @@ func checkMountpointOverlap(root, mountpoint string) error {
}

// NewMountCommand makes a mount command with the given name and Mount function
func NewMountCommand(commandName string, Mount func(f fs.Fs, mountpoint string) error) *cobra.Command {
func NewMountCommand(commandName string, hidden bool, Mount func(f fs.Fs, mountpoint string) error) *cobra.Command {
	var commandDefinition = &cobra.Command{
		Use:   commandName + " remote:path /path/to/mountpoint",
		Short: `Mount the remote as file system on a mountpoint.`,
		Use:    commandName + " remote:path /path/to/mountpoint",
		Hidden: hidden,
		Short:  `Mount the remote as file system on a mountpoint.`,
		Long: `
rclone ` + commandName + ` allows Linux, FreeBSD, macOS and Windows to
mount any of Rclone's cloud storage systems as a file system with
@@ -109,6 +111,11 @@ FUSE.

First set up your remote using ` + "`rclone config`" + `. Check it works with ` + "`rclone ls`" + ` etc.

You can either run mount in foreground mode or background (daemon) mode. Mount runs in
foreground mode by default; use the --daemon flag to specify background mode.
Background mode is only supported on Linux and OSX; you can only run mount in
foreground mode on Windows.

Start the mount like this

    rclone ` + commandName + ` remote:path/to/files /path/to/local/mount
@@ -117,11 +124,15 @@ Or on Windows like this where X: is an unused drive letter

    rclone ` + commandName + ` remote:path/to/files X:

When the program ends, either via Ctrl+C or receiving a SIGINT or SIGTERM signal,
the mount is automatically stopped.
When running in background mode the user will have to stop the mount manually (see below).

When the program ends while in foreground mode, either via Ctrl+C or receiving
a SIGINT or SIGTERM signal, the mount is automatically stopped.

The umount operation can fail, for example when the mountpoint is busy.
When that happens, it is the user's responsibility to stop the mount manually with
When that happens, it is the user's responsibility to stop the mount manually.

Stopping the mount manually:

    # Linux
    fusermount -u /path/to/local/mount
@@ -157,6 +168,34 @@ infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Archit
which creates drives accessible for everyone on the system or
alternatively using [the nssm service manager](https://nssm.cc/usage).

#### Mount as a network drive

By default, rclone will mount the remote as a normal drive. However,
you can also mount it as a **Network Drive** (or **Network Share**, as
mentioned in some places).

Unlike other systems, Windows provides a different filesystem type for
network drives. Windows and other programs treat the network drives
and fixed/removable drives differently: in network drives, many I/O
operations are optimized, as the high latency and low reliability
(compared to a normal drive) of a network is expected.

Although many people prefer network shares to be mounted as normal
system drives, this might cause some issues, such as programs not
working as expected or freezes and errors while operating with the
mounted remote in Windows Explorer. If you experience any of those,
consider mounting rclone remotes as network shares, as Windows expects
normal drives to be fast and reliable, while cloud storage is far from
that. See also the [Limitations](#limitations) section below for more
info.

Add "--fuse-flag --VolumePrefix=\server\share" to your "mount"
command, **replacing "share" with any other name of your choice if you
are mounting more than one remote**. Otherwise, the mountpoints will
conflict and your mounted filesystems will overlap.

[Read more about drive mapping](https://en.wikipedia.org/wiki/Drive_mapping)

### Limitations

Without the use of "--vfs-cache-mode" this can only write files
@@ -318,6 +357,7 @@ be copied to the vfs cache before opening with --vfs-cache-mode full.
	flags.BoolVarP(cmdFlags, &Daemon, "daemon", "", Daemon, "Run mount as a daemon (background mode).")
	flags.StringVarP(cmdFlags, &VolumeName, "volname", "", VolumeName, "Set the volume name (not supported by all OSes).")
	flags.DurationVarP(cmdFlags, &DaemonTimeout, "daemon-timeout", "", DaemonTimeout, "Time limit for rclone to respond to kernel (not supported by all OSes).")
	flags.BoolVarP(cmdFlags, &AsyncRead, "async-read", "", AsyncRead, "Use asynchronous reads.")

	if runtime.GOOS == "darwin" {
		flags.BoolVarP(cmdFlags, &NoAppleDouble, "noappledouble", "", NoAppleDouble, "Sets the OSXFUSE option noappledouble.")
@@ -25,6 +25,7 @@ import (
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/lib/file"
	"github.com/rclone/rclone/vfs"
	"github.com/rclone/rclone/vfs/vfscommon"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
@@ -44,11 +45,11 @@ var (
func RunTests(t *testing.T, fn MountFn) {
	mountFn = fn
	flag.Parse()
	cacheModes := []vfs.CacheMode{
		vfs.CacheModeOff,
		vfs.CacheModeMinimal,
		vfs.CacheModeWrites,
		vfs.CacheModeFull,
	cacheModes := []vfscommon.CacheMode{
		vfscommon.CacheModeOff,
		vfscommon.CacheModeMinimal,
		vfscommon.CacheModeWrites,
		vfscommon.CacheModeFull,
	}
	run = newRun()
	for _, cacheMode := range cacheModes {
@@ -207,7 +208,7 @@ func (r *Run) umount() {
}

// cacheMode flushes the VFS and changes the CacheMode
func (r *Run) cacheMode(cacheMode vfs.CacheMode) {
func (r *Run) cacheMode(cacheMode vfscommon.CacheMode) {
	if r.skip {
		log.Printf("FUSE not found so skipping cacheMode")
		return

@@ -5,10 +5,9 @@ import (
	"runtime"
	"testing"

	"github.com/rclone/rclone/vfs/vfscommon"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/rclone/rclone/vfs"
)

// TestWriteFileNoWrite tests writing a file with no write()'s to it
@@ -91,7 +90,7 @@ func TestWriteFileFsync(t *testing.T) {
func TestWriteFileDup(t *testing.T) {
	run.skipIfNoFUSE(t)

	if run.vfs.Opt.CacheMode < vfs.CacheModeWrites {
	if run.vfs.Opt.CacheMode < vfscommon.CacheModeWrites {
		t.Skip("not supported on vfs-cache-mode < writes")
		return
	}
@@ -136,7 +135,7 @@ func TestWriteFileDup(t *testing.T) {
func TestWriteFileAppend(t *testing.T) {
	run.skipIfNoFUSE(t)

	if run.vfs.Opt.CacheMode < vfs.CacheModeWrites {
	if run.vfs.Opt.CacheMode < vfscommon.CacheModeWrites {
		t.Skip("not supported on vfs-cache-mode < writes")
		return
	}

@@ -6,10 +6,9 @@ import (
	"runtime"
	"testing"

	"github.com/rclone/rclone/vfs/vfscommon"
	"github.com/stretchr/testify/assert"
	"golang.org/x/sys/unix"

	"github.com/rclone/rclone/vfs"
)

// TestWriteFileDoubleClose tests double close on write
@@ -45,7 +44,7 @@ func TestWriteFileDoubleClose(t *testing.T) {

	// write to the other dup
	_, err = unix.Write(fd2, buf)
	if run.vfs.Opt.CacheMode < vfs.CacheModeWrites {
	if run.vfs.Opt.CacheMode < vfscommon.CacheModeWrites {
		// produces an error if cache mode < writes
		assert.Error(t, err, "input/output error")
	} else {
@@ -16,8 +16,8 @@ import (

	"github.com/anacrolix/dms/dlna"
	"github.com/anacrolix/dms/upnp"
	"github.com/anacrolix/dms/upnpav"
	"github.com/pkg/errors"
	"github.com/rclone/rclone/cmd/serve/dlna/upnpav"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/vfs"
)
@@ -77,16 +77,10 @@ func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fi
}

	if fileInfo.IsDir() {
		children, err := cds.readContainer(cdsObject, host)
		if err != nil {
			return nil, err
		}

		obj.Class = "object.container.storageFolder"
		obj.Title = fileInfo.Name()
		return upnpav.Container{
			Object:     obj,
			ChildCount: len(children),
			Object: obj,
		}, nil
	}

@@ -110,6 +104,7 @@ func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fi

	obj.Class = "object.item." + mediaType[1] + "Item"
	obj.Title = fileInfo.Name()
	obj.Date = upnpav.Timestamp{Time: fileInfo.ModTime()}

	item := upnpav.Item{
		Object: obj,

@@ -122,8 +122,10 @@ func TestContentDirectoryBrowseMetadata(t *testing.T) {
	// expect a <container> element
	require.Contains(t, string(body), html.EscapeString("<container "))
	require.NotContains(t, string(body), html.EscapeString("<item "))
	// with a non-zero childCount
	require.Regexp(t, " childCount=\"[1-9]", string(body))
	// if there is a childCount, it better not be zero
	require.NotContains(t, string(body), html.EscapeString(" childCount=\"0\""))
	// should have a dc:date element
	require.Contains(t, string(body), html.EscapeString("<dc:date>"))
}

// Check that the X_MS_MediaReceiverRegistrar is faked out properly.
63
cmd/serve/dlna/upnpav/upnpav.go
Normal file
@@ -0,0 +1,63 @@

package upnpav

import (
	"encoding/xml"
	"time"
)

const (
	// NoSuchObjectErrorCode : The specified ObjectID is invalid.
	NoSuchObjectErrorCode = 701
)

// Resource description
type Resource struct {
	XMLName      xml.Name `xml:"res"`
	ProtocolInfo string   `xml:"protocolInfo,attr"`
	URL          string   `xml:",chardata"`
	Size         uint64   `xml:"size,attr,omitempty"`
	Bitrate      uint     `xml:"bitrate,attr,omitempty"`
	Duration     string   `xml:"duration,attr,omitempty"`
	Resolution   string   `xml:"resolution,attr,omitempty"`
}

// Container description
type Container struct {
	Object
	XMLName    xml.Name `xml:"container"`
	ChildCount *int     `xml:"childCount,attr"`
}

// Item description
type Item struct {
	Object
	XMLName  xml.Name `xml:"item"`
	Res      []Resource
	InnerXML string `xml:",innerxml"`
}

// Object description
type Object struct {
	ID          string    `xml:"id,attr"`
	ParentID    string    `xml:"parentID,attr"`
	Restricted  int       `xml:"restricted,attr"` // indicates whether the object is modifiable
	Class       string    `xml:"upnp:class"`
	Icon        string    `xml:"upnp:icon,omitempty"`
	Title       string    `xml:"dc:title"`
	Date        Timestamp `xml:"dc:date"`
	Artist      string    `xml:"upnp:artist,omitempty"`
	Album       string    `xml:"upnp:album,omitempty"`
	Genre       string    `xml:"upnp:genre,omitempty"`
	AlbumArtURI string    `xml:"upnp:albumArtURI,omitempty"`
	Searchable  int       `xml:"searchable,attr"`
}

// Timestamp wraps time.Time for formatting purposes
type Timestamp struct {
	time.Time
}

// MarshalXML formats the Timestamp per the DIDL-Lite spec
func (t Timestamp) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	return e.EncodeElement(t.Format("2006-01-02"), start)
}
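
Why ChildCount is a *int rather than a plain int: encoding/xml omits an attribute backed by a nil pointer, whereas a plain int would always serialise and emit the childCount="0" that the test change above now rejects. A hedged sketch (the demo function is hypothetical, not part of the changeset, and additionally assumes "fmt" and "strings" are imported):

// childCountDemo shows the childCount attribute appearing only when
// the pointer is non-nil. Hypothetical illustration, not upnpav code.
func childCountDemo() {
	two := 2
	with := Container{Object: Object{ID: "0/music", Title: "music"}, ChildCount: &two}
	without := Container{Object: Object{ID: "0/empty", Title: "empty"}}

	a, _ := xml.Marshal(with)
	b, _ := xml.Marshal(without)
	fmt.Println(strings.Contains(string(a), ` childCount="2"`)) // true
	fmt.Println(strings.Contains(string(b), `childCount`))      // false
}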

@@ -442,7 +442,7 @@ func (d *Driver) GetFile(path string, offset int64) (size int64, fr io.ReadClose
	if err != nil {
		return 0, nil, err
	}
	_, err = handle.Seek(offset, os.SEEK_SET)
	_, err = handle.Seek(offset, io.SeekStart)
	if err != nil {
		return 0, nil, err
	}
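
This hunk swaps the deprecated os.SEEK_SET constant for io.SeekStart, its replacement since Go 1.7. The constants share values, so behaviour is unchanged, as this throwaway check illustrates:

package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	// os.SEEK_SET/SEEK_CUR/SEEK_END are deprecated aliases of the io constants.
	fmt.Println(os.SEEK_SET == io.SeekStart)   // true
	fmt.Println(os.SEEK_CUR == io.SeekCurrent) // true
	fmt.Println(os.SEEK_END == io.SeekEnd)     // true
}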
@@ -29,6 +29,10 @@ rclone will use that program to generate backends on the fly which
then are used to authenticate incoming requests. This uses a simple
JSON based protocol with input on STDIN and output on STDOUT.

**PLEASE NOTE:** |--auth-proxy| and |--authorized-keys| cannot be used
together; if |--auth-proxy| is set, the authorized keys option will be
ignored.

There is an example program
[bin/test_proxy.py](https://github.com/rclone/rclone/blob/master/bin/test_proxy.py)
in the rclone source code.
@@ -46,7 +50,8 @@ This config generated must have this extra parameter
And it may have this parameter
- |_obscure| - comma separated strings for parameters to obscure

For example the program might take this on STDIN
If password authentication was used by the client, input to the proxy
process (on STDIN) would look similar to this:

|||
{
@@ -55,7 +60,17 @@ For example the program might take this on STDIN
}
|||

And return this on STDOUT
If public-key authentication was used by the client, input to the
proxy process (on STDIN) would look similar to this:

|||
{
    "user": "me",
    "public_key": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDuwESFdAe14hVS6omeyX7edc...JQdf"
}
|||

And as an example return this on STDOUT

|||
{
@@ -69,7 +84,7 @@ And return this on STDOUT
|||

This would mean that an SFTP backend would be created on the fly for
the |user| and |pass| returned in the output to the host given. Note
the |user| and |pass|/|public_key| returned in the output to the host given. Note
that since |_obscure| is set to |pass|, rclone will obscure the |pass|
parameter before creating the backend (which is required for sftp
backends).
@@ -81,8 +96,8 @@ in the output and the user to |user|. For security you'd probably want
to restrict the |host| to a limited list.

Note that an internal cache is keyed on |user| so only use that for
configuration, don't use |pass|. This also means that if a user's
password is changed the cache will need to expire (which takes 5 mins)
configuration, don't use |pass| or |public_key|. This also means that if a user's
password or public-key is changed the cache will need to expire (which takes 5 mins)
before it takes effect.

This can be used to build general purpose proxies to any kind of
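
To make the proxy protocol concrete, here is a hedged sketch of a minimal auth-proxy program in Go. It reads one JSON request on STDIN and answers with a backend config on STDOUT. The field names mirror the examples above; the hard-coded host, the accept-everything logic and the _root key are invented for illustration and are not real authentication:

package main

import (
	"encoding/json"
	"os"
)

// request mirrors the JSON rclone writes to the proxy's STDIN.
type request struct {
	User      string `json:"user"`
	Pass      string `json:"pass,omitempty"`
	PublicKey string `json:"public_key,omitempty"`
}

func main() {
	var req request
	if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
		os.Exit(1)
	}
	// Answer with an on-the-fly sftp backend config. "_obscure" follows
	// the docs above; "_root" and the host are assumptions for the sketch.
	resp := map[string]string{
		"type":     "sftp",
		"_root":    "",
		"_obscure": "pass",
		"user":     req.User,
		"pass":     req.Pass,
		"host":     "sftp.example.com",
	}
	_ = json.NewEncoder(os.Stdout).Encode(resp)
}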
@@ -71,7 +71,7 @@ control the stats printing.

You must provide some means of authentication, either with --user/--pass,
an authorized keys file (specify location with --authorized-keys - the
default is the same as ssh) or set the --no-auth flag for no
default is the same as ssh), an --auth-proxy, or set the --no-auth flag for no
authentication when logging in.

Note that this also implements a small number of shell commands so
@@ -334,3 +334,8 @@ Contributors
  * unbelauscht <58393353+unbelauscht@users.noreply.github.com>
  * Motonori IWAMURO <vmi@nifty.com>
  * Benjapol Worakan <benwrk@live.com>
  * Dave Koston <dave.koston@stackpath.com>
  * Durval Menezes <DurvalMenezes@users.noreply.github.com>
  * Tim Gallant <me@timgallant.us>
  * Frederick Zhang <frederick888@tsundere.moe>
  * valery1707 <valery1707@gmail.com>
@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone"
slug: rclone
url: /commands/rclone/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/ and as part of making a release run "make commanddocs"
---
## rclone

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone about"
slug: rclone_about
url: /commands/rclone_about/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/about/ and as part of making a release run "make commanddocs"
---
## rclone about

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone authorize"
slug: rclone_authorize
url: /commands/rclone_authorize/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/authorize/ and as part of making a release run "make commanddocs"
---
## rclone authorize

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone cachestats"
slug: rclone_cachestats
url: /commands/rclone_cachestats/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/cachestats/ and as part of making a release run "make commanddocs"
---
## rclone cachestats

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T15:06:43Z
title: "rclone cat"
slug: rclone_cat
url: /commands/rclone_cat/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/cat/ and as part of making a release run "make commanddocs"
---
## rclone cat

@@ -17,11 +18,11 @@ You can use it like this to output a single file

    rclone cat remote:path/to/file

Or like this to output any file in dir or subdirectories.
Or like this to output any file in dir or its subdirectories.

    rclone cat remote:path/to/dir

Or like this to output any .txt files in dir or subdirectories.
Or like this to output any .txt files in dir or its subdirectories.

    rclone --include "*.txt" cat remote:path/to/dir

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone check"
slug: rclone_check
url: /commands/rclone_check/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/check/ and as part of making a release run "make commanddocs"
---
## rclone check

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone cleanup"
slug: rclone_cleanup
url: /commands/rclone_cleanup/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/cleanup/ and as part of making a release run "make commanddocs"
---
## rclone cleanup

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone config"
slug: rclone_config
url: /commands/rclone_config/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/ and as part of making a release run "make commanddocs"
---
## rclone config

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone config create"
slug: rclone_config_create
url: /commands/rclone_config_create/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/create/ and as part of making a release run "make commanddocs"
---
## rclone config create

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone config delete"
slug: rclone_config_delete
url: /commands/rclone_config_delete/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/delete/ and as part of making a release run "make commanddocs"
---
## rclone config delete

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone config disconnect"
slug: rclone_config_disconnect
url: /commands/rclone_config_disconnect/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/disconnect/ and as part of making a release run "make commanddocs"
---
## rclone config disconnect

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone config dump"
slug: rclone_config_dump
url: /commands/rclone_config_dump/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/dump/ and as part of making a release run "make commanddocs"
---
## rclone config dump

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone config edit"
slug: rclone_config_edit
url: /commands/rclone_config_edit/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/edit/ and as part of making a release run "make commanddocs"
---
## rclone config edit

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone config file"
slug: rclone_config_file
url: /commands/rclone_config_file/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/file/ and as part of making a release run "make commanddocs"
---
## rclone config file

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone config password"
slug: rclone_config_password
url: /commands/rclone_config_password/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/password/ and as part of making a release run "make commanddocs"
---
## rclone config password

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone config providers"
slug: rclone_config_providers
url: /commands/rclone_config_providers/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/providers/ and as part of making a release run "make commanddocs"
---
## rclone config providers

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone config reconnect"
slug: rclone_config_reconnect
url: /commands/rclone_config_reconnect/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/reconnect/ and as part of making a release run "make commanddocs"
---
## rclone config reconnect

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone config show"
slug: rclone_config_show
url: /commands/rclone_config_show/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/show/ and as part of making a release run "make commanddocs"
---
## rclone config show

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T15:06:43Z
title: "rclone config update"
slug: rclone_config_update
url: /commands/rclone_config_update/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/update/ and as part of making a release run "make commanddocs"
---
## rclone config update

@@ -22,7 +23,7 @@ you would do:
If any of the parameters passed is a password field, then rclone will
automatically obscure them before putting them in the config file.

If the remote uses oauth the token will be updated, if you don't
If the remote uses OAuth the token will be updated, if you don't
require this add an extra parameter thus:

    rclone config update myremote swift env_auth true config_refresh_token false

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone config userinfo"
slug: rclone_config_userinfo
url: /commands/rclone_config_userinfo/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/userinfo/ and as part of making a release run "make commanddocs"
---
## rclone config userinfo

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone copy"
slug: rclone_copy
url: /commands/rclone_copy/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/copy/ and as part of making a release run "make commanddocs"
---
## rclone copy

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone copyto"
slug: rclone_copyto
url: /commands/rclone_copyto/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/copyto/ and as part of making a release run "make commanddocs"
---
## rclone copyto

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone copyurl"
slug: rclone_copyurl
url: /commands/rclone_copyurl/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/copyurl/ and as part of making a release run "make commanddocs"
---
## rclone copyurl

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone cryptcheck"
slug: rclone_cryptcheck
url: /commands/rclone_cryptcheck/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/cryptcheck/ and as part of making a release run "make commanddocs"
---
## rclone cryptcheck

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone cryptdecode"
slug: rclone_cryptdecode
url: /commands/rclone_cryptdecode/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/cryptdecode/ and as part of making a release run "make commanddocs"
---
## rclone cryptdecode

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone dbhashsum"
slug: rclone_dbhashsum
url: /commands/rclone_dbhashsum/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/dbhashsum/ and as part of making a release run "make commanddocs"
---
## rclone dbhashsum

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone dedupe"
slug: rclone_dedupe
url: /commands/rclone_dedupe/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/dedupe/ and as part of making a release run "make commanddocs"
---
## rclone dedupe

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone delete"
slug: rclone_delete
url: /commands/rclone_delete/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/delete/ and as part of making a release run "make commanddocs"
---
## rclone delete

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone deletefile"
slug: rclone_deletefile
url: /commands/rclone_deletefile/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/deletefile/ and as part of making a release run "make commanddocs"
---
## rclone deletefile

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone genautocomplete"
slug: rclone_genautocomplete
url: /commands/rclone_genautocomplete/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/genautocomplete/ and as part of making a release run "make commanddocs"
---
## rclone genautocomplete

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone genautocomplete bash"
slug: rclone_genautocomplete_bash
url: /commands/rclone_genautocomplete_bash/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/genautocomplete/bash/ and as part of making a release run "make commanddocs"
---
## rclone genautocomplete bash

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone genautocomplete zsh"
slug: rclone_genautocomplete_zsh
url: /commands/rclone_genautocomplete_zsh/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/genautocomplete/zsh/ and as part of making a release run "make commanddocs"
---
## rclone genautocomplete zsh

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone gendocs"
slug: rclone_gendocs
url: /commands/rclone_gendocs/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/gendocs/ and as part of making a release run "make commanddocs"
---
## rclone gendocs

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone hashsum"
slug: rclone_hashsum
url: /commands/rclone_hashsum/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/hashsum/ and as part of making a release run "make commanddocs"
---
## rclone hashsum

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone link"
slug: rclone_link
url: /commands/rclone_link/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/link/ and as part of making a release run "make commanddocs"
---
## rclone link

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone listremotes"
slug: rclone_listremotes
url: /commands/rclone_listremotes/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/listremotes/ and as part of making a release run "make commanddocs"
---
## rclone listremotes

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone ls"
slug: rclone_ls
url: /commands/rclone_ls/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/ls/ and as part of making a release run "make commanddocs"
---
## rclone ls

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone lsd"
slug: rclone_lsd
url: /commands/rclone_lsd/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/lsd/ and as part of making a release run "make commanddocs"
---
## rclone lsd

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone lsf"
slug: rclone_lsf
url: /commands/rclone_lsf/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/lsf/ and as part of making a release run "make commanddocs"
---
## rclone lsf

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone lsjson"
slug: rclone_lsjson
url: /commands/rclone_lsjson/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/lsjson/ and as part of making a release run "make commanddocs"
---
## rclone lsjson

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone lsl"
slug: rclone_lsl
url: /commands/rclone_lsl/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/lsl/ and as part of making a release run "make commanddocs"
---
## rclone lsl

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone md5sum"
slug: rclone_md5sum
url: /commands/rclone_md5sum/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/md5sum/ and as part of making a release run "make commanddocs"
---
## rclone md5sum

@@ -1,8 +1,9 @@
---
date: 2020-02-01T10:26:53Z
date: 2020-02-10T12:28:36Z
title: "rclone mkdir"
slug: rclone_mkdir
url: /commands/rclone_mkdir/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/mkdir/ and as part of making a release run "make commanddocs"
---
## rclone mkdir

@@ -1,8 +1,9 @@
|
||||
---
|
||||
date: 2020-02-01T10:26:53Z
|
||||
date: 2020-02-10T15:20:27Z
|
||||
title: "rclone mount"
|
||||
slug: rclone_mount
|
||||
url: /commands/rclone_mount/
|
||||
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/mount/ and as part of making a release run "make commanddocs"
|
||||
---
|
||||
## rclone mount
|
||||
|
||||
@@ -17,6 +18,11 @@ FUSE.
|
||||
|
||||
First set up your remote using `rclone config`. Check it works with `rclone ls` etc.
|
||||
|
||||
You can either run mount in foreground mode or background(daemon) mode. Mount runs in
|
||||
foreground mode by default, use the --daemon flag to specify background mode mode.
|
||||
Background mode is only supported on Linux and OSX, you can only run mount in
|
||||
foreground mode on Windows.
|
||||
|
||||
Start the mount like this
|
||||
|
||||
rclone mount remote:path/to/files /path/to/local/mount
|
||||
@@ -25,11 +31,15 @@ Or on Windows like this where X: is an unused drive letter
|
||||
|
||||
rclone mount remote:path/to/files X:
|
||||
|
||||
When the program ends, either via Ctrl+C or receiving a SIGINT or SIGTERM signal,
|
||||
the mount is automatically stopped.
|
||||
When running in background mode the user will have to stop the mount manually (specified below).
|
||||
|
||||
When the program ends while in foreground mode, either via Ctrl+C or receiving
|
||||
a SIGINT or SIGTERM signal, the mount is automatically stopped.
|
||||
|
||||
The umount operation can fail, for example when the mountpoint is busy.
|
||||
When that happens, it is the user's responsibility to stop the mount manually with
|
||||
When that happens, it is the user's responsibility to stop the mount manually.
|
||||
|
||||
Stopping the mount manually:
|
||||
|
||||
# Linux
|
||||
fusermount -u /path/to/local/mount
|
||||
@@ -65,6 +75,34 @@ infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Archit

 which creates drives accessible for everyone on the system or
 alternatively using [the nssm service manager](https://nssm.cc/usage).

+#### Mount as a network drive
+
+By default, rclone will mount the remote as a normal drive. However,
+you can also mount it as a **Network Drive** (or **Network Share**, as
+mentioned in some places).
+
+Unlike other systems, Windows provides a different filesystem type for
+network drives. Windows and other programs treat network drives and
+fixed/removable drives differently: on network drives, many I/O
+operations are optimized, as the high latency and low reliability of a
+network (compared to a normal drive) are expected.
+
+Although many people prefer network shares to be mounted as normal
+system drives, this might cause some issues, such as programs not
+working as expected or freezes and errors while operating with the
+mounted remote in Windows Explorer. If you experience any of those,
+consider mounting rclone remotes as network shares, as Windows expects
+normal drives to be fast and reliable, while cloud storage is far from
+that. See also the [Limitations](#limitations) section below for more
+info.
+
+Add "--fuse-flag --VolumePrefix=\server\share" to your "mount"
+command, **replacing "share" with any other name of your choice if you
+are mounting more than one remote**. Otherwise, the mountpoints will
+conflict and your mounted filesystems will overlap.
+
+[Read more about drive mapping](https://en.wikipedia.org/wiki/Drive_mapping)
+
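For instance, a sketch of mounting two different remotes as two network drives, following the --VolumePrefix advice above (remote names, drive letters and share names are placeholders):

    rclone mount remote1:path X: --fuse-flag --VolumePrefix=\server\share1
    rclone mount remote2:path Y: --fuse-flag --VolumePrefix=\server\share2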
 ### Limitations

 Without the use of "--vfs-cache-mode" this can only write files
@@ -159,7 +197,9 @@ Using the `--dir-cache-time` flag, you can set how long a
 directory should be considered up to date and not refreshed from the
 backend. Changes made locally in the mount may appear immediately or
 invalidate the cache. However, changes done on the remote will only
-be picked up once the cache expires.
+be picked up once the cache expires if the configured backend does not
+support polling for changes. If the backend supports polling, changes
+will be picked up within the polling interval.

 Alternatively, you can send a `SIGHUP` signal to rclone for
 it to flush all directory caches, regardless of how old they are.
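As a sketch of both mechanisms (the cache lifetime value is a placeholder, and pidof is assumed to be available on the host):

    # shorter directory cache for backends without change polling
    rclone mount remote:path/to/files /path/to/local/mount --dir-cache-time 30s

    # flush all directory caches of a running mount
    kill -SIGHUP $(pidof rclone)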
@@ -287,6 +327,42 @@ This mode should support all normal file system operations.

 If an upload or download fails it will be retried up to
 --low-level-retries times.

+### Case Sensitivity
+
+Linux file systems are case-sensitive: two files can differ only
+by case, and the exact case must be used when opening a file.
+
+Windows is not like most other operating systems supported by rclone.
+File systems in modern Windows are case-insensitive but case-preserving:
+although existing files can be opened using any case, the exact case used
+to create the file is preserved and available for programs to query.
+It is not allowed for two files in the same directory to differ only by case.
+
+Usually file systems on MacOS are case-insensitive. It is possible to make MacOS
+file systems case-sensitive, but that is not the default.
+
+The "--vfs-case-insensitive" mount flag controls how rclone handles these
+two cases. If its value is "false", rclone passes file names to the mounted
+file system as-is. If the flag is "true" (or appears without a value on
+the command line), rclone may perform a "fixup" as explained below.
+
+The user may specify a file name to open/delete/rename/etc with a case
+different from what is stored on the mounted file system. If an argument refers
+to an existing file with exactly the same name, then the case of the existing
+file on the disk will be used. However, if a file name with exactly the same
+name is not found but a name differing only by case exists, rclone will
+transparently fix up the name. This fixup happens only when an existing file
+is requested. Case sensitivity of file names created anew by rclone is
+controlled by the underlying mounted file system.
+
+Note that the case sensitivity of the operating system running rclone (the target)
+may differ from the case sensitivity of the file system mounted by rclone (the source).
+The flag controls whether "fixup" is performed to satisfy the target.
+
+If the flag is not provided on the command line, then its default value depends
+on the operating system where rclone runs: "true" on Windows and MacOS, "false"
+otherwise. If the flag is provided without a value, then it is "true".
+
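For example, a sketch of forcing the fixup behaviour on, regardless of the host OS default described above (remote and mountpoint are placeholders):

    rclone mount remote:path/to/files /path/to/local/mount --vfs-case-insensitive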
 ```
 rclone mount remote:path /path/to/mountpoint [flags]

@@ -1,8 +1,9 @@
 ---
-date: 2020-02-01T10:26:53Z
+date: 2020-02-10T12:28:36Z
 title: "rclone move"
 slug: rclone_move
 url: /commands/rclone_move/
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/move/ and as part of making a release run "make commanddocs"
 ---
 ## rclone move

@@ -1,8 +1,9 @@
 ---
-date: 2020-02-01T10:26:53Z
+date: 2020-02-10T12:28:36Z
 title: "rclone moveto"
 slug: rclone_moveto
 url: /commands/rclone_moveto/
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/moveto/ and as part of making a release run "make commanddocs"
 ---
 ## rclone moveto

@@ -1,8 +1,9 @@
 ---
-date: 2020-02-01T10:26:53Z
+date: 2020-02-10T12:28:36Z
 title: "rclone ncdu"
 slug: rclone_ncdu
 url: /commands/rclone_ncdu/
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/ncdu/ and as part of making a release run "make commanddocs"
 ---
 ## rclone ncdu

Some files were not shown because too many files have changed in this diff.