Mirror of https://github.com/rclone/rclone.git
Compare commits: v1.67.0...fix-7959-p (72 commits)
Commits:

800ebb6480
4824837eed
5287a9b5fa
f2ce1767f0
7f048ac901
b0d0e0b267
f5eef420a4
9de485f949
d4b29fef92
471531eb6a
afd2663057
97d6a00483
5ddedae431
e1b7bf7701
2a615f4681
e041796bfe
1b9217bc78
846c1aeed0
56caab2033
495a5759d3
d9bd6f35f2
532a0818f7
91558ce6aa
8fbb259091
4d2bc190cc
c2bf300dd8
c954c397d9
25c6379688
ce1859cd82
cf25ae69ad
dce8317042
eff2497633
28ba4b832d
58da1a165c
eec95a164d
44cd2e07ca
a28287e96d
fc1d8dafd5
2c57fe9826
7c51b10d15
3280b6b83c
1a77a2f92b
c156716d01
0d9d0eef4c
2e653f8128
e79273f9c9
8e10fe71f7
c6ab37a59f
671a15f65f
8d72698d5a
6e853c82d8
27267547b9
cdcf0e5cb8
6507770014
bd5799c079
c834eb7dcb
754e53dbcc
5511fa441a
4ed4483bbc
0e85ba5080
e5095a7d7b
300851e8bf
cbccad9491
9f1a7cfa67
d84a4c9ac1
1c9da8c96a
af9c5fef93
7060777d1d
0197e7f4e5
c1c9e209f3
fd182af866
4ea629446f

@@ -56,7 +56,7 @@ jobs:
       run: |
         df -h .
     - name: Build and publish image
-      uses: docker/build-push-action@v5
+      uses: docker/build-push-action@v6
       with:
         file: Dockerfile
         context: .

.gitignore (vendored, 5 changes)

@@ -3,7 +3,9 @@ _junk/
 rclone
 rclone.exe
 build
-docs/public
+/docs/public/
+/docs/.hugo_build.lock
+/docs/static/img/logos/
 rclone.iml
 .idea
 .history
@@ -16,6 +18,5 @@ fuzz-build.zip
 Thumbs.db
 __pycache__
 .DS_Store
-/docs/static/img/logos/
 resource_windows_*.syso
 .devcontainer

@@ -711,10 +711,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 		ClientOptions: policyClientOptions,
 	}

-	// Here we auth by setting one of cred, sharedKeyCred or f.svc
+	// Here we auth by setting one of cred, sharedKeyCred, f.svc or anonymous
 	var (
 		cred          azcore.TokenCredential
 		sharedKeyCred *service.SharedKeyCredential
+		anonymous     = false
 	)
 	switch {
 	case opt.EnvAuth:
@@ -874,6 +875,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 		if err != nil {
 			return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
 		}
+	case opt.Account != "":
+		// Anonymous access
+		anonymous = true
 	default:
 		return nil, errors.New("no authentication method configured")
 	}
@@ -903,6 +907,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 		if err != nil {
 			return nil, fmt.Errorf("create client failed: %w", err)
 		}
+	} else if anonymous {
+		// Anonymous public access
+		f.svc, err = service.NewClientWithNoCredential(opt.Endpoint, &clientOpt)
+		if err != nil {
+			return nil, fmt.Errorf("create public client failed: %w", err)
+		}
 	}
 }
 if f.svc == nil {
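
The anonymous branch above ends up on the Azure SDK's credential-free constructor. A minimal standalone sketch of reading from a public storage account that way (the account name `myaccount` is a placeholder, and whether anonymous listing succeeds depends on the account's public-access settings):

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
)

func main() {
	// Anonymous public access: no TokenCredential or SharedKeyCredential,
	// exactly the branch the diff adds when only an account name is configured.
	svc, err := service.NewClientWithNoCredential("https://myaccount.blob.core.windows.net/", nil)
	if err != nil {
		panic(err)
	}
	pager := svc.NewListContainersPager(nil)
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			panic(err) // anonymous listing may be forbidden unless the account allows it
		}
		for _, c := range page.ContainerItems {
			fmt.Println(*c.Name)
		}
	}
}
```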

@@ -299,13 +299,14 @@ type Fs struct {

 // Object describes a b2 object
 type Object struct {
-	fs       *Fs       // what this object is part of
-	remote   string    // The remote path
-	id       string    // b2 id of the file
-	modTime  time.Time // The modified time of the object if known
-	sha1     string    // SHA-1 hash if known
-	size     int64     // Size of the object
-	mimeType string    // Content-Type of the object
+	fs       *Fs               // what this object is part of
+	remote   string            // The remote path
+	id       string            // b2 id of the file
+	modTime  time.Time         // The modified time of the object if known
+	sha1     string            // SHA-1 hash if known
+	size     int64             // Size of the object
+	mimeType string            // Content-Type of the object
+	meta     map[string]string // The object metadata if known - may be nil - with lower case keys
 }

 // ------------------------------------------------------------

@@ -1593,7 +1594,14 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
 	o.size = Size
 	// Use the UploadTimestamp if can't get file info
 	o.modTime = time.Time(UploadTimestamp)
-	return o.parseTimeString(Info[timeKey])
+	err = o.parseTimeString(Info[timeKey])
+	if err != nil {
+		return err
+	}
+	// For now, just set "mtime" in metadata
+	o.meta = make(map[string]string, 1)
+	o.meta["mtime"] = o.modTime.Format(time.RFC3339Nano)
+	return nil
 }

 // decodeMetaData sets the metadata in the object from an api.File

@@ -1695,6 +1703,16 @@ func timeString(modTime time.Time) string {
 	return strconv.FormatInt(modTime.UnixNano()/1e6, 10)
 }

+// parseTimeStringHelper converts a decimal string number of milliseconds
+// elapsed since January 1, 1970 UTC into a time.Time
+func parseTimeStringHelper(timeString string) (time.Time, error) {
+	unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
+	if err != nil {
+		return time.Time{}, err
+	}
+	return time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC(), nil
+}
+
 // parseTimeString converts a decimal string number of milliseconds
 // elapsed since January 1, 1970 UTC into a time.Time and stores it in
 // the modTime variable.

@@ -1702,12 +1720,12 @@ func (o *Object) parseTimeString(timeString string) (err error) {
 	if timeString == "" {
 		return nil
 	}
-	unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
+	modTime, err := parseTimeStringHelper(timeString)
 	if err != nil {
 		fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
 		return nil
 	}
-	o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
+	o.modTime = modTime
 	return nil
 }
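
As a sanity check on the conversion the helper performs, here is a self-contained round trip of the same arithmetic (the sample timestamp is arbitrary):

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// B2 stores mtimes as decimal milliseconds since the Unix epoch; this is
// the same conversion parseTimeStringHelper performs.
func main() {
	const timeString = "1715000000123"
	ms, err := strconv.ParseInt(timeString, 10, 64)
	if err != nil {
		panic(err)
	}
	t := time.Unix(ms/1e3, (ms%1e3)*1e6).UTC()
	fmt.Println(t.Format(time.RFC3339Nano)) // 2024-05-06T12:53:20.123Z
}
```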

@@ -1861,6 +1879,14 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
 		ContentType: resp.Header.Get("Content-Type"),
 		Info:        Info,
 	}

+	// Embryonic metadata support - just mtime
+	o.meta = make(map[string]string, 1)
+	modTime, err := parseTimeStringHelper(info.Info[timeKey])
+	if err == nil {
+		o.meta["mtime"] = modTime.Format(time.RFC3339Nano)
+	}
+
 	// When reading files from B2 via cloudflare using
 	// --b2-download-url cloudflare strips the Content-Length
 	// headers (presumably so it can inject stuff) so use the old

@@ -1958,7 +1984,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

 	if err == nil {
 		fs.Debugf(o, "File is big enough for chunked streaming")
-		up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
+		up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil, options...)
 		if err != nil {
 			o.fs.putRW(rw)
 			return err

@@ -1990,7 +2016,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return o.decodeMetaDataFileInfo(up.info)
 	}

-	modTime := src.ModTime(ctx)
+	modTime, err := o.getModTime(ctx, src, options)
+	if err != nil {
+		return err
+	}

 	calculatedSha1, _ := src.Hash(ctx, hash.SHA1)
 	if calculatedSha1 == "" {

@@ -2095,6 +2124,36 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	return o.decodeMetaDataFileInfo(&response)
 }

+// Get modTime from the source; if --metadata is set, fetch the src metadata and get it from there.
+// When metadata support is added to b2, this method will need a more generic name
+func (o *Object) getModTime(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (time.Time, error) {
+	modTime := src.ModTime(ctx)
+
+	// Fetch metadata if --metadata is in use
+	meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
+	if err != nil {
+		return time.Time{}, fmt.Errorf("failed to read metadata from source object: %w", err)
+	}
+	// merge metadata into request and user metadata
+	for k, v := range meta {
+		k = strings.ToLower(k)
+		// For now, the only metadata we're concerned with is "mtime"
+		switch k {
+		case "mtime":
+			// mtime in meta overrides source ModTime
+			metaModTime, err := time.Parse(time.RFC3339Nano, v)
+			if err != nil {
+				fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
+			} else {
+				modTime = metaModTime
+			}
+		default:
+			// Do nothing for now
+		}
+	}
+	return modTime, nil
+}
+
 // OpenChunkWriter returns the chunk size and a ChunkWriter
 //
 // Pass in the remote and the src object
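
The override logic in getModTime is easy to demonstrate in isolation; this sketch mirrors only the mtime branch (the map literal stands in for the metadata fetched via fs.GetMetadataOptions):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	modTime := time.Date(2001, 5, 6, 4, 5, 6, 499000000, time.UTC) // source ModTime
	meta := map[string]string{"mtime": "2009-05-06T04:05:06.499Z"}

	// "mtime" in the metadata overrides the source ModTime, as in getModTime
	if v, ok := meta["mtime"]; ok {
		if t, err := time.Parse(time.RFC3339Nano, v); err == nil {
			modTime = t
		}
	}
	fmt.Println(modTime) // 2009-05-06 04:05:06.499 +0000 UTC
}
```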

@@ -2126,7 +2185,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
 		Concurrency: o.fs.opt.UploadConcurrency,
 		//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
 	}
-	up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil)
+	up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
 	return info, up, err
 }

@@ -184,57 +184,126 @@ func TestParseTimeString(t *testing.T) {

 }

-// This is adapted from the s3 equivalent.
-func (f *Fs) InternalTestMetadata(t *testing.T) {
-	ctx := context.Background()
-	original := random.String(1000)
-	contents := fstest.Gz(t, original)
-	mimeType := "text/html"
-
-	item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
-	btime := time.Now()
-	obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, contents, true, mimeType, nil)
-	defer func() {
-		assert.NoError(t, obj.Remove(ctx))
-	}()
-	o := obj.(*Object)
-	gotMetadata, err := o.getMetaData(ctx)
-	require.NoError(t, err)
-
-	// We currently have a limited amount of metadata to test with B2
-	assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
-
-	// Modification time from the x-bz-info-src_last_modified_millis header
-	var mtime api.Timestamp
-	err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
-	if err != nil {
-		fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
-	}
-	assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")
-
-	// Upload time
-	gotBtime := time.Time(gotMetadata.UploadTimestamp)
-	dt := gotBtime.Sub(btime)
-	assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))
-
-	t.Run("GzipEncoding", func(t *testing.T) {
-		// Test that the gzipped file we uploaded can be
-		// downloaded
-		checkDownload := func(wantContents string, wantSize int64, wantHash string) {
-			gotContents := fstests.ReadObject(ctx, t, o, -1)
-			assert.Equal(t, wantContents, gotContents)
-			assert.Equal(t, wantSize, o.Size())
-			gotHash, err := o.Hash(ctx, hash.SHA1)
-			require.NoError(t, err)
-			assert.Equal(t, wantHash, gotHash)
-		}
-
-		t.Run("NoDecompress", func(t *testing.T) {
-			checkDownload(contents, int64(len(contents)), sha1Sum(t, contents))
-		})
-	})
-}
+// Return a map of the headers in the options with keys stripped of the "x-bz-info-" prefix
+func OpenOptionToMetaData(options []fs.OpenOption) map[string]string {
+	var headers = make(map[string]string)
+	for _, option := range options {
+		k, v := option.Header()
+		k = strings.ToLower(k)
+		if strings.HasPrefix(k, headerPrefix) {
+			headers[k[len(headerPrefix):]] = v
+		}
+	}
+	return headers
+}
+
+func (f *Fs) internalTestMetadata(t *testing.T, size string, uploadCutoff string, chunkSize string) {
+	what := fmt.Sprintf("Size%s/UploadCutoff%s/ChunkSize%s", size, uploadCutoff, chunkSize)
+	t.Run(what, func(t *testing.T) {
+		ctx := context.Background()
+
+		ss := fs.SizeSuffix(0)
+		err := ss.Set(size)
+		require.NoError(t, err)
+		original := random.String(int(ss))
+
+		contents := fstest.Gz(t, original)
+		mimeType := "text/html"
+
+		if chunkSize != "" {
+			ss := fs.SizeSuffix(0)
+			err := ss.Set(chunkSize)
+			require.NoError(t, err)
+			_, err = f.SetUploadChunkSize(ss)
+			require.NoError(t, err)
+		}
+
+		if uploadCutoff != "" {
+			ss := fs.SizeSuffix(0)
+			err := ss.Set(uploadCutoff)
+			require.NoError(t, err)
+			_, err = f.SetUploadCutoff(ss)
+			require.NoError(t, err)
+		}
+
+		item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499Z"))
+		btime := time.Now()
+		metadata := fs.Metadata{
+			// Just mtime for now - limit to milliseconds since x-bz-info-src_last_modified_millis can't support any more accuracy
+			"mtime": "2009-05-06T04:05:06.499Z",
+		}
+
+		// Need to specify HTTP options with the header prefix since they are passed as-is
+		options := []fs.OpenOption{
+			&fs.HTTPOption{Key: "X-Bz-Info-a", Value: "1"},
+			&fs.HTTPOption{Key: "X-Bz-Info-b", Value: "2"},
+		}
+
+		obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, mimeType, metadata, options...)
+		defer func() {
+			assert.NoError(t, obj.Remove(ctx))
+		}()
+		o := obj.(*Object)
+		gotMetadata, err := o.getMetaData(ctx)
+		require.NoError(t, err)
+
+		// X-Bz-Info-a & X-Bz-Info-b
+		optMetadata := OpenOptionToMetaData(options)
+		for k, v := range optMetadata {
+			got := gotMetadata.Info[k]
+			assert.Equal(t, v, got, k)
+		}
+
+		// mtime
+		for k, v := range metadata {
+			got := o.meta[k]
+			assert.Equal(t, v, got, k)
+		}
+
+		assert.Equal(t, mimeType, gotMetadata.ContentType, "Content-Type")
+
+		// Modification time from the x-bz-info-src_last_modified_millis header
+		var mtime api.Timestamp
+		err = mtime.UnmarshalJSON([]byte(gotMetadata.Info[timeKey]))
+		if err != nil {
+			fs.Debugf(o, "Bad "+timeHeader+" header: %v", err)
+		}
+		assert.Equal(t, item.ModTime, time.Time(mtime), "Modification time")
+
+		// Upload time
+		gotBtime := time.Time(gotMetadata.UploadTimestamp)
+		dt := gotBtime.Sub(btime)
+		assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("btime more than 1 minute out want %v got %v delta %v", btime, gotBtime, dt))
+
+		t.Run("GzipEncoding", func(t *testing.T) {
+			// Test that the gzipped file we uploaded can be
+			// downloaded
+			checkDownload := func(wantContents string, wantSize int64, wantHash string) {
+				gotContents := fstests.ReadObject(ctx, t, o, -1)
+				assert.Equal(t, wantContents, gotContents)
+				assert.Equal(t, wantSize, o.Size())
+				gotHash, err := o.Hash(ctx, hash.SHA1)
+				require.NoError(t, err)
+				assert.Equal(t, wantHash, gotHash)
+			}

+			t.Run("NoDecompress", func(t *testing.T) {
+				checkDownload(contents, int64(len(contents)), sha1Sum(t, contents))
+			})
+		})
+	})
+}
+
+func (f *Fs) InternalTestMetadata(t *testing.T) {
+	// 1 kB regular file
+	f.internalTestMetadata(t, "1kiB", "", "")
+
+	// 10 MiB large file
+	f.internalTestMetadata(t, "10MiB", "6MiB", "6MiB")
+}

 func sha1Sum(t *testing.T, s string) string {
 	hash := sha1.Sum([]byte(s))
 	return fmt.Sprintf("%x", hash)
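
OpenOptionToMetaData is simple enough to exercise standalone. Assuming headerPrefix is "x-bz-info-" (its value in the b2 backend), the two HTTP options used in the test reduce to a plain map:

```go
package main

import (
	"fmt"
	"strings"
)

const headerPrefix = "x-bz-info-" // assumed value of the b2 backend constant

func main() {
	options := map[string]string{
		"X-Bz-Info-a":  "1",
		"X-Bz-Info-b":  "2",
		"Content-Type": "text/html", // no prefix, so it is ignored
	}
	headers := make(map[string]string)
	for k, v := range options {
		k = strings.ToLower(k)
		if strings.HasPrefix(k, headerPrefix) {
			headers[k[len(headerPrefix):]] = v
		}
	}
	fmt.Println(headers) // map[a:1 b:2]
}
```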

@@ -91,7 +91,7 @@ type largeUpload struct {
 // newLargeUpload starts an upload of object o from in with metadata in src
 //
 // If newInfo is set then metadata from that will be used instead of reading it from src
-func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
+func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File, options ...fs.OpenOption) (up *largeUpload, err error) {
 	size := src.Size()
 	parts := 0
 	chunkSize := defaultChunkSize
@@ -104,11 +104,6 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 			parts++
 		}
 	}
-
-	opts := rest.Opts{
-		Method: "POST",
-		Path:   "/b2_start_large_file",
-	}
 	bucket, bucketPath := o.split()
 	bucketID, err := f.getBucketID(ctx, bucket)
 	if err != nil {
@@ -118,12 +113,27 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 		BucketID: bucketID,
 		Name:     f.opt.Enc.FromStandardPath(bucketPath),
 	}
+	optionsToSend := make([]fs.OpenOption, 0, len(options))
 	if newInfo == nil {
-		modTime := src.ModTime(ctx)
+		modTime, err := o.getModTime(ctx, src, options)
+		if err != nil {
+			return nil, err
+		}
+
 		request.ContentType = fs.MimeType(ctx, src)
 		request.Info = map[string]string{
 			timeKey: timeString(modTime),
 		}
+		// Custom upload headers - remove header prefix since they are sent in the body
+		for _, option := range options {
+			k, v := option.Header()
+			k = strings.ToLower(k)
+			if strings.HasPrefix(k, headerPrefix) {
+				request.Info[k[len(headerPrefix):]] = v
+			} else {
+				optionsToSend = append(optionsToSend, option)
+			}
+		}
 		// Set the SHA1 if known
 		if !o.fs.opt.DisableCheckSum || doCopy {
 			if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
@@ -134,6 +144,11 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 		request.ContentType = newInfo.ContentType
 		request.Info = newInfo.Info
 	}
+	opts := rest.Opts{
+		Method:  "POST",
+		Path:    "/b2_start_large_file",
+		Options: optionsToSend,
+	}
 	var response api.StartLargeFileResponse
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)

backend/cache/cache_internal_test.go (vendored, 14 changes)

@@ -33,7 +33,7 @@ import (
 	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/fstest/testy"
 	"github.com/rclone/rclone/lib/random"
-	"github.com/rclone/rclone/vfs/vfsflags"
+	"github.com/rclone/rclone/vfs/vfscommon"
 	"github.com/stretchr/testify/require"
 )

@@ -123,10 +123,10 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {

 /* TODO: is this testing something?
 func TestInternalVfsCache(t *testing.T) {
-	vfsflags.Opt.DirCacheTime = time.Second * 30
+	vfscommon.Opt.DirCacheTime = time.Second * 30
 	testSize := int64(524288000)

-	vfsflags.Opt.CacheMode = vfs.CacheModeWrites
+	vfscommon.Opt.CacheMode = vfs.CacheModeWrites
 	id := "tiuufo"
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)

@@ -338,7 +338,7 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {

 func TestInternalWrappedWrittenContentMatches(t *testing.T) {
 	id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
-	vfsflags.Opt.DirCacheTime = time.Second
+	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
 	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
 	if runInstance.rootIsCrypt {
 		t.Skip("test skipped with crypt remote")

@@ -368,7 +368,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {

 func TestInternalLargeWrittenContentMatches(t *testing.T) {
 	id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
-	vfsflags.Opt.DirCacheTime = time.Second
+	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second)
 	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
 	if runInstance.rootIsCrypt {
 		t.Skip("test skipped with crypt remote")

@@ -708,7 +708,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {

 func TestInternalExpiredEntriesRemoved(t *testing.T) {
 	id := fmt.Sprintf("tieer%v", time.Now().Unix())
-	vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
+	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 4) // needs to be lower than the defined
 	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)

@@ -743,7 +743,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
 }

 func TestInternalBug2117(t *testing.T) {
-	vfsflags.Opt.DirCacheTime = time.Second * 10
+	vfscommon.Opt.DirCacheTime = fs.Duration(time.Second * 10)

 	id := fmt.Sprintf("tib2117%v", time.Now().Unix())
 	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})

@@ -566,7 +566,7 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
 	// Check set up for filtering
 	assert.True(t, f.Features().FilterAware)

-	opt := &filter.Opt{}
+	opt := &filter.Options{}
 	err := opt.MaxAge.Set("1h")
 	assert.NoError(t, err)
 	flt, err := filter.NewFilter(opt)

@@ -59,7 +59,7 @@ func (f *Fs) InternalTestMetadata(t *testing.T) {
 		//"utime" - read-only
 		//"content-type" - read-only
 	}
-	obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, contents, true, "text/html", metadata)
+	obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, false, contents, true, "text/html", metadata)
 	defer func() {
 		assert.NoError(t, obj.Remove(ctx))
 	}()

@@ -1568,32 +1568,47 @@ func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
 }

 func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
-	if runtime.GOOS != "windows" || !strings.HasPrefix(s, "\\") {
-		if !filepath.IsAbs(s) {
-			s2, err := filepath.Abs(s)
-			if err == nil {
-				s = s2
-			}
-		} else {
-			s = filepath.Clean(s)
-		}
-	}
+	var vol string
 	if runtime.GOOS == "windows" {
 		s = filepath.ToSlash(s)
-		vol := filepath.VolumeName(s)
+		vol = filepath.VolumeName(s)
 		if vol == `\\?` && len(s) >= 6 {
 			// `\\?\C:`
 			vol = s[:6]
 		}
-		s = vol + enc.FromStandardPath(s[len(vol):])
-		s = filepath.FromSlash(s)
-		if !noUNC {
-			// Convert to UNC
-			s = file.UNCPath(s)
-		}
-		return s
+		s = s[len(vol):]
 	}
-	s = enc.FromStandardPath(s)
+	// Don't use FromStandardPath. Make sure Dot (`.`, `..`) as name will not be reencoded
+	// Take care of the case Standard: ././‛. (the first dot means current directory)
+	if enc != encoder.Standard {
+		s = filepath.ToSlash(s)
+		parts := strings.Split(s, "/")
+		encoded := make([]string, len(parts))
+		changed := false
+		for i, p := range parts {
+			if (p == ".") || (p == "..") {
+				encoded[i] = p
+				continue
+			}
+			part := enc.FromStandardName(p)
+			changed = changed || part != p
+			encoded[i] = part
+		}
+		if changed {
+			s = strings.Join(encoded, "/")
+		}
+		s = filepath.FromSlash(s)
+	}
+	if runtime.GOOS == "windows" {
+		s = vol + s
+	}
+	s2, err := filepath.Abs(s)
+	if err == nil {
+		s = s2
+	}
+	if !noUNC {
+		// Convert to UNC. It does nothing on non windows platforms.
+		s = file.UNCPath(s)
+	}
 	return s
 }
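
The point of the per-segment loop in the new cleanRootPath is that `.` and `..` are never re-encoded, so they keep their path meaning. A sketch with a stand-in encoder (strings.ToUpper replaces rclone's encoder.MultiEncoder) shows the effect:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	fromStandardName := strings.ToUpper // stand-in for enc.FromStandardName
	parts := strings.Split("./../src/./file.txt", "/")
	for i, p := range parts {
		if p == "." || p == ".." {
			continue // dot segments pass through untouched
		}
		parts[i] = fromStandardName(p)
	}
	fmt.Println(strings.Join(parts, "/")) // ./../SRC/./FILE.TXT
}
```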

@@ -2538,6 +2538,9 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, src fs.Obje
 	}
 	// Set the mod time now and read metadata
 	info, err = o.fs.fetchAndUpdateMetadata(ctx, src, options, o)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch and update metadata: %w", err)
+	}
 	return info, o.setMetaData(info)
 }

@@ -379,7 +379,7 @@ func (f *Fs) putWithMeta(ctx context.Context, t *testing.T, file *fstest.Item, p
 	}

 	expectedMeta.Set("permissions", marshalPerms(t, perms))
-	obj := fstests.PutTestContentsMetadata(ctx, t, f, file, content, true, "plain/text", expectedMeta)
+	obj := fstests.PutTestContentsMetadata(ctx, t, f, file, false, content, true, "plain/text", expectedMeta)
 	do, ok := obj.(fs.Metadataer)
 	require.True(t, ok)
 	actualMeta, err := do.Metadata(ctx)

@@ -176,7 +176,7 @@ type File struct {
 	FileCategory  string `json:"file_category,omitempty"` // "AUDIO", "VIDEO"
 	FileExtension string `json:"file_extension,omitempty"`
 	FolderType    string `json:"folder_type,omitempty"`
-	Hash          string `json:"hash,omitempty"` // sha1 but NOT a valid file hash. looks like a torrent hash
+	Hash          string `json:"hash,omitempty"` // custom hash with a form of sha1sum
 	IconLink      string `json:"icon_link,omitempty"`
 	ID            string `json:"id,omitempty"`
 	Kind          string `json:"kind,omitempty"` // "drive#file"

@@ -486,7 +486,7 @@ type RequestNewFile struct {
 	ParentID   string `json:"parent_id"`
 	FolderType string `json:"folder_type"`
 	// only when uploading a new file
-	Hash       string            `json:"hash,omitempty"` // sha1sum
+	Hash       string            `json:"hash,omitempty"` // gcid
 	Resumable  map[string]string `json:"resumable,omitempty"` // {"provider": "PROVIDER_ALIYUN"}
 	Size       int64             `json:"size,omitempty"`
 	UploadType string            `json:"upload_type,omitempty"` // "UPLOAD_TYPE_FORM" or "UPLOAD_TYPE_RESUMABLE"

@@ -8,18 +8,22 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"math/rand"
 	"net/http"
 	"net/url"
 	"os"
+	"strconv"
 	"strings"
 	"time"

 	"github.com/rclone/rclone/backend/pikpak/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/lib/rest"
 )

 // Globals
 const (
-	cachePrefix = "rclone-pikpak-sha1sum-"
+	cachePrefix = "rclone-pikpak-gcid-"
 )

 // requestDecompress requests decompress of compressed files

@@ -82,19 +86,21 @@ func (f *Fs) getVIPInfo(ctx context.Context) (info *api.VIP, err error) {
 // action can be one of batch{Copy,Delete,Trash,Untrash}
 func (f *Fs) requestBatchAction(ctx context.Context, action string, req *api.RequestBatch) (err error) {
 	opts := rest.Opts{
-		Method:     "POST",
-		Path:       "/drive/v1/files:" + action,
-		NoResponse: true, // Only returns `{"task_id":""}
+		Method: "POST",
+		Path:   "/drive/v1/files:" + action,
 	}
+	info := struct {
+		TaskID string `json:"task_id"`
+	}{}
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.rst.CallJSON(ctx, &opts, &req, nil)
+		resp, err = f.rst.CallJSON(ctx, &opts, &req, &info)
 		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return fmt.Errorf("batch action %q failed: %w", action, err)
 	}
-	return nil
+	return f.waitTask(ctx, info.TaskID)
 }

 // requestNewTask requests a new api.NewTask and returns api.Task
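
Both requestBatchAction and CleanUp (further down) now parse the same minimal response shape instead of discarding it. Decoding that shape is just an anonymous struct unmarshal; the task ID below is made up for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// The batch endpoints only return {"task_id":"..."}
	var info struct {
		TaskID string `json:"task_id"`
	}
	if err := json.Unmarshal([]byte(`{"task_id":"VOk3ze_example"}`), &info); err != nil {
		panic(err)
	}
	fmt.Println(info.TaskID) // VOk3ze_example, then handed to waitTask
}
```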

@@ -148,6 +154,9 @@ func (f *Fs) getFile(ctx context.Context, ID string) (info *api.File, err error)
 		}
 		return f.shouldRetry(ctx, resp, err)
 	})
+	if err == nil {
+		info.Name = f.opt.Enc.ToStandardName(info.Name)
+	}
 	return
 }

@@ -179,8 +188,8 @@ func (f *Fs) getTask(ctx context.Context, ID string, checkPhase bool) (info *api
 		resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
 		if checkPhase {
 			if err == nil && info.Phase != api.PhaseTypeComplete {
-				// could be pending right after file is created/uploaded.
-				return true, errors.New(info.Phase)
+				// could be pending right after the task is created
+				return true, fmt.Errorf("%s (%s) is still in %s", info.Name, info.Type, info.Phase)
 			}
 		}
 		return f.shouldRetry(ctx, resp, err)

@@ -188,6 +197,18 @@ func (f *Fs) getTask(ctx context.Context, ID string, checkPhase bool) (info *api
 	return
 }

+// waitTask waits for async tasks to be completed
+func (f *Fs) waitTask(ctx context.Context, ID string) (err error) {
+	time.Sleep(taskWaitTime)
+	if info, err := f.getTask(ctx, ID, true); err != nil {
+		if info == nil {
+			return fmt.Errorf("can't verify the task is completed: %q", ID)
+		}
+		return fmt.Errorf("can't verify the task is completed: %#v", info)
+	}
+	return
+}
+
 // deleteTask remove a task having the specified ID
 func (f *Fs) deleteTask(ctx context.Context, ID string, deleteFiles bool) (err error) {
 	params := url.Values{}

@@ -235,16 +256,42 @@ func (f *Fs) requestShare(ctx context.Context, req *api.RequestShare) (info *api
 	return
 }

-// Read the sha1 of in returning a reader which will read the same contents
+// getGcid retrieves Gcid cached in API server
+func (f *Fs) getGcid(ctx context.Context, src fs.ObjectInfo) (gcid string, err error) {
+	cid, err := calcCid(ctx, src)
+	if err != nil {
+		return
+	}
+
+	params := url.Values{}
+	params.Set("cid", cid)
+	params.Set("file_size", strconv.FormatInt(src.Size(), 10))
+	opts := rest.Opts{
+		Method:       "GET",
+		Path:         "/drive/v1/resource/cid",
+		Parameters:   params,
+		ExtraHeaders: map[string]string{"x-device-id": f.deviceID},
+	}
+
+	info := struct {
+		Gcid string `json:"gcid,omitempty"`
+	}{}
+	var resp *http.Response
+	err = f.pacer.Call(func() (bool, error) {
+		resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
+		return f.shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return "", err
+	}
+	return info.Gcid, nil
+}
+
+// Read the gcid of in returning a reader which will read the same contents
 //
 // The cleanup function should be called when out is finished with
 // regardless of whether this function returned an error or not.
-func readSHA1(in io.Reader, size, threshold int64) (sha1sum string, out io.Reader, cleanup func(), err error) {
-	// we need an SHA1
-	hash := sha1.New()
-	// use the teeReader to write to the local file AND calculate the SHA1 while doing so
-	teeReader := io.TeeReader(in, hash)
-
+func readGcid(in io.Reader, size, threshold int64) (gcid string, out io.Reader, cleanup func(), err error) {
 	// nothing to clean up by default
 	cleanup = func() {}

@@ -267,8 +314,11 @@ func readGcid(in io.Reader, size, threshold int64) (gcid string, out io.Reader,
 		_ = os.Remove(tempFile.Name()) // delete the cache file after we are done - may be deleted already
 	}

-	// copy the ENTIRE file to disc and calculate the SHA1 in the process
-	if _, err = io.Copy(tempFile, teeReader); err != nil {
+	// use the teeReader to write to the local file AND calculate the gcid while doing so
+	teeReader := io.TeeReader(in, tempFile)
+
+	// copy the ENTIRE file to disk and calculate the gcid in the process
+	if gcid, err = calcGcid(teeReader, size); err != nil {
 		return
 	}
 	// jump to the start of the local file so we can pass it along

@@ -279,15 +329,102 @@ func readGcid(in io.Reader, size, threshold int64) (gcid string, out io.Reader,
 		// replace the already read source with a reader of our cached file
 		out = tempFile
 	} else {
-		// that's a small file, just read it into memory
-		var inData []byte
-		inData, err = io.ReadAll(teeReader)
-		if err != nil {
+		buf := &bytes.Buffer{}
+		teeReader := io.TeeReader(in, buf)
+
+		if gcid, err = calcGcid(teeReader, size); err != nil {
 			return
 		}
-
-		// set the reader to our read memory block
-		out = bytes.NewReader(inData)
+		out = buf
 	}
-	return hex.EncodeToString(hash.Sum(nil)), out, cleanup, nil
+	return
 }

+// calcGcid calculates Gcid from reader
+//
+// Gcid is a custom hash to index a file contents
+func calcGcid(r io.Reader, size int64) (string, error) {
+	calcBlockSize := func(j int64) int64 {
+		var psize int64 = 0x40000
+		for float64(j)/float64(psize) > 0x200 && psize < 0x200000 {
+			psize = psize << 1
+		}
+		return psize
+	}
+
+	totalHash := sha1.New()
+	blockHash := sha1.New()
+	readSize := calcBlockSize(size)
+	for {
+		blockHash.Reset()
+		if n, err := io.CopyN(blockHash, r, readSize); err != nil && n == 0 {
+			if err != io.EOF {
+				return "", err
+			}
+			break
+		}
+		totalHash.Write(blockHash.Sum(nil))
+	}
+	return hex.EncodeToString(totalHash.Sum(nil)), nil
+}
+
+// calcCid calculates Cid from source
+//
+// Cid is a simplified version of Gcid
+func calcCid(ctx context.Context, src fs.ObjectInfo) (cid string, err error) {
+	srcObj := fs.UnWrapObjectInfo(src)
+	if srcObj == nil {
+		return "", fmt.Errorf("failed to unwrap object from src: %s", src)
+	}
+
+	size := src.Size()
+	hash := sha1.New()
+	var rc io.ReadCloser
+
+	readHash := func(start, length int64) (err error) {
+		end := start + length - 1
+		if rc, err = srcObj.Open(ctx, &fs.RangeOption{Start: start, End: end}); err != nil {
+			return fmt.Errorf("failed to open src with range (%d, %d): %w", start, end, err)
+		}
+		defer fs.CheckClose(rc, &err)
+		_, err = io.Copy(hash, rc)
+		return err
+	}
+
+	if size <= 0xF000 { // 61440 = 60KB
+		err = readHash(0, size)
+	} else { // 20KB from three different parts
+		for _, start := range []int64{0, size / 3, size - 0x5000} {
+			err = readHash(start, 0x5000)
+			if err != nil {
+				break
+			}
+		}
+	}
+	if err != nil {
+		return "", fmt.Errorf("failed to hash: %w", err)
+	}
+	cid = strings.ToUpper(hex.EncodeToString(hash.Sum(nil)))
+	return
+}
+
+// randomly generates device id used for request header 'x-device-id'
+//
+// original javascript implementation
+//
+//	return "xxxxxxxxxxxx4xxxyxxxxxxxxxxxxxxx".replace(/[xy]/g, (e) => {
+//		const t = (16 * Math.random()) | 0;
+//		return ("x" == e ? t : (3 & t) | 8).toString(16);
+//	});
+func genDeviceID() string {
+	base := []byte("xxxxxxxxxxxx4xxxyxxxxxxxxxxxxxxx")
+	for i, char := range base {
+		switch char {
+		case 'x':
+			base[i] = fmt.Sprintf("%x", rand.Intn(16))[0]
+		case 'y':
+			base[i] = fmt.Sprintf("%x", rand.Intn(16)&3|8)[0]
+		}
+	}
+	return string(base)
+}
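
The block size chosen by calcGcid doubles from 256 KiB toward a 2 MiB cap once a file would span more than 512 blocks. Replicating calcBlockSize shows the progression:

```go
package main

import "fmt"

func calcBlockSize(j int64) int64 { // copied from calcGcid above
	var psize int64 = 0x40000
	for float64(j)/float64(psize) > 0x200 && psize < 0x200000 {
		psize = psize << 1
	}
	return psize
}

func main() {
	for _, size := range []int64{10 << 20, 100 << 20, 1 << 30, 10 << 30} {
		fmt.Printf("%6d MiB -> %4d KiB blocks\n", size>>20, calcBlockSize(size)>>10)
	}
	// 10 MiB and 100 MiB stay at 256 KiB; 1 GiB and larger hit the 2048 KiB cap
}
```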

@@ -7,8 +7,6 @@ package pikpak

 // md5sum is not always available, sometimes given empty.

-// sha1sum used for upload differs from the one with official apps.
-
 // Trashed files are not restored to the original location when using `batchUntrash`

 // Can't stream without `--vfs-cache-mode=full`

@@ -69,7 +67,7 @@ const (
 	rcloneEncryptedClientSecret = "aqrmB6M1YJ1DWCBxVxFSjFo7wzWEky494YMmkqgAl1do1WKOe2E"
 	minSleep                    = 100 * time.Millisecond
 	maxSleep                    = 2 * time.Second
-	waitTime                    = 500 * time.Millisecond
+	taskWaitTime                = 500 * time.Millisecond
 	decayConstant               = 2 // bigger for slower decay, exponential
 	rootURL                     = "https://api-drive.mypikpak.com"
 	minChunkSize                = fs.SizeSuffix(s3manager.MinUploadPartSize)

@@ -276,6 +274,7 @@ type Fs struct {
 	dirCache     *dircache.DirCache // Map of directory path to directory id
 	pacer        *fs.Pacer          // pacer for API calls
 	rootFolderID string             // the id of the root folder
+	deviceID     string             // device id used for api requests
 	client       *http.Client       // authorized client
 	m            configmap.Mapper
 	tokenMu      *sync.Mutex // when renewing tokens

@@ -291,6 +290,7 @@ type Object struct {
 	modTime  time.Time // modification time of the object
 	mimeType string    // The object MIME type
 	parent   string    // ID of the parent directories
+	gcid     string    // custom hash of the object
 	md5sum   string    // md5sum of the object
 	link     *api.Link // link to download the object
 	linkMu   *sync.Mutex

@@ -490,6 +490,7 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
 		CanHaveEmptyDirectories: true, // can have empty directories
 		NoMultiThreading:        true, // can't have multiple threads downloading
 	}).Fill(ctx, f)
+	f.deviceID = genDeviceID()

 	if err := f.newClientWithPacer(ctx); err != nil {
 		return nil, err

@@ -917,19 +918,21 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 // CleanUp empties the trash
 func (f *Fs) CleanUp(ctx context.Context) (err error) {
 	opts := rest.Opts{
-		Method:     "PATCH",
-		Path:       "/drive/v1/files/trash:empty",
-		NoResponse: true, // Only returns `{"task_id":""}
+		Method: "PATCH",
+		Path:   "/drive/v1/files/trash:empty",
 	}
+	info := struct {
+		TaskID string `json:"task_id"`
+	}{}
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		resp, err = f.rst.Call(ctx, &opts)
+		resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
 		return f.shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
 		return fmt.Errorf("couldn't empty trash: %w", err)
 	}
-	return nil
+	return f.waitTask(ctx, info.TaskID)
 }

 // Move the object

@@ -1015,6 +1018,7 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
 	o = &Object{
 		fs:      f,
 		remote:  remote,
+		parent:  dirID,
 		size:    size,
 		modTime: modTime,
 		linkMu:  new(sync.Mutex),

@@ -1047,7 +1051,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		return nil, err
 	}

-	// Create temporary object
+	// Create temporary object - still missing id, mimeType, gcid, md5sum
 	dstObj, dstLeaf, dstParentID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
 	if err != nil {
 		return nil, err

@@ -1059,7 +1063,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 			return nil, err
 		}
 	}
+	// Manually update info of moved object to save API calls
+	dstObj.id = srcObj.id
+	dstObj.mimeType = srcObj.mimeType
+	dstObj.gcid = srcObj.gcid
+	dstObj.md5sum = srcObj.md5sum
+	dstObj.hasMetaData = true

 	if srcLeaf != dstLeaf {
 		// Rename

@@ -1067,16 +1076,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		if err != nil {
 			return nil, fmt.Errorf("move: couldn't rename moved file: %w", err)
 		}
-		err = dstObj.setMetaData(info)
-		if err != nil {
-			return nil, err
-		}
-	} else {
-		// Update info
-		err = dstObj.readMetaData(ctx)
-		if err != nil {
-			return nil, fmt.Errorf("move: couldn't locate moved file: %w", err)
-		}
+		return dstObj, dstObj.setMetaData(info)
 	}
 	return dstObj, nil
 }

@@ -1116,7 +1116,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		return nil, err
 	}

-	// Create temporary object
+	// Create temporary object - still missing id, mimeType, gcid, md5sum
 	dstObj, dstLeaf, dstParentID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
 	if err != nil {
 		return nil, err

@@ -1130,6 +1130,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	if err := f.copyObjects(ctx, []string{srcObj.id}, dstParentID); err != nil {
 		return nil, fmt.Errorf("couldn't copy file: %w", err)
 	}
+	// Update info of the copied object with new parent but source name
+	if info, err := dstObj.fs.readMetaDataForPath(ctx, srcObj.remote); err != nil {
+		return nil, fmt.Errorf("copy: couldn't locate copied file: %w", err)
+	} else if err = dstObj.setMetaData(info); err != nil {
+		return nil, err
+	}

 	// Can't copy and change name in one step so we have to check if we have
 	// the correct name after copy

@@ -1144,16 +1150,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		if err != nil {
 			return nil, fmt.Errorf("copy: couldn't rename copied file: %w", err)
 		}
-		err = dstObj.setMetaData(info)
-		if err != nil {
-			return nil, err
-		}
-	} else {
-		// Update info
-		err = dstObj.readMetaData(ctx)
-		if err != nil {
-			return nil, fmt.Errorf("copy: couldn't locate copied file: %w", err)
-		}
+		return dstObj, dstObj.setMetaData(info)
 	}
 	return dstObj, nil
 }

@@ -1222,7 +1219,7 @@ func (f *Fs) uploadByResumable(ctx context.Context, in io.Reader, name string, s
 	return
 }

-func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, sha1Str string, size int64, options ...fs.OpenOption) (info *api.File, err error) {
+func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string, size int64, options ...fs.OpenOption) (info *api.File, err error) {
 	// determine upload type
 	uploadType := api.UploadTypeResumable
 	// if size >= 0 && size < int64(5*fs.Mebi) {

@@ -1237,7 +1234,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string,
 		ParentID:   parentIDForRequest(dirID),
 		FolderType: "NORMAL",
 		Size:       size,
-		Hash:       strings.ToUpper(sha1Str),
+		Hash:       strings.ToUpper(gcid),
 		UploadType: uploadType,
 	}
 	if uploadType == api.UploadTypeResumable {

@@ -1262,8 +1259,8 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string,
 		if cancelErr := f.deleteTask(ctx, new.Task.ID, false); cancelErr != nil {
 			fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
 		}
-		fs.Debugf(leaf, "waiting %v for the cancellation to be effective", waitTime)
-		time.Sleep(waitTime)
+		fs.Debugf(leaf, "waiting %v for the cancellation to be effective", taskWaitTime)
+		time.Sleep(taskWaitTime)
 	})()

 	if uploadType == api.UploadTypeForm && new.Form != nil {

@@ -1277,12 +1274,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string,
 	if err != nil {
 		return nil, fmt.Errorf("failed to upload: %w", err)
 	}
-	fs.Debugf(leaf, "sleeping for %v before checking upload status", waitTime)
-	time.Sleep(waitTime)
-	if _, err = f.getTask(ctx, new.Task.ID, true); err != nil {
-		return nil, fmt.Errorf("unable to complete the upload: %w", err)
-	}
-	return new.File, nil
+	return new.File, f.waitTask(ctx, new.Task.ID)
 }

 // Put the object

@@ -1506,6 +1498,7 @@ func (o *Object) setMetaData(info *api.File) (err error) {
 	} else {
 		o.parent = info.ParentID
 	}
+	o.gcid = info.Hash
 	o.md5sum = info.Md5Checksum
 	if info.Links.ApplicationOctetStream != nil {
 		o.link = info.Links.ApplicationOctetStream

@@ -1579,9 +1572,6 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	if t != hash.MD5 {
 		return "", hash.ErrUnsupported
 	}
-	if o.md5sum == "" {
-		return "", nil
-	}
 	return strings.ToLower(o.md5sum), nil
 }

@@ -1705,25 +1695,40 @@ func (o *Object) upload(ctx context.Context, in io.Reader, src fs.ObjectInfo, wi
 		return err
 	}

-	// Calculate sha1sum; grabbed from package jottacloud
-	hashStr, err := src.Hash(ctx, hash.SHA1)
-	if err != nil || hashStr == "" {
-		// unwrap the accounting from the input, we use wrap to put it
-		// back on after the buffering
-		var wrap accounting.WrapFn
-		in, wrap = accounting.UnWrap(in)
-		var cleanup func()
-		hashStr, in, cleanup, err = readSHA1(in, size, int64(o.fs.opt.HashMemoryThreshold))
-		defer cleanup()
-		if err != nil {
-			return fmt.Errorf("failed to calculate SHA1: %w", err)
-		}
-		// Wrap the accounting back onto the stream
-		in = wrap(in)
+	// Calculate gcid; grabbed from package jottacloud
+	gcid, err := o.fs.getGcid(ctx, src)
+	if err != nil || gcid == "" {
+		fs.Debugf(o, "calculating gcid: %v", err)
+		if srcObj := fs.UnWrapObjectInfo(src); srcObj != nil && srcObj.Fs().Features().IsLocal {
+			// No buffering; directly calculate gcid from source
+			rc, err := srcObj.Open(ctx)
+			if err != nil {
+				return fmt.Errorf("failed to open src: %w", err)
+			}
+			defer fs.CheckClose(rc, &err)
+
+			if gcid, err = calcGcid(rc, srcObj.Size()); err != nil {
+				return fmt.Errorf("failed to calculate gcid: %w", err)
+			}
+		} else {
+			// unwrap the accounting from the input, we use wrap to put it
+			// back on after the buffering
+			var wrap accounting.WrapFn
+			in, wrap = accounting.UnWrap(in)
+			var cleanup func()
+			gcid, in, cleanup, err = readGcid(in, size, int64(o.fs.opt.HashMemoryThreshold))
+			defer cleanup()
+			if err != nil {
+				return fmt.Errorf("failed to calculate gcid: %w", err)
+			}
+			// Wrap the accounting back onto the stream
+			in = wrap(in)
+		}
 	}
+	fs.Debugf(o, "gcid = %s", gcid)

 	if !withTemp {
-		info, err := o.fs.upload(ctx, in, leaf, dirID, hashStr, size, options...)
+		info, err := o.fs.upload(ctx, in, leaf, dirID, gcid, size, options...)
 		if err != nil {
 			return err
 		}

@@ -1732,7 +1737,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, src fs.ObjectInfo, wi

 	// We have to fall back to upload + rename
 	tempName := "rcloneTemp" + random.String(8)
-	info, err := o.fs.upload(ctx, in, tempName, dirID, hashStr, size, options...)
+	info, err := o.fs.upload(ctx, in, tempName, dirID, gcid, size, options...)
 	if err != nil {
 		return err
 	}

@@ -883,7 +883,9 @@ func (o *Object) Storable() bool {

 // Open opens the file for read. Call Close() on the returned io.ReadCloser
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
-	fs.FixRangeOption(options, *o.originalSize)
+	if o.originalSize != nil {
+		fs.FixRangeOption(options, *o.originalSize)
+	}
 	var offset, limit int64 = 0, -1
 	for _, option := range options { // if the caller passes in nil for options, it will become array of nil
 		switch x := option.(type) {

@@ -1415,8 +1415,8 @@ func init() {
 			Help:     "Magalu BR Southeast 1 endpoint",
 			Provider: "Magalu",
 		}, {
-			Value:    "br-se1.magaluobjects.com",
-			Help:     "Magalu BR Northest 1 endpoint",
+			Value:    "br-ne1.magaluobjects.com",
+			Help:     "Magalu BR Northeast 1 endpoint",
 			Provider: "Magalu",
 		}},
 	}, {

@@ -58,7 +58,7 @@ func (f *Fs) InternalTestMetadata(t *testing.T) {
 		// "tier" - read only
 		// "btime" - read only
 	}
-	obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, contents, true, "text/html", metadata)
+	obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/html", metadata)
 	defer func() {
 		assert.NoError(t, obj.Remove(ctx))
 	}()

@@ -339,13 +339,13 @@ cost of using more memory.
 Note that setting this is very likely to cause deadlocks so it should
 be used with care.

-If you are doing a sync or copy then make sure concurrency is one more
+If you are doing a sync or copy then make sure connections is one more
 than the sum of |--transfers| and |--checkers|.

 If you use |--check-first| then it just needs to be one more than the
 maximum of |--checkers| and |--transfers|.

-So for |concurrency 3| you'd use |--checkers 2 --transfers 2
+So for |connections 3| you'd use |--checkers 2 --transfers 2
 --check-first| or |--checkers 1 --transfers 1|.

 `, "|", "`", -1),

@@ -561,7 +561,7 @@ type Object struct {
 	fs      *Fs
 	remote  string
 	size    int64       // size of the object
-	modTime time.Time   // modification time of the object
+	modTime uint32      // modification time of the object as unix time
 	mode    os.FileMode // mode bits from the file
 	md5sum  *string     // Cached MD5 checksum
 	sha1sum *string     // Cached SHA1 checksum

@@ -815,13 +815,13 @@ func (f *Fs) drainPool(ctx context.Context) (err error) {
 		if cErr := c.closed(); cErr == nil {
 			cErr = c.close()
 			if cErr != nil {
-				err = cErr
+				fs.Debugf(f, "Ignoring error closing connection: %v", cErr)
 			}
 		}
 		f.pool[i] = nil
 	}
 	f.pool = nil
-	return err
+	return nil
 }

 // NewFs creates a new Fs object from the name and root. It connects to

@@ -1957,7 +1957,7 @@ func (o *Object) Size() int64 {

 // ModTime returns the modification time of the remote sftp file
 func (o *Object) ModTime(ctx context.Context) time.Time {
-	return o.modTime
+	return time.Unix(int64(o.modTime), 0)
 }

 // path returns the native SFTP path of the object
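
Storing the mtime as a uint32 unix time (presumably to shrink the per-Object footprint, at the cost of sub-second precision and a year-2106 limit) round-trips like this:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Now()
	mtime := uint32(now.Unix())        // what setMetadata now stores
	back := time.Unix(int64(mtime), 0) // what ModTime now returns
	fmt.Println(now.Sub(back) < time.Second) // true: only sub-second detail is lost
}
```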

@@ -1972,7 +1972,7 @@ func (o *Object) shellPath() string {

 // setMetadata updates the info in the object from the stat result passed in
 func (o *Object) setMetadata(info os.FileInfo) {
-	o.modTime = info.ModTime()
+	o.modTime = info.Sys().(*sftp.FileStat).Mtime
 	o.size = info.Size()
 	o.mode = info.Mode()
 }

@@ -2195,7 +2195,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		// In the specific case of o.fs.opt.SetModTime == false
 		// if the object wasn't found then don't return an error
 		fs.Debugf(o, "Not found after upload with set_modtime=false so returning best guess")
-		o.modTime = src.ModTime(ctx)
+		o.modTime = uint32(src.ModTime(ctx).Unix())
 		o.size = src.Size()
 		o.mode = os.FileMode(0666) // regular file
 	} else if err != nil {

@@ -278,6 +278,36 @@ provider.`,
 			Value: "pca",
 			Help:  "OVH Public Cloud Archive",
 		}},
+	}, {
+		Name: "fetch_until_empty_page",
+		Help: `When paginating, always fetch unless we received an empty page.
+
+Consider using this option if rclone listings show fewer objects
+than expected, or if repeated syncs copy unchanged objects.
+
+It is safe to enable this, but rclone may make more API calls than
+necessary.
+
+This is one of a pair of workarounds to handle implementations
+of the Swift API that do not implement pagination as expected. See
+also "partial_page_fetch_threshold".`,
+		Default:  false,
+		Advanced: true,
+	}, {
+		Name: "partial_page_fetch_threshold",
+		Help: `When paginating, fetch if the current page is within this percentage of the limit.
+
+Consider using this option if rclone listings show fewer objects
+than expected, or if repeated syncs copy unchanged objects.
+
+It is safe to enable this, but rclone may make more API calls than
+necessary.
+
+This is one of a pair of workarounds to handle implementations
+of the Swift API that do not implement pagination as expected. See
+also "fetch_until_empty_page".`,
+		Default:  0,
+		Advanced: true,
 	}}, SharedOptions...),
 	})
 }

@@ -308,6 +338,8 @@ type Options struct {
 	NoLargeObjects            bool                 `config:"no_large_objects"`
 	UseSegmentsContainer      fs.Tristate          `config:"use_segments_container"`
 	Enc                       encoder.MultiEncoder `config:"encoding"`
+	FetchUntilEmptyPage       bool                 `config:"fetch_until_empty_page"`
+	PartialPageFetchThreshold int                  `config:"partial_page_fetch_threshold"`
 }

 // Fs represents a remote swift server
|
||||
ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
|
||||
Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
|
||||
Transport: fshttp.NewTransport(ctx),
|
||||
FetchUntilEmptyPage: opt.FetchUntilEmptyPage,
|
||||
PartialPageFetchThreshold: opt.PartialPageFetchThreshold,
|
||||
}
|
||||
if opt.EnvAuth {
|
||||
err := c.ApplyEnvironment()
|
||||
|
||||
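
The two new options steer when the listing loop asks the server for another page. The real decision lives in the ncw/swift library; this is only a sketch of the semantics the option help text describes, with made-up function and parameter names:

```go
package main

import "fmt"

// shouldFetchMore is a sketch, not library code: got is the number of
// objects in the page just received, limit is the page size requested.
func shouldFetchMore(got, limit, thresholdPct int, fetchUntilEmpty bool) bool {
	if fetchUntilEmpty {
		return got > 0 // keep going until a page comes back empty
	}
	if thresholdPct > 0 {
		return got*100 >= limit*thresholdPct // a "partial page" still counts as full enough
	}
	return got == limit // default: a short page means the listing is done
}

func main() {
	fmt.Println(shouldFetchMore(950, 1000, 0, false))  // false: default stops early
	fmt.Println(shouldFetchMore(950, 1000, 90, false)) // true: within 90% of the limit
	fmt.Println(shouldFetchMore(950, 1000, 0, true))   // true: not empty yet
}
```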

cmd/cmd.go (61 changes)

@@ -14,7 +14,6 @@ import (
 	"os"
 	"os/exec"
 	"path"
-	"regexp"
 	"runtime"
 	"runtime/pprof"
 	"strconv"

@@ -29,11 +28,10 @@ import (
 	"github.com/rclone/rclone/fs/config/configflags"
 	"github.com/rclone/rclone/fs/config/flags"
 	"github.com/rclone/rclone/fs/filter"
-	"github.com/rclone/rclone/fs/filter/filterflags"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fspath"
 	fslog "github.com/rclone/rclone/fs/log"
-	"github.com/rclone/rclone/fs/rc/rcflags"
+	"github.com/rclone/rclone/fs/rc"
 	"github.com/rclone/rclone/fs/rc/rcserver"
 	fssync "github.com/rclone/rclone/fs/sync"
 	"github.com/rclone/rclone/lib/atexit"

@@ -50,7 +48,6 @@ var (
 	cpuProfile    = flags.StringP("cpuprofile", "", "", "Write cpu profile to file", "Debugging")
 	memProfile    = flags.StringP("memprofile", "", "", "Write memory profile to file", "Debugging")
 	statsInterval = flags.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable)", "Logging")
-	dataRateUnit  = flags.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes' per second", "Logging")
 	version       bool
 	// Errors
 	errorCommandNotFound = errors.New("command not found")

@@ -383,6 +380,12 @@ func StartStats() func() {

 // initConfig is run by cobra after initialising the flags
 func initConfig() {
+	// Set the global options from the flags
+	err := fs.GlobalOptionsInit()
+	if err != nil {
+		log.Fatalf("Failed to initialise global options: %v", err)
+	}
+
 	ctx := context.Background()
 	ci := fs.GetConfig(ctx)

@@ -409,12 +412,6 @@ func initConfig() {
 		terminal.EnableColorsStdout()
 	}

-	// Load filters
-	err := filterflags.Reload(ctx)
-	if err != nil {
-		log.Fatalf("Failed to load filters: %v", err)
-	}
-
 	// Write the args for debug purposes
 	fs.Debugf("rclone", "Version %q starting with parameters %q", fs.Version, os.Args)
@@ -424,7 +421,7 @@ func initConfig() {
|
||||
}
|
||||
|
||||
// Start the remote control server if configured
|
||||
_, err = rcserver.Start(context.Background(), &rcflags.Opt)
|
||||
_, err = rcserver.Start(context.Background(), &rc.Opt)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to start remote control: %v", err)
|
||||
}
|
||||
@@ -473,13 +470,6 @@ func initConfig() {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
if m, _ := regexp.MatchString("^(bits|bytes)$", *dataRateUnit); !m {
|
||||
fs.Errorf(nil, "Invalid unit passed to --stats-unit. Defaulting to bytes.")
|
||||
ci.DataRateUnit = "bytes"
|
||||
} else {
|
||||
ci.DataRateUnit = *dataRateUnit
|
||||
}
|
||||
}
|
||||
|
||||
func resolveExitCode(err error) {
|
||||
@@ -522,41 +512,12 @@ var backendFlags map[string]struct{}
|
||||
func AddBackendFlags() {
|
||||
backendFlags = map[string]struct{}{}
|
||||
for _, fsInfo := range fs.Registry {
|
||||
done := map[string]struct{}{}
|
||||
flags.AddFlagsFromOptions(pflag.CommandLine, fsInfo.Prefix, fsInfo.Options)
|
||||
// Store the backend flag names for the help generator
|
||||
for i := range fsInfo.Options {
|
||||
opt := &fsInfo.Options[i]
|
||||
// Skip if done already (e.g. with Provider options)
|
||||
if _, doneAlready := done[opt.Name]; doneAlready {
|
||||
continue
|
||||
}
|
||||
done[opt.Name] = struct{}{}
|
||||
// Make a flag from each option
|
||||
name := opt.FlagName(fsInfo.Prefix)
|
||||
found := pflag.CommandLine.Lookup(name) != nil
|
||||
if !found {
|
||||
// Take first line of help only
|
||||
help := strings.TrimSpace(opt.Help)
|
||||
if nl := strings.IndexRune(help, '\n'); nl >= 0 {
|
||||
help = help[:nl]
|
||||
}
|
||||
help = strings.TrimRight(strings.TrimSpace(help), ".!?")
|
||||
if opt.IsPassword {
|
||||
help += " (obscured)"
|
||||
}
|
||||
flag := pflag.CommandLine.VarPF(opt, name, opt.ShortOpt, help)
|
||||
flags.SetDefaultFromEnv(pflag.CommandLine, name)
|
||||
if _, isBool := opt.Default.(bool); isBool {
|
||||
flag.NoOptDefVal = "true"
|
||||
}
|
||||
// Hide on the command line if requested
|
||||
if opt.Hide&fs.OptionHideCommandLine != 0 {
|
||||
flag.Hidden = true
|
||||
}
|
||||
backendFlags[name] = struct{}{}
|
||||
} else {
|
||||
fs.Errorf(nil, "Not adding duplicate flag --%s", name)
|
||||
}
|
||||
// flag.Hidden = true
|
||||
backendFlags[name] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
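Both the old hand-rolled loop and the new flags.AddFlagsFromOptions derive the flag name from the option name. A sketch of that mapping (flagName below is a hypothetical stand-in for fs.Option.FlagName, shown only to illustrate the convention):

```go
package main

import (
	"fmt"
	"strings"
)

// flagName illustrates the assumed convention: prefix the option name with
// the backend name and turn underscores into hyphens.
func flagName(prefix, optName string) string {
	return prefix + "-" + strings.ReplaceAll(optName, "_", "-")
}

func main() {
	fmt.Println("--" + flagName("swift", "fetch_until_empty_page"))
	// prints: --swift-fetch-until-empty-page
}
```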
@@ -52,7 +52,7 @@ func findOption(name string, options []string) (found bool) {
func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.Options) (options []string) {
// Options
options = []string{
"-o", fmt.Sprintf("attr_timeout=%g", opt.AttrTimeout.Seconds()),
"-o", fmt.Sprintf("attr_timeout=%g", time.Duration(opt.AttrTimeout).Seconds()),
}
if opt.DebugFUSE {
options = append(options, "-o", "debug")
@@ -79,7 +79,7 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
// WinFSP so cmount must work with or without it.
options = append(options, "-o", "atomic_o_trunc")
if opt.DaemonTimeout != 0 {
options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(opt.DaemonTimeout.Seconds())))
options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(time.Duration(opt.DaemonTimeout).Seconds())))
}
if opt.AllowOther {
options = append(options, "-o", "allow_other")

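These conversions follow from the timeout fields changing type from time.Duration to fs.Duration. A named duration type loses time.Duration's methods, so call sites convert back explicitly. A minimal sketch (Duration stands in for fs.Duration, which rclone defines as a named time.Duration):

```go
package main

import (
	"fmt"
	"time"
)

// Duration is a stand-in for fs.Duration: a distinct named type so rclone
// can attach its own parsing and formatting to it.
type Duration time.Duration

func main() {
	attrTimeout := Duration(time.Second)
	// Methods like Seconds() live on time.Duration, hence the conversions
	// sprinkled through the mount code above.
	fmt.Println(time.Duration(attrTimeout).Seconds()) // 1
}
```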
@@ -29,8 +29,6 @@ type frontmatter struct {
Date string
Title string
Description string
Slug string
URL string
Source string
Annotations map[string]string
}
@@ -38,8 +36,6 @@ type frontmatter struct {
var frontmatterTemplate = template.Must(template.New("frontmatter").Parse(`---
title: "{{ .Title }}"
description: "{{ .Description }}"
slug: {{ .Slug }}
url: {{ .URL }}
{{- range $key, $value := .Annotations }}
{{ $key }}: {{ $value }}
{{- end }}
@@ -112,10 +108,14 @@ rclone.org website.`,
Date: now,
Title: strings.ReplaceAll(base, "_", " "),
Description: commands[name].Short,
Slug: base,
URL: "/commands/" + strings.ToLower(base) + "/",
Source: strings.ReplaceAll(strings.ReplaceAll(base, "rclone", "cmd"), "_", "/") + "/",
Annotations: commands[name].Annotations,
Annotations: map[string]string{},
}
// Filter out annotations that confuse hugo from the frontmatter
for k, v := range commands[name].Annotations {
if k != "groups" {
data.Annotations[k] = v
}
}
var buf bytes.Buffer
err := frontmatterTemplate.Execute(&buf, data)

@@ -30,10 +30,10 @@ var _ fusefs.Node = (*Dir)(nil)
// Attr updates the attributes of a directory
func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) (err error) {
defer log.Trace(d, "")("attr=%+v, err=%v", a, &err)
a.Valid = d.fsys.opt.AttrTimeout
a.Valid = time.Duration(d.fsys.opt.AttrTimeout)
a.Gid = d.VFS().Opt.GID
a.Uid = d.VFS().Opt.UID
a.Mode = os.ModeDir | d.VFS().Opt.DirPerms
a.Mode = os.ModeDir | os.FileMode(d.VFS().Opt.DirPerms)
modTime := d.ModTime()
a.Atime = modTime
a.Mtime = modTime
@@ -77,7 +77,7 @@ func (d *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.Lo
if err != nil {
return nil, translateError(err)
}
resp.EntryValid = d.fsys.opt.AttrTimeout
resp.EntryValid = time.Duration(d.fsys.opt.AttrTimeout)
// Check the mnode to see if it has a fuse Node cached
// We must return the same fuse nodes for vfs Nodes
node, ok := mnode.Sys().(fusefs.Node)

@@ -4,6 +4,7 @@ package mount

import (
"context"
"os"
"syscall"
"time"

@@ -25,13 +26,13 @@ var _ fusefs.Node = (*File)(nil)
// Attr fills out the attributes for the file
func (f *File) Attr(ctx context.Context, a *fuse.Attr) (err error) {
defer log.Trace(f, "")("a=%+v, err=%v", a, &err)
a.Valid = f.fsys.opt.AttrTimeout
a.Valid = time.Duration(f.fsys.opt.AttrTimeout)
modTime := f.File.ModTime()
Size := uint64(f.File.Size())
Blocks := (Size + 511) / 512
a.Gid = f.VFS().Opt.GID
a.Uid = f.VFS().Opt.UID
a.Mode = f.VFS().Opt.FilePerms
a.Mode = os.FileMode(f.VFS().Opt.FilePerms)
a.Size = Size
a.Atime = modTime
a.Mtime = modTime

@@ -6,6 +6,7 @@ package mount
import (
"fmt"
"runtime"
"time"

"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
@@ -50,7 +51,7 @@ func mountOptions(VFS *vfs.VFS, device string, opt *mountlib.Options) (options [
options = append(options, fuse.WritebackCache())
}
if opt.DaemonTimeout != 0 {
options = append(options, fuse.DaemonTimeout(fmt.Sprint(int(opt.DaemonTimeout.Seconds()))))
options = append(options, fuse.DaemonTimeout(fmt.Sprint(int(time.Duration(opt.DaemonTimeout).Seconds()))))
}
if len(opt.ExtraOptions) > 0 {
fs.Errorf(nil, "-o/--option not supported with this FUSE backend")

@@ -7,6 +7,7 @@ package mount2
import (
"os"
"syscall"
"time"

"github.com/hanwen/go-fuse/v2/fuse"
"github.com/rclone/rclone/cmd/mountlib"
@@ -88,14 +89,14 @@ func setAttr(node vfs.Node, attr *fuse.Attr) {
// fill in AttrOut from node
func (f *FS) setAttrOut(node vfs.Node, out *fuse.AttrOut) {
setAttr(node, &out.Attr)
out.SetTimeout(f.opt.AttrTimeout)
out.SetTimeout(time.Duration(f.opt.AttrTimeout))
}

// fill in EntryOut from node
func (f *FS) setEntryOut(node vfs.Node, out *fuse.EntryOut) {
setAttr(node, &out.Attr)
out.SetEntryTimeout(f.opt.AttrTimeout)
out.SetAttrTimeout(f.opt.AttrTimeout)
out.SetEntryTimeout(time.Duration(f.opt.AttrTimeout))
out.SetAttrTimeout(time.Duration(f.opt.AttrTimeout))
}

// Translate errors from mountlib into Syscall error numbers

@@ -7,6 +7,7 @@ import (
"fmt"
"log"
"runtime"
"time"

fusefs "github.com/hanwen/go-fuse/v2/fs"
"github.com/hanwen/go-fuse/v2/fuse"
@@ -215,8 +216,8 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
// FIXME fill out
opts := fusefs.Options{
MountOptions: *mountOpts,
EntryTimeout: &opt.AttrTimeout,
AttrTimeout: &opt.AttrTimeout,
EntryTimeout: (*time.Duration)(&opt.AttrTimeout),
AttrTimeout: (*time.Duration)(&opt.AttrTimeout),
GID: VFS.Opt.GID,
UID: VFS.Opt.UID,
}

@@ -16,7 +16,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/daemonize"
"github.com/rclone/rclone/vfs"
@@ -36,38 +35,161 @@ func help(commandName string) string {
return strings.TrimSpace(strings.ReplaceAll(mountHelp, "@", commandName)) + "\n\n"
}

// Options for creating the mount
type Options struct {
DebugFUSE bool
AllowNonEmpty bool
AllowRoot bool
AllowOther bool
DefaultPermissions bool
WritebackCache bool
Daemon bool
DaemonWait time.Duration // time to wait for ready mount from daemon, maximum on Linux or constant on macOS/BSD
MaxReadAhead fs.SizeSuffix
ExtraOptions []string
ExtraFlags []string
AttrTimeout time.Duration // how long the kernel caches attribute for
DeviceName string
VolumeName string
NoAppleDouble bool
NoAppleXattr bool
DaemonTimeout time.Duration // OSXFUSE only
AsyncRead bool
NetworkMode bool // Windows only
DirectIO bool // use Direct IO for file access
CaseInsensitive fs.Tristate
// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
Name: "debug_fuse",
Default: false,
Help: "Debug the FUSE internals - needs -v",
Groups: "Mount",
}, {
Name: "attr_timeout",
Default: fs.Duration(1 * time.Second),
Help: "Time for which file/directory attributes are cached",
Groups: "Mount",
}, {
Name: "option",
Default: []string{},
Help: "Option for libfuse/WinFsp (repeat if required)",
Groups: "Mount",
ShortOpt: "o",
}, {
Name: "fuse_flag",
Default: []string{},
Help: "Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required)",
Groups: "Mount",
}, {
Name: "daemon",
Default: false,
Help: "Run mount in background and exit parent process (as background output is suppressed, use --log-file with --log-format=pid,... to monitor) (not supported on Windows)",
Groups: "Mount",
}, {
Name: "daemon_timeout",
Default: func() fs.Duration {
if runtime.GOOS == "darwin" {
// DaemonTimeout defaults to non-zero for macOS
// (this is a macOS specific kernel option unrelated to DaemonWait)
return fs.Duration(10 * time.Minute)
}
return 0
}(),
Help: "Time limit for rclone to respond to kernel (not supported on Windows)",
Groups: "Mount",
}, {
Name: "default_permissions",
Default: false,
Help: "Makes kernel enforce access control based on the file mode (not supported on Windows)",
Groups: "Mount",
}, {
Name: "allow_non_empty",
Default: false,
Help: "Allow mounting over a non-empty directory (not supported on Windows)",
Groups: "Mount",
}, {
Name: "allow_root",
Default: false,
Help: "Allow access to root user (not supported on Windows)",
Groups: "Mount",
}, {
Name: "allow_other",
Default: false,
Help: "Allow access to other users (not supported on Windows)",
Groups: "Mount",
}, {
Name: "async_read",
Default: true,
Help: "Use asynchronous reads (not supported on Windows)",
Groups: "Mount",
}, {
Name: "max_read_ahead",
Default: fs.SizeSuffix(128 * 1024),
Help: "The number of bytes that can be prefetched for sequential reads (not supported on Windows)",
Groups: "Mount",
}, {
Name: "write_back_cache",
Default: false,
Help: "Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows)",
Groups: "Mount",
}, {
Name: "devname",
Default: "",
Help: "Set the device name - default is remote:path",
Groups: "Mount",
}, {
Name: "mount_case_insensitive",
Default: fs.Tristate{},
Help: "Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto)",
Groups: "Mount",
}, {
Name: "direct_io",
Default: false,
Help: "Use Direct IO, disables caching of data",
Groups: "Mount",
}, {
Name: "volname",
Default: "",
Help: "Set the volume name (supported on Windows and OSX only)",
Groups: "Mount",
}, {
Name: "noappledouble",
Default: true,
Help: "Ignore Apple Double (._) and .DS_Store files (supported on OSX only)",
Groups: "Mount",
}, {
Name: "noapplexattr",
Default: false,
Help: "Ignore all \"com.apple.*\" extended attributes (supported on OSX only)",
Groups: "Mount",
}, {
Name: "network_mode",
Default: false,
Help: "Mount as remote network drive, instead of fixed disk drive (supported on Windows only)",
Groups: "Mount",
}, {
Name: "daemon_wait",
Default: func() fs.Duration {
switch runtime.GOOS {
case "linux":
// Linux provides /proc/mounts to check mount status
// so --daemon-wait means *maximum* time to wait
return fs.Duration(60 * time.Second)
case "darwin", "openbsd", "freebsd", "netbsd":
// On BSD we can't check mount status yet
// so --daemon-wait is just a *constant* delay
return fs.Duration(5 * time.Second)
}
return 0
}(),
Help: "Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows)",
Groups: "Mount",
}}

func init() {
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "mount", Opt: &Opt, Options: OptionsInfo})
}

// DefaultOpt is the default values for creating the mount
var DefaultOpt = Options{
MaxReadAhead: 128 * 1024,
AttrTimeout: 1 * time.Second, // how long the kernel caches attribute for
NoAppleDouble: true, // use noappledouble by default
NoAppleXattr: false, // do not use noapplexattr by default
AsyncRead: true, // do async reads by default
// Options for creating the mount
type Options struct {
DebugFUSE bool `config:"debug_fuse"`
AllowNonEmpty bool `config:"allow_non_empty"`
AllowRoot bool `config:"allow_root"`
AllowOther bool `config:"allow_other"`
DefaultPermissions bool `config:"default_permissions"`
WritebackCache bool `config:"write_back_cache"`
Daemon bool `config:"daemon"`
DaemonWait fs.Duration `config:"daemon_wait"` // time to wait for ready mount from daemon, maximum on Linux or constant on macOS/BSD
MaxReadAhead fs.SizeSuffix `config:"max_read_ahead"`
ExtraOptions []string `config:"option"`
ExtraFlags []string `config:"fuse_flag"`
AttrTimeout fs.Duration `config:"attr_timeout"` // how long the kernel caches attribute for
DeviceName string `config:"devname"`
VolumeName string `config:"volname"`
NoAppleDouble bool `config:"noappledouble"`
NoAppleXattr bool `config:"noapplexattr"`
DaemonTimeout fs.Duration `config:"daemon_timeout"` // OSXFUSE only
AsyncRead bool `config:"async_read"`
NetworkMode bool `config:"network_mode"` // Windows only
DirectIO bool `config:"direct_io"` // use Direct IO for file access
CaseInsensitive fs.Tristate `config:"mount_case_insensitive"`
}

type (
@@ -106,61 +228,12 @@ const (
MaxLeafSize = 1024 // don't pass file names longer than this
)

func init() {
switch runtime.GOOS {
case "darwin":
// DaemonTimeout defaults to non-zero for macOS
// (this is a macOS specific kernel option unrelated to DaemonWait)
DefaultOpt.DaemonTimeout = 10 * time.Minute
}

switch runtime.GOOS {
case "linux":
// Linux provides /proc/mounts to check mount status
// so --daemon-wait means *maximum* time to wait
DefaultOpt.DaemonWait = 60 * time.Second
case "darwin", "openbsd", "freebsd", "netbsd":
// On BSD we can't check mount status yet
// so --daemon-wait is just a *constant* delay
DefaultOpt.DaemonWait = 5 * time.Second
}

// Opt must be assigned in the init block to ensure changes really get in
Opt = DefaultOpt
}

// Opt contains options set by command line flags
var Opt Options

// AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) {
rc.AddOption("mount", &Opt)
flags.BoolVarP(flagSet, &Opt.DebugFUSE, "debug-fuse", "", Opt.DebugFUSE, "Debug the FUSE internals - needs -v", "Mount")
flags.DurationVarP(flagSet, &Opt.AttrTimeout, "attr-timeout", "", Opt.AttrTimeout, "Time for which file/directory attributes are cached", "Mount")
flags.StringArrayVarP(flagSet, &Opt.ExtraOptions, "option", "o", []string{}, "Option for libfuse/WinFsp (repeat if required)", "Mount")
flags.StringArrayVarP(flagSet, &Opt.ExtraFlags, "fuse-flag", "", []string{}, "Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required)", "Mount")
// Non-Windows only
flags.BoolVarP(flagSet, &Opt.Daemon, "daemon", "", Opt.Daemon, "Run mount in background and exit parent process (as background output is suppressed, use --log-file with --log-format=pid,... to monitor) (not supported on Windows)", "Mount")
flags.DurationVarP(flagSet, &Opt.DaemonTimeout, "daemon-timeout", "", Opt.DaemonTimeout, "Time limit for rclone to respond to kernel (not supported on Windows)", "Mount")
flags.BoolVarP(flagSet, &Opt.DefaultPermissions, "default-permissions", "", Opt.DefaultPermissions, "Makes kernel enforce access control based on the file mode (not supported on Windows)", "Mount")
flags.BoolVarP(flagSet, &Opt.AllowNonEmpty, "allow-non-empty", "", Opt.AllowNonEmpty, "Allow mounting over a non-empty directory (not supported on Windows)", "Mount")
flags.BoolVarP(flagSet, &Opt.AllowRoot, "allow-root", "", Opt.AllowRoot, "Allow access to root user (not supported on Windows)", "Mount")
flags.BoolVarP(flagSet, &Opt.AllowOther, "allow-other", "", Opt.AllowOther, "Allow access to other users (not supported on Windows)", "Mount")
flags.BoolVarP(flagSet, &Opt.AsyncRead, "async-read", "", Opt.AsyncRead, "Use asynchronous reads (not supported on Windows)", "Mount")
flags.FVarP(flagSet, &Opt.MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads (not supported on Windows)", "Mount")
flags.BoolVarP(flagSet, &Opt.WritebackCache, "write-back-cache", "", Opt.WritebackCache, "Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows)", "Mount")
flags.StringVarP(flagSet, &Opt.DeviceName, "devname", "", Opt.DeviceName, "Set the device name - default is remote:path", "Mount")
flags.FVarP(flagSet, &Opt.CaseInsensitive, "mount-case-insensitive", "", "Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto)", "Mount")
flags.BoolVarP(flagSet, &Opt.DirectIO, "direct-io", "", Opt.DirectIO, "Use Direct IO, disables caching of data", "Mount")
// Windows and OSX
flags.StringVarP(flagSet, &Opt.VolumeName, "volname", "", Opt.VolumeName, "Set the volume name (supported on Windows and OSX only)", "Mount")
// OSX only
flags.BoolVarP(flagSet, &Opt.NoAppleDouble, "noappledouble", "", Opt.NoAppleDouble, "Ignore Apple Double (._) and .DS_Store files (supported on OSX only)", "Mount")
flags.BoolVarP(flagSet, &Opt.NoAppleXattr, "noapplexattr", "", Opt.NoAppleXattr, "Ignore all \"com.apple.*\" extended attributes (supported on OSX only)", "Mount")
// Windows only
flags.BoolVarP(flagSet, &Opt.NetworkMode, "network-mode", "", Opt.NetworkMode, "Mount as remote network drive, instead of fixed disk drive (supported on Windows only)", "Mount")
// Unix only
flags.DurationVarP(flagSet, &Opt.DaemonWait, "daemon-wait", "", Opt.DaemonWait, "Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows)", "Mount")
flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
}

const (
@@ -228,7 +301,7 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
defer cmd.StartStats()()
}

mnt := NewMountPoint(mount, args[1], cmd.NewFsDir(args), &Opt, &vfsflags.Opt)
mnt := NewMountPoint(mount, args[1], cmd.NewFsDir(args), &Opt, &vfscommon.Opt)
mountDaemon, err := mnt.Mount()

// Wait for foreground mount, if any...
@@ -258,7 +331,7 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
handle := atexit.Register(func() {
killDaemon("Got interrupt")
})
err = WaitMountReady(mnt.MountPoint, Opt.DaemonWait, mountDaemon)
err = WaitMountReady(mnt.MountPoint, time.Duration(Opt.DaemonWait), mountDaemon)
if err != nil {
killDaemon("Daemon timed out")
}

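mountlib now follows the same options pattern that the rest of this commit applies to dlna, ftp and nfs. A condensed sketch of that pattern (every call below appears in the hunks; only the packaging into one small file is illustrative):

```go
package example

import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/spf13/pflag"
)

// OptionsInfo describes each option once: name, default, help.
var OptionsInfo = fs.Options{{
	Name:    "addr",
	Default: "localhost:2121",
	Help:    "IPaddress:Port or :Port to bind server to",
}}

// Options mirrors OptionsInfo field by field via config tags.
type Options struct {
	ListenAddr string `config:"addr"`
}

// Opt holds the live values; registration replaces hand-written defaults.
var Opt Options

func init() {
	fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "example", Opt: &Opt, Options: OptionsInfo})
}

// AddFlags generates the command line flags from the descriptions,
// replacing one flags.XxxVarP call per option.
func AddFlags(flagSet *pflag.FlagSet) {
	flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
}
```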
@@ -10,7 +10,7 @@ import (

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/rclone/rclone/vfs/vfscommon"
)

var (
@@ -85,7 +85,7 @@ func mountRc(ctx context.Context, in rc.Params) (out rc.Params, err error) {
return nil, err
}

vfsOpt := vfsflags.Opt
vfsOpt := vfscommon.Opt
err = in.GetStructMissingOK("vfsOpt", &vfsOpt)
if err != nil {
return nil, err

@@ -6,6 +6,7 @@ import (
"log"

"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/rc/rcflags"
"github.com/rclone/rclone/fs/rc/rcserver"
libhttp "github.com/rclone/rclone/lib/http"
@@ -37,17 +38,17 @@ See the [rc documentation](/rc/) for more info on the rc flags.
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
if rcflags.Opt.Enabled {
if rc.Opt.Enabled {
log.Fatalf("Don't supply --rc flag when using rcd")
}

// Start the rc
rcflags.Opt.Enabled = true
rc.Opt.Enabled = true
if len(args) > 0 {
rcflags.Opt.Files = args[0]
rc.Opt.Files = args[0]
}

s, err := rcserver.Start(context.Background(), &rcflags.Opt)
s, err := rcserver.Start(context.Background(), &rc.Opt)
if err != nil {
log.Fatalf("Failed to start remote control: %v", err)
}

@@ -26,6 +26,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/systemd"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/spf13/cobra"
)
@@ -127,14 +128,14 @@ func newServer(f fs.Fs, opt *dlnaflags.Options) (*server, error) {
}

s := &server{
AnnounceInterval: opt.AnnounceInterval,
AnnounceInterval: time.Duration(opt.AnnounceInterval),
FriendlyName: friendlyName,
RootDeviceUUID: makeDeviceUUID(friendlyName),
Interfaces: interfaces,
waitChan: make(chan struct{}),
httpListenAddr: opt.ListenAddr,
f: f,
vfs: vfs.New(f, &vfsflags.Opt),
vfs: vfs.New(f, &vfscommon.Opt),
}

s.services = map[string]UPnPService{

@@ -35,7 +35,7 @@ const (
)

func startServer(t *testing.T, f fs.Fs) {
opt := dlnaflags.DefaultOpt
opt := dlnaflags.Opt
opt.ListenAddr = testBindAddress
var err error
dlnaServer, err = newServer(f, &opt)

@@ -4,8 +4,8 @@ package dlnaflags
import (
"time"

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/rc"
"github.com/spf13/pflag"
)

@@ -24,39 +24,46 @@ logging of all UPNP traffic.

`

// Options is the type for DLNA serving options.
type Options struct {
ListenAddr string
FriendlyName string
LogTrace bool
InterfaceNames []string
AnnounceInterval time.Duration
// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
Name: "addr",
Default: ":7879",
Help: "The ip:port or :port to bind the DLNA http server to",
}, {
Name: "name",
Default: "",
Help: "Name of DLNA server",
}, {
Name: "log_trace",
Default: false,
Help: "Enable trace logging of SOAP traffic",
}, {
Name: "interface",
Default: []string{},
Help: "The interface to use for SSDP (repeat as necessary)",
}, {
Name: "announce_interval",
Default: fs.Duration(12 * time.Minute),
Help: "The interval between SSDP announcements",
}}

func init() {
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "dlna", Opt: &Opt, Options: OptionsInfo})
}

// DefaultOpt contains the default options for DLNA serving.
var DefaultOpt = Options{
ListenAddr: ":7879",
FriendlyName: "",
LogTrace: false,
InterfaceNames: []string{},
AnnounceInterval: 12 * time.Minute,
// Options is the type for DLNA serving options.
type Options struct {
ListenAddr string `config:"addr"`
FriendlyName string `config:"name"`
LogTrace bool `config:"log_trace"`
InterfaceNames []string `config:"interface"`
AnnounceInterval fs.Duration `config:"announce_interval"`
}

// Opt contains the options for DLNA serving.
var (
Opt = DefaultOpt
)

func addFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *Options) {
rc.AddOption("dlna", &Opt)
flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "The ip:port or :port to bind the DLNA http server to", prefix)
flags.StringVarP(flagSet, &Opt.FriendlyName, prefix+"name", "", Opt.FriendlyName, "Name of DLNA server", prefix)
flags.BoolVarP(flagSet, &Opt.LogTrace, prefix+"log-trace", "", Opt.LogTrace, "Enable trace logging of SOAP traffic", prefix)
flags.StringArrayVarP(flagSet, &Opt.InterfaceNames, prefix+"interface", "", Opt.InterfaceNames, "The interface to use for SSDP (repeat as necessary)", prefix)
flags.DurationVarP(flagSet, &Opt.AnnounceInterval, prefix+"announce-interval", "", Opt.AnnounceInterval, "The interval between SSDP announcements", prefix)
}
var Opt Options

// AddFlags adds the command line flags for DLNA serving.
func AddFlags(flagSet *pflag.FlagSet) {
addFlagsPrefix(flagSet, "", &Opt)
flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
}

@@ -19,7 +19,6 @@ import (
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
)

// Driver implements docker driver api
@@ -55,7 +54,7 @@ func NewDriver(ctx context.Context, root string, mntOpt *mountlib.Options, vfsOp
mntOpt = &mountlib.Opt
}
if vfsOpt == nil {
vfsOpt = &vfsflags.Opt
vfsOpt = &vfscommon.Opt
}
drv := &Driver{
root: root,

@@ -2,7 +2,6 @@ package docker

import (
"fmt"
"strconv"
"strings"

"github.com/rclone/rclone/cmd/mountlib"
@@ -11,7 +10,6 @@ import (
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"

"github.com/spf13/pflag"
)
@@ -88,7 +86,7 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
fsType = "local"
if fsName != "" {
var ok bool
fsType, ok = fs.ConfigMap(nil, fsName, nil).Get("type")
fsType, ok = fs.ConfigMap("", nil, fsName, nil).Get("type")
if !ok {
return fs.ErrorNotFoundInConfigFile
}
@@ -185,7 +183,7 @@ func getMountOption(mntOpt *mountlib.Options, opt rc.Params, key string) (ok boo
case "debug-fuse":
mntOpt.DebugFUSE, err = opt.GetBool(key)
case "attr-timeout":
mntOpt.AttrTimeout, err = opt.GetDuration(key)
mntOpt.AttrTimeout, err = opt.GetFsDuration(key)
case "option":
mntOpt.ExtraOptions, err = getStringArray(opt, key)
case "fuse-flag":
@@ -193,7 +191,7 @@ func getMountOption(mntOpt *mountlib.Options, opt rc.Params, key string) (ok boo
case "daemon":
mntOpt.Daemon, err = opt.GetBool(key)
case "daemon-timeout":
mntOpt.DaemonTimeout, err = opt.GetDuration(key)
mntOpt.DaemonTimeout, err = opt.GetFsDuration(key)
case "default-permissions":
mntOpt.DefaultPermissions, err = opt.GetBool(key)
case "allow-non-empty":
@@ -231,9 +229,9 @@ func getVFSOption(vfsOpt *vfscommon.Options, opt rc.Params, key string) (ok bool
case "vfs-cache-mode":
err = getFVarP(&vfsOpt.CacheMode, opt, key)
case "vfs-cache-poll-interval":
vfsOpt.CachePollInterval, err = opt.GetDuration(key)
vfsOpt.CachePollInterval, err = opt.GetFsDuration(key)
case "vfs-cache-max-age":
vfsOpt.CacheMaxAge, err = opt.GetDuration(key)
vfsOpt.CacheMaxAge, err = opt.GetFsDuration(key)
case "vfs-cache-max-size":
err = getFVarP(&vfsOpt.CacheMaxSize, opt, key)
case "vfs-read-chunk-size":
@@ -243,11 +241,11 @@ func getVFSOption(vfsOpt *vfscommon.Options, opt rc.Params, key string) (ok bool
case "vfs-case-insensitive":
vfsOpt.CaseInsensitive, err = opt.GetBool(key)
case "vfs-write-wait":
vfsOpt.WriteWait, err = opt.GetDuration(key)
vfsOpt.WriteWait, err = opt.GetFsDuration(key)
case "vfs-read-wait":
vfsOpt.ReadWait, err = opt.GetDuration(key)
vfsOpt.ReadWait, err = opt.GetFsDuration(key)
case "vfs-write-back":
vfsOpt.WriteBack, err = opt.GetDuration(key)
vfsOpt.WriteBack, err = opt.GetFsDuration(key)
case "vfs-read-ahead":
err = getFVarP(&vfsOpt.ReadAhead, opt, key)
case "vfs-used-is-size":
@@ -259,28 +257,19 @@ func getVFSOption(vfsOpt *vfscommon.Options, opt rc.Params, key string) (ok bool
case "no-checksum":
vfsOpt.NoChecksum, err = opt.GetBool(key)
case "dir-cache-time":
vfsOpt.DirCacheTime, err = opt.GetDuration(key)
vfsOpt.DirCacheTime, err = opt.GetFsDuration(key)
case "poll-interval":
vfsOpt.PollInterval, err = opt.GetDuration(key)
vfsOpt.PollInterval, err = opt.GetFsDuration(key)
case "read-only":
vfsOpt.ReadOnly, err = opt.GetBool(key)
case "dir-perms":
perms := &vfsflags.FileMode{Mode: &vfsOpt.DirPerms}
err = getFVarP(perms, opt, key)
err = getFVarP(&vfsOpt.DirPerms, opt, key)
case "file-perms":
perms := &vfsflags.FileMode{Mode: &vfsOpt.FilePerms}
err = getFVarP(perms, opt, key)
err = getFVarP(&vfsOpt.FilePerms, opt, key)

// unprefixed unix-only vfs options
case "umask":
// GetInt64 doesn't support the `0octal` umask syntax - parse locally
var strVal string
if strVal, err = opt.GetString(key); err == nil {
var longVal int64
if longVal, err = strconv.ParseInt(strVal, 0, 0); err == nil {
vfsOpt.Umask = int(longVal)
}
}
err = getFVarP(&vfsOpt.Umask, opt, key)
case "uid":
intVal, err = opt.GetInt64(key)
vfsOpt.UID = uint32(intVal)

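The umask case drops the local base-0 parse in favour of getFVarP, but the old trick is worth noting: strconv.ParseInt with base 0 auto-detects the radix from the prefix, which is what accepted the `0octal` spelling. For example:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Base 0 auto-detects the radix, so a leading 0 is parsed as octal:
	// "0022" becomes decimal 18 (i.e. umask 022).
	v, err := strconv.ParseInt("0022", 0, 0)
	fmt.Println(v, err) // 18 <nil>
}
```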
@@ -25,48 +25,63 @@ import (
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
ftp "goftp.io/server/v2"
)

// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
Name: "addr",
Default: "localhost:2121",
Help: "IPaddress:Port or :Port to bind server to",
}, {
Name: "public_ip",
Default: "",
Help: "Public IP address to advertise for passive connections",
}, {
Name: "passive_port",
Default: "30000-32000",
Help: "Passive port range to use",
}, {
Name: "user",
Default: "anonymous",
Help: "User name for authentication",
}, {
Name: "pass",
Default: "",
Help: "Password for authentication (empty value allows every password)",
}, {
Name: "cert",
Default: "",
Help: "TLS PEM key (concatenation of certificate and CA certificate)",
}, {
Name: "key",
Default: "",
Help: "TLS PEM Private key",
}}

// Options contains options for the http Server
type Options struct {
//TODO add more options
ListenAddr string // Port to listen on
PublicIP string // Public IP for passive connections
PassivePorts string // Passive ports range
BasicUser string // single username for basic auth if not using Htpasswd
BasicPass string // password for BasicUser
TLSCert string // TLS PEM key (concatenation of certificate and CA certificate)
TLSKey string // TLS PEM Private key
}

// DefaultOpt is the default values used for Options
var DefaultOpt = Options{
ListenAddr: "localhost:2121",
PublicIP: "",
PassivePorts: "30000-32000",
BasicUser: "anonymous",
BasicPass: "",
ListenAddr string `config:"addr"` // Port to listen on
PublicIP string `config:"public_ip"` // Public IP for passive connections
PassivePorts string `config:"passive_port"` // Passive ports range
BasicUser string `config:"user"` // single username for basic auth if not using Htpasswd
BasicPass string `config:"pass"` // password for BasicUser
TLSCert string `config:"cert"` // TLS PEM key (concatenation of certificate and CA certificate)
TLSKey string `config:"key"` // TLS PEM Private key
}

// Opt is options set by command line flags
var Opt = DefaultOpt
var Opt Options

// AddFlags adds flags for ftp
func AddFlags(flagSet *pflag.FlagSet) {
rc.AddOption("ftp", &Opt)
flags.StringVarP(flagSet, &Opt.ListenAddr, "addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to", "")
flags.StringVarP(flagSet, &Opt.PublicIP, "public-ip", "", Opt.PublicIP, "Public IP address to advertise for passive connections", "")
flags.StringVarP(flagSet, &Opt.PassivePorts, "passive-port", "", Opt.PassivePorts, "Passive port range to use", "")
flags.StringVarP(flagSet, &Opt.BasicUser, "user", "", Opt.BasicUser, "User name for authentication", "")
flags.StringVarP(flagSet, &Opt.BasicPass, "pass", "", Opt.BasicPass, "Password for authentication (empty value allows every password)", "")
flags.StringVarP(flagSet, &Opt.TLSCert, "cert", "", Opt.TLSCert, "TLS PEM key (concatenation of certificate and CA certificate)", "")
flags.StringVarP(flagSet, &Opt.TLSKey, "key", "", Opt.TLSKey, "TLS PEM Private key", "")
flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
}

func init() {
@@ -157,7 +172,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options) (*driver, error) {
d.proxy = proxy.New(ctx, &proxyflags.Opt)
d.userPass = make(map[string]string, 16)
} else {
d.globalVFS = vfs.New(f, &vfsflags.Opt)
d.globalVFS = vfs.New(f, &vfscommon.Opt)
}
d.useTLS = d.opt.TLSKey != ""

@@ -33,7 +33,7 @@ const (
func TestFTP(t *testing.T) {
// Configure and start the server
start := func(f fs.Fs) (configmap.Simple, func()) {
opt := DefaultOpt
opt := Opt
opt.ListenAddr = testHOST + ":" + testPORT
opt.PassivePorts = testPASSIVEPORTRANGE
opt.BasicUser = testUSER

@@ -24,6 +24,7 @@ import (
"github.com/rclone/rclone/lib/http/serve"
"github.com/rclone/rclone/lib/systemd"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/spf13/cobra"
)
@@ -148,7 +149,7 @@ func run(ctx context.Context, f fs.Fs, opt Options) (s *HTTP, err error) {
// override auth
s.opt.Auth.CustomAuthFn = s.auth
} else {
s._vfs = vfs.New(f, &vfsflags.Opt)
s._vfs = vfs.New(f, &vfscommon.Opt)
}

s.server, err = libhttp.NewServer(ctx,
@@ -215,7 +216,7 @@ func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string
// Make the entries for display
directory := serve.NewDirectory(dirRemote, s.server.HTMLTemplate())
for _, node := range dirEntries {
if vfsflags.Opt.NoModTime {
if vfscommon.Opt.NoModTime {
directory.AddHTMLEntry(node.Path(), node.IsDir(), node.Size(), time.Time{})
} else {
directory.AddHTMLEntry(node.Path(), node.IsDir(), node.Size(), node.ModTime().UTC())

@@ -15,26 +15,39 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)

// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
Name: "addr",
Default: "",
Help: "IPaddress:Port or :Port to bind server to",
}, {
Name: "nfs_cache_handle_limit",
Default: 1000000,
Help: "max file handles cached simultaneously (min 5)",
}}

func init() {
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "nfs", Opt: &opt, Options: OptionsInfo})
}

// Options contains options for the NFS Server
type Options struct {
ListenAddr string // Port to listen on
HandleLimit int // max file handles cached by go-nfs CachingHandler
ListenAddr string `config:"addr"` // Port to listen on
HandleLimit int `config:"nfs_cache_handle_limit"` // max file handles cached by go-nfs CachingHandler
}

var opt Options

// AddFlags adds flags for serve nfs (and nfsmount)
func AddFlags(flagSet *pflag.FlagSet, Opt *Options) {
rc.AddOption("nfs", &Opt)
flags.StringVarP(flagSet, &Opt.ListenAddr, "addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to", "")
flags.IntVarP(flagSet, &Opt.HandleLimit, "nfs-cache-handle-limit", "", 1000000, "max file handles cached simultaneously (min 5)", "")
flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
}

func init() {
@@ -48,7 +61,7 @@ func Run(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
f = cmd.NewFsSrc(args)
cmd.Run(false, true, command, func() error {
s, err := NewServer(context.Background(), vfs.New(f, &vfsflags.Opt), &opt)
s, err := NewServer(context.Background(), vfs.New(f, &vfscommon.Opt), &opt)
if err != nil {
return err
}

@@ -19,7 +19,7 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
libcache "github.com/rclone/rclone/lib/cache"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/rclone/rclone/vfs/vfscommon"
)

// Help contains text describing how to use the proxy
@@ -242,7 +242,7 @@ func (p *Proxy) call(user, auth string, isPublicKey bool) (value interface{}, er
// need to in memory. An attacker would find it easier to go
// after the unencrypted password in memory most likely.
entry := cacheEntry{
vfs: vfs.New(f, &vfsflags.Opt),
vfs: vfs.New(f, &vfscommon.Opt),
pwHash: sha256.Sum256([]byte(auth)),
}
return entry, true, nil

@@ -25,22 +25,26 @@ var (
// backend for gofakes3
type s3Backend struct {
opt *Options
vfs *vfs.VFS
s *Server
meta *sync.Map
}

// newBackend creates a new SimpleBucketBackend.
func newBackend(vfs *vfs.VFS, opt *Options) gofakes3.Backend {
func newBackend(s *Server, opt *Options) gofakes3.Backend {
return &s3Backend{
vfs: vfs,
opt: opt,
s: s,
meta: new(sync.Map),
}
}

// ListBuckets always returns the default bucket.
func (b *s3Backend) ListBuckets(ctx context.Context) ([]gofakes3.BucketInfo, error) {
dirEntries, err := getDirEntries("/", b.vfs)
_vfs, err := b.s.getVFS(ctx)
if err != nil {
return nil, err
}
dirEntries, err := getDirEntries("/", _vfs)
if err != nil {
return nil, err
}
@@ -60,7 +64,11 @@ func (b *s3Backend) ListBuckets(ctx context.Context) ([]gofakes3.BucketInfo, err

// ListBucket lists the objects in the given bucket.
func (b *s3Backend) ListBucket(ctx context.Context, bucket string, prefix *gofakes3.Prefix, page gofakes3.ListBucketPage) (*gofakes3.ObjectList, error) {
_, err := b.vfs.Stat(bucket)
_vfs, err := b.s.getVFS(ctx)
if err != nil {
return nil, err
}
_, err = _vfs.Stat(bucket)
if err != nil {
return nil, gofakes3.BucketNotFound(bucket)
}
@@ -79,7 +87,7 @@ func (b *s3Backend) ListBucket(ctx context.Context, bucket string, prefix *gofak
response := gofakes3.NewObjectList()
path, remaining := prefixParser(prefix)

err = b.entryListR(bucket, path, remaining, prefix.HasDelimiter, response)
err = b.entryListR(_vfs, bucket, path, remaining, prefix.HasDelimiter, response)
if err == gofakes3.ErrNoSuchKey {
// AWS just returns an empty list
response = gofakes3.NewObjectList()
@@ -94,13 +102,17 @@ func (b *s3Backend) ListBucket(ctx context.Context, bucket string, prefix *gofak
//
// Note that the metadata is not supported yet.
func (b *s3Backend) HeadObject(ctx context.Context, bucketName, objectName string) (*gofakes3.Object, error) {
_, err := b.vfs.Stat(bucketName)
_vfs, err := b.s.getVFS(ctx)
if err != nil {
return nil, err
}
_, err = _vfs.Stat(bucketName)
if err != nil {
return nil, gofakes3.BucketNotFound(bucketName)
}

fp := path.Join(bucketName, objectName)
node, err := b.vfs.Stat(fp)
node, err := _vfs.Stat(fp)
if err != nil {
return nil, gofakes3.KeyNotFound(objectName)
}
@@ -141,13 +153,17 @@ func (b *s3Backend) HeadObject(ctx context.Context, bucketName, objectName strin

// GetObject fetches the object from the filesystem.
func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string, rangeRequest *gofakes3.ObjectRangeRequest) (obj *gofakes3.Object, err error) {
_, err = b.vfs.Stat(bucketName)
_vfs, err := b.s.getVFS(ctx)
if err != nil {
return nil, err
}
_, err = _vfs.Stat(bucketName)
if err != nil {
return nil, gofakes3.BucketNotFound(bucketName)
}

fp := path.Join(bucketName, objectName)
node, err := b.vfs.Stat(fp)
node, err := _vfs.Stat(fp)
if err != nil {
return nil, gofakes3.KeyNotFound(objectName)
}
@@ -223,9 +239,13 @@ func (b *s3Backend) storeModtime(fp string, meta map[string]string, val string)

// TouchObject creates or updates meta on specified object.
func (b *s3Backend) TouchObject(ctx context.Context, fp string, meta map[string]string) (result gofakes3.PutObjectResult, err error) {
_, err = b.vfs.Stat(fp)
_vfs, err := b.s.getVFS(ctx)
if err != nil {
return result, err
}
_, err = _vfs.Stat(fp)
if err == vfs.ENOENT {
f, err := b.vfs.Create(fp)
f, err := _vfs.Create(fp)
if err != nil {
return result, err
}
@@ -235,7 +255,7 @@ func (b *s3Backend) TouchObject(ctx context.Context, fp string, meta map[string]
return result, err
}

_, err = b.vfs.Stat(fp)
_, err = _vfs.Stat(fp)
if err != nil {
return result, err
}
@@ -246,7 +266,7 @@ func (b *s3Backend) TouchObject(ctx context.Context, fp string, meta map[string]
ti, err := swift.FloatStringToTime(val)
if err == nil {
b.storeModtime(fp, meta, val)
return result, b.vfs.Chtimes(fp, ti, ti)
return result, _vfs.Chtimes(fp, ti, ti)
}
// ignore error since the file is successfully created
}
@@ -255,7 +275,7 @@ func (b *s3Backend) TouchObject(ctx context.Context, fp string, meta map[string]
ti, err := swift.FloatStringToTime(val)
if err == nil {
b.storeModtime(fp, meta, val)
return result, b.vfs.Chtimes(fp, ti, ti)
return result, _vfs.Chtimes(fp, ti, ti)
}
// ignore error since the file is successfully created
}
@@ -270,7 +290,11 @@ func (b *s3Backend) PutObject(
meta map[string]string,
input io.Reader, size int64,
) (result gofakes3.PutObjectResult, err error) {
_, err = b.vfs.Stat(bucketName)
_vfs, err := b.s.getVFS(ctx)
if err != nil {
return result, err
}
_, err = _vfs.Stat(bucketName)
if err != nil {
return result, gofakes3.BucketNotFound(bucketName)
}
@@ -284,12 +308,12 @@ func (b *s3Backend) PutObject(
// }

if objectDir != "." {
if err := mkdirRecursive(objectDir, b.vfs); err != nil {
if err := mkdirRecursive(objectDir, _vfs); err != nil {
return result, err
}
}

f, err := b.vfs.Create(fp)
f, err := _vfs.Create(fp)
if err != nil {
return result, err
}
@@ -297,17 +321,17 @@ func (b *s3Backend) PutObject(
if _, err := io.Copy(f, input); err != nil {
// remove file when i/o error occurred (FsPutErr)
_ = f.Close()
_ = b.vfs.Remove(fp)
_ = _vfs.Remove(fp)
return result, err
}

if err := f.Close(); err != nil {
// remove file when close error occurred (FsPutErr)
_ = b.vfs.Remove(fp)
_ = _vfs.Remove(fp)
return result, err
}

_, err = b.vfs.Stat(fp)
_, err = _vfs.Stat(fp)
if err != nil {
return result, err
}
@@ -318,16 +342,13 @@ func (b *s3Backend) PutObject(
ti, err := swift.FloatStringToTime(val)
if err == nil {
b.storeModtime(fp, meta, val)
return result, b.vfs.Chtimes(fp, ti, ti)
return result, _vfs.Chtimes(fp, ti, ti)
}
// ignore error since the file is successfully created
}

if val, ok := meta["mtime"]; ok {
ti, err := swift.FloatStringToTime(val)
if err == nil {
if val, ok := meta["mtime"]; ok {
b.storeModtime(fp, meta, val)
return result, b.vfs.Chtimes(fp, ti, ti)
return result, _vfs.Chtimes(fp, ti, ti)
}
// ignore error since the file is successfully created
}
@@ -338,7 +359,7 @@ func (b *s3Backend) PutObject(
// DeleteMulti deletes multiple objects in a single request.
func (b *s3Backend) DeleteMulti(ctx context.Context, bucketName string, objects ...string) (result gofakes3.MultiDeleteResult, rerr error) {
for _, object := range objects {
if err := b.deleteObject(bucketName, object); err != nil {
if err := b.deleteObject(ctx, bucketName, object); err != nil {
fs.Errorf("serve s3", "delete object failed: %v", err)
result.Error = append(result.Error, gofakes3.ErrorResult{
Code: gofakes3.ErrInternal,
@@ -357,12 +378,16 @@ func (b *s3Backend) DeleteMulti(ctx context.Context, bucketName string, objects

// DeleteObject deletes the object with the given name.
func (b *s3Backend) DeleteObject(ctx context.Context, bucketName, objectName string) (result gofakes3.ObjectDeleteResult, rerr error) {
return result, b.deleteObject(bucketName, objectName)
return result, b.deleteObject(ctx, bucketName, objectName)
}

// deleteObject deletes the object from the filesystem.
func (b *s3Backend) deleteObject(bucketName, objectName string) error {
_, err := b.vfs.Stat(bucketName)
func (b *s3Backend) deleteObject(ctx context.Context, bucketName, objectName string) error {
_vfs, err := b.s.getVFS(ctx)
if err != nil {
return err
}
_, err = _vfs.Stat(bucketName)
if err != nil {
return gofakes3.BucketNotFound(bucketName)
}
@@ -370,18 +395,22 @@ func (b *s3Backend) deleteObject(bucketName, objectName string) error {
fp := path.Join(bucketName, objectName)
// S3 does not report an error when attempting to delete a key that does not exist, so
// we need to skip IsNotExist errors.
if err := b.vfs.Remove(fp); err != nil && !os.IsNotExist(err) {
if err := _vfs.Remove(fp); err != nil && !os.IsNotExist(err) {
return err
}

// FIXME: unsafe operation
rmdirRecursive(fp, b.vfs)
rmdirRecursive(fp, _vfs)
return nil
}

// CreateBucket creates a new bucket.
func (b *s3Backend) CreateBucket(ctx context.Context, name string) error {
_, err := b.vfs.Stat(name)
_vfs, err := b.s.getVFS(ctx)
if err != nil {
return err
}
_, err = _vfs.Stat(name)
if err != nil && err != vfs.ENOENT {
return gofakes3.ErrInternal
}
@@ -390,7 +419,7 @@ func (b *s3Backend) CreateBucket(ctx context.Context, name string) error {
return gofakes3.ErrBucketAlreadyExists
}

if err := b.vfs.Mkdir(name, 0755); err != nil {
if err := _vfs.Mkdir(name, 0755); err != nil {
return gofakes3.ErrInternal
}
return nil
@@ -398,12 +427,16 @@ func (b *s3Backend) CreateBucket(ctx context.Context, name string) error {

// DeleteBucket deletes the bucket with the given name.
func (b *s3Backend) DeleteBucket(ctx context.Context, name string) error {
_, err := b.vfs.Stat(name)
_vfs, err := b.s.getVFS(ctx)
if err != nil {
return err
}
_, err = _vfs.Stat(name)
if err != nil {
return gofakes3.BucketNotFound(name)
}

if err := b.vfs.Remove(name); err != nil {
if err := _vfs.Remove(name); err != nil {
return gofakes3.ErrBucketNotEmpty
}

@@ -412,7 +445,11 @@ func (b *s3Backend) DeleteBucket(ctx context.Context, name string) error {

// BucketExists checks if the bucket exists.
func (b *s3Backend) BucketExists(ctx context.Context, name string) (exists bool, err error) {
_, err = b.vfs.Stat(name)
_vfs, err := b.s.getVFS(ctx)
if err != nil {
return false, err
}
_, err = _vfs.Stat(name)
if err != nil {
return false, nil
}
@@ -422,6 +459,10 @@ func (b *s3Backend) BucketExists(ctx context.Context, name string) (exists bool,

// CopyObject copies the specified object from srcKey to dstKey.
func (b *s3Backend) CopyObject(ctx context.Context, srcBucket, srcKey, dstBucket, dstKey string, meta map[string]string) (result gofakes3.CopyObjectResult, err error) {
_vfs, err := b.s.getVFS(ctx)
if err != nil {
return result, err
}
fp := path.Join(srcBucket, srcKey)
if srcBucket == dstBucket && srcKey == dstKey {
b.meta.Store(fp, meta)
@@ -439,10 +480,10 @@ func (b *s3Backend) CopyObject(ctx context.Context, srcBucket, srcKey, dstBucket
}
b.storeModtime(fp, meta, val)

return result, b.vfs.Chtimes(fp, ti, ti)
return result, _vfs.Chtimes(fp, ti, ti)
}

cStat, err := b.vfs.Stat(fp)
cStat, err := _vfs.Stat(fp)
if err != nil {
return
}

@@ -5,12 +5,13 @@ import (
"strings"

"github.com/rclone/gofakes3"
"github.com/rclone/rclone/vfs"
)

func (b *s3Backend) entryListR(bucket, fdPath, name string, addPrefix bool, response *gofakes3.ObjectList) error {
func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPrefix bool, response *gofakes3.ObjectList) error {
fp := path.Join(bucket, fdPath)

dirEntries, err := getDirEntries(fp, _vfs)
if err != nil {
return err
}
@@ -30,7 +31,7 @@ func (b *s3Backend) entryListR(bucket, fdPath, name string, addPrefix bool, resp
response.AddPrefix(gofakes3.URLEncode(objectPath))
continue
}
err := b.entryListR(bucket, path.Join(fdPath, object), "", false, response)
err := b.entryListR(_vfs, bucket, path.Join(fdPath, object), "", false, response)
if err != nil {
return err
}

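Every backend method above now repeats the same prologue. Its shape, as a hedged sketch (getVFS, Server and the gofakes3 errors come from the hunks above; the helper itself is illustrative, not part of the diff): resolving the VFS per request is what lets --auth-proxy hand each authenticated user their own VFS instead of the single shared b.vfs.

```go
// statBucket is an illustrative helper, not part of the diff.
func (b *s3Backend) statBucket(ctx context.Context, bucket string) error {
	_vfs, err := b.s.getVFS(ctx) // per-request, auth-proxy aware
	if err != nil {
		return err
	}
	if _, err := _vfs.Stat(bucket); err != nil {
		return gofakes3.BucketNotFound(bucket)
	}
	return nil
}
```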
@@ -6,6 +6,8 @@ import (
 	"strings"
 
 	"github.com/rclone/rclone/cmd"
+	"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
+	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/flags"
 	"github.com/rclone/rclone/fs/hash"
 	httplib "github.com/rclone/rclone/lib/http"
@@ -20,6 +22,7 @@ var DefaultOpt = Options{
 	hashName:  "MD5",
 	hashType:  hash.MD5,
 	noCleanup: false,
+	Auth:      httplib.DefaultAuthCfg(),
 	HTTP:      httplib.DefaultCfg(),
 }
 
@@ -30,8 +33,10 @@ const flagPrefix = ""
 
 func init() {
 	flagSet := Command.Flags()
+	httplib.AddAuthFlagsPrefix(flagSet, flagPrefix, &Opt.Auth)
 	httplib.AddHTTPFlagsPrefix(flagSet, flagPrefix, &Opt.HTTP)
 	vfsflags.AddFlags(flagSet)
+	proxyflags.AddFlags(flagSet)
 	flags.BoolVarP(flagSet, &Opt.pathBucketMode, "force-path-style", "", Opt.pathBucketMode, "If true use path style access if false use virtual hosted style (default true)", "")
 	flags.StringVarP(flagSet, &Opt.hashName, "etag-hash", "", Opt.hashName, "Which hash to use for the ETag, or auto or blank for off", "")
 	flags.StringArrayVarP(flagSet, &Opt.authPair, "auth-key", "", Opt.authPair, "Set key pair for v4 authorization: access_key_id,secret_access_key", "")
@@ -55,10 +60,15 @@ var Command = &cobra.Command{
 	},
 	Use:   "s3 remote:path",
 	Short: `Serve remote:path over s3.`,
-	Long:  help() + httplib.Help(flagPrefix) + vfs.Help(),
+	Long:  help() + httplib.AuthHelp(flagPrefix) + httplib.Help(flagPrefix) + vfs.Help(),
 	RunE: func(command *cobra.Command, args []string) error {
-		cmd.CheckArgs(1, 1, command, args)
-		f := cmd.NewFsSrc(args)
+		var f fs.Fs
+		if proxyflags.Opt.AuthProxy == "" {
+			cmd.CheckArgs(1, 1, command, args)
+			f = cmd.NewFsSrc(args)
+		} else {
+			cmd.CheckArgs(0, 0, command, args)
+		}
 
 		if Opt.hashName == "auto" {
 			Opt.hashType = f.Hashes().GetOne()
@@ -73,13 +83,13 @@ var Command = &cobra.Command{
 			if err != nil {
 				return err
 			}
-			router := s.Router()
+			router := s.server.Router()
 			s.Bind(router)
-			err = s.serve()
+			err = s.Serve()
 			if err != nil {
 				return err
 			}
-			s.Wait()
+			s.server.Wait()
 			return nil
 		})
 		return nil
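The net effect on the command line (an illustration, assuming the flag spellings above; `--auth-key` is defined in this file and `--auth-proxy` comes from the shared proxyflags package): with an auth proxy configured the remote argument must be omitted, otherwise it is required as before:

    rclone serve s3 --auth-key ACCESS_KEY_ID,SECRET_ACCESS_KEY remote:path
    rclone serve s3 --auth-proxy /path/to/proxy-program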
@@ -9,10 +9,8 @@ import (
 	"fmt"
 	"io"
 	"net/url"
 	"os"
-	"os/exec"
 	"path"
-	"strings"
+	"path/filepath"
 	"testing"
 	"time"
@@ -21,6 +19,7 @@ import (
 	"github.com/rclone/rclone/fs/object"
 
 	_ "github.com/rclone/rclone/backend/local"
+	"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
 	"github.com/rclone/rclone/cmd/serve/servetest"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/configmap"
@@ -37,7 +36,7 @@ const (
 )
 
 // Configure and serve the server
-func serveS3(f fs.Fs) (testURL string, keyid string, keysec string) {
+func serveS3(f fs.Fs) (testURL string, keyid string, keysec string, w *Server) {
 	keyid = random.String(16)
 	keysec = random.String(16)
 	serveropt := &Options{
@@ -49,12 +48,12 @@ func serveS3(f fs.Fs) (testURL string, keyid string, keysec string) {
 	}
 
 	serveropt.HTTP.ListenAddr = []string{endpoint}
-	w, _ := newServer(context.Background(), f, serveropt)
-	router := w.Router()
+	w, _ = newServer(context.Background(), f, serveropt)
+	router := w.server.Router()
 
 	w.Bind(router)
-	w.Serve()
-	testURL = w.Server.URLs()[0]
+	_ = w.Serve()
+	testURL = w.server.URLs()[0]
 
 	return
 }
@@ -63,7 +62,7 @@ func serveS3(f fs.Fs) (testURL string, keyid string, keysec string) {
 // s3 remote against it.
 func TestS3(t *testing.T) {
 	start := func(f fs.Fs) (configmap.Simple, func()) {
-		testURL, keyid, keysec := serveS3(f)
+		testURL, keyid, keysec, _ := serveS3(f)
 		// Config for the backend we'll use to connect to the server
 		config := configmap.Simple{
 			"type": "s3",
@@ -76,62 +75,7 @@ func TestS3(t *testing.T) {
 		return config, func() {}
 	}
 
-	RunS3UnitTests(t, "s3", start)
-}
-
-func RunS3UnitTests(t *testing.T, name string, start servetest.StartFn) {
-	fstest.Initialise()
-	ci := fs.GetConfig(context.Background())
-	ci.DisableFeatures = append(ci.DisableFeatures, "Metadata")
-
-	fremote, _, clean, err := fstest.RandomRemote()
-	assert.NoError(t, err)
-	defer clean()
-
-	err = fremote.Mkdir(context.Background(), "")
-	assert.NoError(t, err)
-
-	f := fremote
-	config, cleanup := start(f)
-	defer cleanup()
-
-	// Change directory to run the tests
-	cwd, err := os.Getwd()
-	require.NoError(t, err)
-	err = os.Chdir("../../../backend/" + name)
-	require.NoError(t, err, "failed to cd to "+name+" backend")
-	defer func() {
-		// Change back to the old directory
-		require.NoError(t, os.Chdir(cwd))
-	}()
-
-	// RunS3UnitTests the backend tests with an on the fly remote
-	args := []string{"test"}
-	if testing.Verbose() {
-		args = append(args, "-v")
-	}
-	if *fstest.Verbose {
-		args = append(args, "-verbose")
-	}
-	remoteName := "serve" + name + ":"
-	args = append(args, "-remote", remoteName)
-	args = append(args, "-run", "^TestIntegration$")
-	args = append(args, "-list-retries", fmt.Sprint(*fstest.ListRetries))
-	cmd := exec.Command("go", args...)
-
-	// Configure the backend with environment variables
-	cmd.Env = os.Environ()
-	prefix := "RCLONE_CONFIG_" + strings.ToUpper(remoteName[:len(remoteName)-1]) + "_"
-	for k, v := range config {
-		cmd.Env = append(cmd.Env, prefix+strings.ToUpper(k)+"="+v)
-	}
-
-	// RunS3UnitTests the test
-	out, err := cmd.CombinedOutput()
-	if len(out) != 0 {
-		t.Logf("\n----------\n%s----------\n", string(out))
-	}
-	assert.NoError(t, err, "Running "+name+" integration tests")
+	servetest.Run(t, "s3", start)
 }
 
 // tests using the minio client
@@ -181,7 +125,7 @@ func TestEncodingWithMinioClient(t *testing.T) {
 			_, err = f.Put(context.Background(), in, obji)
 			assert.NoError(t, err)
 
-			endpoint, keyid, keysec := serveS3(f)
+			endpoint, keyid, keysec, _ := serveS3(f)
 			testURL, _ := url.Parse(endpoint)
 			minioClient, err := minio.New(testURL.Host, &minio.Options{
 				Creds: credentials.NewStaticV4(keyid, keysec, ""),
@@ -200,5 +144,161 @@ func TestEncodingWithMinioClient(t *testing.T) {
 			}
 		})
 	}
 
 }
 
+type FileStruct struct {
+	path     string
+	filename string
+}
+
+type TestCase struct {
+	description string
+	bucket      string
+	files       []FileStruct
+	keyID       string
+	keySec      string
+	shouldFail  bool
+}
+
+func testListBuckets(t *testing.T, cases []TestCase, useProxy bool) {
+	fstest.Initialise()
+
+	var f fs.Fs
+	if useProxy {
+		// the backend config will be made by the proxy
+		prog, err := filepath.Abs("../servetest/proxy_code.go")
+		require.NoError(t, err)
+		files, err := filepath.Abs("testdata")
+		require.NoError(t, err)
+		cmd := "go run " + prog + " " + files
+
+		// FIXME: this is untidy setting a global variable!
+		proxyflags.Opt.AuthProxy = cmd
+		defer func() {
+			proxyflags.Opt.AuthProxy = ""
+		}()
+
+		f = nil
+	} else {
+		// create a test Fs
+		var err error
+		f, err = fs.NewFs(context.Background(), "testdata")
+		require.NoError(t, err)
+	}
+
+	for _, tt := range cases {
+		t.Run(tt.description, func(t *testing.T) {
+			endpoint, keyid, keysec, s := serveS3(f)
+			defer func() {
+				assert.NoError(t, s.server.Shutdown())
+			}()
+
+			if tt.keyID != "" {
+				keyid = tt.keyID
+			}
+			if tt.keySec != "" {
+				keysec = tt.keySec
+			}
+
+			testURL, _ := url.Parse(endpoint)
+			minioClient, err := minio.New(testURL.Host, &minio.Options{
+				Creds:  credentials.NewStaticV4(keyid, keysec, ""),
+				Secure: false,
+			})
+			assert.NoError(t, err)
+
+			buckets, err := minioClient.ListBuckets(context.Background())
+			if tt.shouldFail {
+				require.Error(t, err)
+			} else {
+				require.NoError(t, err)
+				require.NotEmpty(t, buckets)
+				assert.Equal(t, buckets[0].Name, tt.bucket)
+
+				o := minioClient.ListObjects(context.Background(), tt.bucket, minio.ListObjectsOptions{
+					Recursive: true,
+				})
+				// save files after reading from channel
+				objects := []string{}
+				for object := range o {
+					objects = append(objects, object.Key)
+				}
+
+				for _, tt := range tt.files {
+					file := path.Join(tt.path, tt.filename)
+					found := false
+					for _, fname := range objects {
+						if file == fname {
+							found = true
+							break
+						}
+					}
+					require.Equal(t, true, found, "Object not found: "+file)
+				}
+			}
+		})
+	}
+}
+
+func TestListBuckets(t *testing.T) {
+	var cases = []TestCase{
+		{
+			description: "list buckets",
+			bucket:      "mybucket",
+			files: []FileStruct{
+				{
+					path:     "",
+					filename: "lorem.txt",
+				},
+				{
+					path:     "foo",
+					filename: "bar.txt",
+				},
+			},
+		},
+		{
+			description: "list buckets: wrong s3 key",
+			bucket:      "mybucket",
+			keyID:       "invalid",
+			shouldFail:  true,
+		},
+		{
+			description: "list buckets: wrong s3 secret",
+			bucket:      "mybucket",
+			keySec:      "invalid",
+			shouldFail:  true,
+		},
+	}
+
+	testListBuckets(t, cases, false)
+}
+
+func TestListBucketsAuthProxy(t *testing.T) {
+	var cases = []TestCase{
+		{
+			description: "list buckets",
+			bucket:      "mybucket",
+			// request with random keyid
+			// instead of what was set in 'authPair'
+			keyID: random.String(16),
+			files: []FileStruct{
+				{
+					path:     "",
+					filename: "lorem.txt",
+				},
+				{
+					path:     "foo",
+					filename: "bar.txt",
+				},
+			},
+		},
+		{
+			description: "list buckets: wrong s3 secret",
+			bucket:      "mybucket",
+			keySec:      "invalid",
+			shouldFail:  true,
+		},
+	}
+
+	testListBuckets(t, cases, true)
+}
@@ -3,17 +3,30 @@ package s3
 
 import (
 	"context"
+	"crypto/md5"
+	"encoding/hex"
+	"errors"
 	"fmt"
 	"math/rand"
 	"net/http"
 	"strings"
 
 	"github.com/go-chi/chi/v5"
 	"github.com/rclone/gofakes3"
+	"github.com/rclone/gofakes3/signature"
+	"github.com/rclone/rclone/cmd/serve/proxy"
+	"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/hash"
 	httplib "github.com/rclone/rclone/lib/http"
 	"github.com/rclone/rclone/vfs"
-	"github.com/rclone/rclone/vfs/vfsflags"
+	"github.com/rclone/rclone/vfs/vfscommon"
 )
 
+type ctxKey int
+
+const (
+	ctxKeyID ctxKey = iota
+)
+
 // Options contains options for the http Server
@@ -24,17 +37,20 @@ type Options struct {
 	hashType  hash.Type
 	authPair  []string
 	noCleanup bool
 	Auth      httplib.AuthConfig
 	HTTP      httplib.Config
 }
 
 // Server is a s3.FileSystem interface
 type Server struct {
-	*httplib.Server
-	f       fs.Fs
-	vfs     *vfs.VFS
-	faker   *gofakes3.GoFakeS3
-	handler http.Handler
-	ctx     context.Context // for global config
+	server   *httplib.Server
+	f        fs.Fs
+	_vfs     *vfs.VFS // don't use directly, use getVFS
+	faker    *gofakes3.GoFakeS3
+	handler  http.Handler
+	proxy    *proxy.Proxy
+	ctx      context.Context // for global config
+	s3Secret string
 }
 
 // Make a new S3 Server to serve the remote
@@ -42,16 +58,17 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options) (s *Server, err error
 	w := &Server{
 		f:   f,
 		ctx: ctx,
-		vfs: vfs.New(f, &vfsflags.Opt),
 	}
 
+	if len(opt.authPair) == 0 {
+		fs.Logf("serve s3", "No auth provided so allowing anonymous access")
+	} else {
+		w.s3Secret = getAuthSecret(opt.authPair)
+	}
+
 	var newLogger logger
 	w.faker = gofakes3.New(
-		newBackend(w.vfs, opt),
+		newBackend(w, opt),
 		gofakes3.WithHostBucket(!opt.pathBucketMode),
 		gofakes3.WithLogger(newLogger),
 		gofakes3.WithRequestID(rand.Uint64()),
@@ -60,24 +77,124 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options) (s *Server, err error
 		gofakes3.WithIntegrityCheck(true), // Check Content-MD5 if supplied
 	)
 
-	w.Server, err = httplib.NewServer(ctx,
+	w.handler = http.NewServeMux()
+	w.handler = w.faker.Server()
+
+	if proxyflags.Opt.AuthProxy != "" {
+		w.proxy = proxy.New(ctx, &proxyflags.Opt)
+		// proxy auth middleware
+		w.handler = proxyAuthMiddleware(w.handler, w)
+		w.handler = authPairMiddleware(w.handler, w)
+	} else {
+		w._vfs = vfs.New(f, &vfscommon.Opt)
+
+		if len(opt.authPair) > 0 {
+			w.faker.AddAuthKeys(authlistResolver(opt.authPair))
+		}
+	}
+
+	w.server, err = httplib.NewServer(ctx,
 		httplib.WithConfig(opt.HTTP),
 		httplib.WithAuth(opt.Auth),
 	)
 	if err != nil {
 		return nil, fmt.Errorf("failed to init server: %w", err)
 	}
 
-	w.handler = w.faker.Server()
 	return w, nil
 }
 
+func (w *Server) getVFS(ctx context.Context) (VFS *vfs.VFS, err error) {
+	if w._vfs != nil {
+		return w._vfs, nil
+	}
+
+	value := ctx.Value(ctxKeyID)
+	if value == nil {
+		return nil, errors.New("no VFS found in context")
+	}
+
+	VFS, ok := value.(*vfs.VFS)
+	if !ok {
+		return nil, fmt.Errorf("context value is not VFS: %#v", value)
+	}
+	return VFS, nil
+}
+
+// auth does proxy authorization
+func (w *Server) auth(accessKeyID string) (value interface{}, err error) {
+	VFS, _, err := w.proxy.Call(stringToMd5Hash(accessKeyID), accessKeyID, false)
+	if err != nil {
+		return nil, err
+	}
+	return VFS, err
+}
+
 // Bind register the handler to http.Router
 func (w *Server) Bind(router chi.Router) {
 	router.Handle("/*", w.handler)
 }
 
-func (w *Server) serve() error {
-	w.Serve()
-	fs.Logf(w.f, "Starting s3 server on %s", w.URLs())
+// Serve serves the s3 server
+func (w *Server) Serve() error {
+	w.server.Serve()
+	fs.Logf(w.f, "Starting s3 server on %s", w.server.URLs())
 	return nil
 }
+
+func authPairMiddleware(next http.Handler, ws *Server) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		accessKey, _ := parseAccessKeyID(r)
+		// set the auth pair
+		authPair := map[string]string{
+			accessKey: ws.s3Secret,
+		}
+		ws.faker.AddAuthKeys(authPair)
+		next.ServeHTTP(w, r)
+	})
+}
+
+func proxyAuthMiddleware(next http.Handler, ws *Server) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		accessKey, _ := parseAccessKeyID(r)
+		value, err := ws.auth(accessKey)
+		if err != nil {
+			fs.Infof(r.URL.Path, "%s: Auth failed: %v", r.RemoteAddr, err)
+		}
+		if value != nil {
+			r = r.WithContext(context.WithValue(r.Context(), ctxKeyID, value))
+		}
+
+		next.ServeHTTP(w, r)
+	})
+}
+
+func parseAccessKeyID(r *http.Request) (accessKey string, error signature.ErrorCode) {
+	v4Auth := r.Header.Get("Authorization")
+	req, err := signature.ParseSignV4(v4Auth)
+	if err != signature.ErrNone {
+		return "", err
+	}
+
+	return req.Credential.GetAccessKey(), signature.ErrNone
+}
+
+func stringToMd5Hash(s string) string {
+	hasher := md5.New()
+	hasher.Write([]byte(s))
+	return hex.EncodeToString(hasher.Sum(nil))
+}
+
+func getAuthSecret(authPair []string) string {
+	if len(authPair) == 0 {
+		return ""
+	}
+
+	split := strings.Split(authPair[0], ",")
+	if len(split) != 2 {
+		return ""
+	}
+
+	secret := strings.TrimSpace(split[1])
+	return secret
+}
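The two middlewares above are ordinary `http.Handler` wrappers: `authPairMiddleware` is applied last, so it runs first and registers the request's access key with the faker; `proxyAuthMiddleware` then resolves the proxied user's VFS and stores it under `ctxKeyID`, where `getVFS` finds it. A stripped-down, runnable sketch of the same chaining (the key parsing here is faked; the real code goes through `signature.ParseSignV4`):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

type ctxKey int

const ctxKeyID ctxKey = iota

// fakeParseAccessKey stands in for parseAccessKeyID/signature.ParseSignV4.
func fakeParseAccessKey(r *http.Request) string {
	return strings.TrimPrefix(r.Header.Get("Authorization"), "AWS4 ")
}

// proxyAuth resolves a per-user value (the real code resolves a *vfs.VFS
// via proxy.Call) and stores it in the request context for later handlers.
func proxyAuth(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		user := fakeParseAccessKey(r)
		r = r.WithContext(context.WithValue(r.Context(), ctxKeyID, "VFS for "+user))
		next.ServeHTTP(w, r)
	})
}

func main() {
	h := proxyAuth(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, r.Context().Value(ctxKeyID))
	}))
	req := httptest.NewRequest("GET", "/", nil)
	req.Header.Set("Authorization", "AWS4 alice")
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)
	fmt.Print(rec.Body.String()) // VFS for alice
}
```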
cmd/serve/s3/testdata/mybucket/foo/bar.txt (vendored, new file)
@@ -0,0 +1 @@
+I am inside a folder

cmd/serve/s3/testdata/mybucket/lorem.txt (vendored, new file)
@@ -0,0 +1 @@
+lorem epsum gipsum
@@ -76,7 +76,7 @@ func run(t *testing.T, name string, start StartFn, useProxy bool) {
 	if *fstest.Verbose {
 		args = append(args, "-verbose")
 	}
-	remoteName := name + "test:"
+	remoteName := "serve" + name + "test:"
 	if *subRun != "" {
 		args = append(args, "-run", *subRun)
 	}
@@ -17,7 +17,7 @@ import (
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/terminal"
 	"github.com/rclone/rclone/vfs"
-	"github.com/rclone/rclone/vfs/vfsflags"
+	"github.com/rclone/rclone/vfs/vfscommon"
 	"golang.org/x/crypto/ssh"
 )
@@ -307,7 +307,7 @@ func serveStdio(f fs.Fs) error {
 		stdin:  os.Stdin,
 		stdout: os.Stdout,
 	}
-	handlers := newVFSHandler(vfs.New(f, &vfsflags.Opt))
+	handlers := newVFSHandler(vfs.New(f, &vfscommon.Opt))
 	return serveChannel(sshChannel, handlers, "stdio")
 }
@@ -28,7 +28,7 @@ import (
 	"github.com/rclone/rclone/lib/env"
 	"github.com/rclone/rclone/lib/file"
 	"github.com/rclone/rclone/vfs"
-	"github.com/rclone/rclone/vfs/vfsflags"
+	"github.com/rclone/rclone/vfs/vfscommon"
 	"golang.org/x/crypto/ssh"
 )
@@ -54,7 +54,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options) *server {
 	if proxyflags.Opt.AuthProxy != "" {
 		s.proxy = proxy.New(ctx, &proxyflags.Opt)
 	} else {
-		s.vfs = vfs.New(f, &vfsflags.Opt)
+		s.vfs = vfs.New(f, &vfscommon.Opt)
 	}
 	return s
 }
@@ -133,7 +133,7 @@ func (s *server) serve() (err error) {
 	var authorizedKeysMap map[string]struct{}
 
 	// ensure the user isn't trying to use conflicting flags
-	if proxyflags.Opt.AuthProxy != "" && s.opt.AuthorizedKeys != "" && s.opt.AuthorizedKeys != DefaultOpt.AuthorizedKeys {
+	if proxyflags.Opt.AuthProxy != "" && s.opt.AuthorizedKeys != "" && s.opt.AuthorizedKeys != Opt.AuthorizedKeys {
 		return errors.New("--auth-proxy and --authorized-keys cannot be used at the same time")
 	}
@@ -142,7 +142,7 @@ func (s *server) serve() (err error) {
 	authKeysFile := env.ShellExpand(s.opt.AuthorizedKeys)
 	authorizedKeysMap, err = loadAuthorizedKeys(authKeysFile)
 	// If user set the flag away from the default then report an error
-	if err != nil && s.opt.AuthorizedKeys != DefaultOpt.AuthorizedKeys {
+	if err != nil && s.opt.AuthorizedKeys != Opt.AuthorizedKeys {
 		return err
 	}
 	fs.Logf(nil, "Loaded %d authorized keys from %q", len(authorizedKeysMap), authKeysFile)
@@ -11,7 +11,6 @@ import (
 	"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/flags"
-	"github.com/rclone/rclone/fs/rc"
 	"github.com/rclone/rclone/lib/systemd"
 	"github.com/rclone/rclone/vfs"
 	"github.com/rclone/rclone/vfs/vfsflags"
@@ -19,36 +18,58 @@ import (
 	"github.com/spf13/pflag"
 )
 
+// OptionsInfo describes the Options in use
+var OptionsInfo = fs.Options{{
+	Name:    "addr",
+	Default: "localhost:2022",
+	Help:    "IPaddress:Port or :Port to bind server to",
+}, {
+	Name:    "key",
+	Default: []string{},
+	Help:    "SSH private host key file (Can be multi-valued, leave blank to auto generate)",
+}, {
+	Name:    "authorized_keys",
+	Default: "~/.ssh/authorized_keys",
+	Help:    "Authorized keys file",
+}, {
+	Name:    "user",
+	Default: "",
+	Help:    "User name for authentication",
+}, {
+	Name:    "pass",
+	Default: "",
+	Help:    "Password for authentication",
+}, {
+	Name:    "no_auth",
+	Default: false,
+	Help:    "Allow connections with no authentication if set",
+}, {
+	Name:    "stdio",
+	Default: false,
+	Help:    "Run an sftp server on stdin/stdout",
+}}
+
 // Options contains options for the http Server
 type Options struct {
-	ListenAddr     string   // Port to listen on
-	HostKeys       []string // Paths to private host keys
-	AuthorizedKeys string   // Path to authorized keys file
-	User           string   // single username
-	Pass           string   // password for user
-	NoAuth         bool     // allow no authentication on connections
-	Stdio          bool     // serve on stdio
+	ListenAddr     string   `config:"addr"`            // Port to listen on
+	HostKeys       []string `config:"key"`             // Paths to private host keys
+	AuthorizedKeys string   `config:"authorized_keys"` // Path to authorized keys file
+	User           string   `config:"user"`            // single username
+	Pass           string   `config:"pass"`            // password for user
+	NoAuth         bool     `config:"no_auth"`         // allow no authentication on connections
+	Stdio          bool     `config:"stdio"`           // serve on stdio
 }
 
-// DefaultOpt is the default values used for Options
-var DefaultOpt = Options{
-	ListenAddr:     "localhost:2022",
-	AuthorizedKeys: "~/.ssh/authorized_keys",
-}
+func init() {
+	fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "sftp", Opt: &Opt, Options: OptionsInfo})
+}
 
 // Opt is options set by command line flags
-var Opt = DefaultOpt
+var Opt Options
 
 // AddFlags adds flags for the sftp
 func AddFlags(flagSet *pflag.FlagSet, Opt *Options) {
-	rc.AddOption("sftp", &Opt)
-	flags.StringVarP(flagSet, &Opt.ListenAddr, "addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to", "")
-	flags.StringArrayVarP(flagSet, &Opt.HostKeys, "key", "", Opt.HostKeys, "SSH private host key file (Can be multi-valued, leave blank to auto generate)", "")
-	flags.StringVarP(flagSet, &Opt.AuthorizedKeys, "authorized-keys", "", Opt.AuthorizedKeys, "Authorized keys file", "")
-	flags.StringVarP(flagSet, &Opt.User, "user", "", Opt.User, "User name for authentication", "")
-	flags.StringVarP(flagSet, &Opt.Pass, "pass", "", Opt.Pass, "Password for authentication", "")
-	flags.BoolVarP(flagSet, &Opt.NoAuth, "no-auth", "", Opt.NoAuth, "Allow connections with no authentication if set", "")
-	flags.BoolVarP(flagSet, &Opt.Stdio, "stdio", "", Opt.Stdio, "Run an sftp server on stdin/stdout", "")
+	flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
 }
 
 func init() {
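This is the new options pattern: declare the flag metadata once in an `fs.Options` slice, bind it to a struct through `config:` tags, and register it globally so the config file, environment variables and command-line flags all feed the same struct. A minimal sketch under those assumptions (the `demo` group name is hypothetical; the identifiers mirror the diff above):

```go
package demo

import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/spf13/pflag"
)

// OptionsInfo declares one entry per option, with its default and help text.
var OptionsInfo = fs.Options{{
	Name:    "addr",
	Default: "localhost:2022",
	Help:    "IPaddress:Port or :Port to bind server to",
}}

// Options is populated via the config tags, so defaults, config file,
// environment variables and flags all land in the same place.
type Options struct {
	ListenAddr string `config:"addr"`
}

// Opt holds the live values; defaults come from OptionsInfo at registration.
var Opt Options

func init() {
	fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "demo", Opt: &Opt, Options: OptionsInfo})
}

// AddFlags wires every entry in OptionsInfo onto the flag set in one call,
// replacing the old per-flag flags.StringVarP/BoolVarP boilerplate.
func AddFlags(flagSet *pflag.FlagSet) {
	flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
}
```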
@@ -40,7 +40,7 @@ var (
 func TestSftp(t *testing.T) {
 	// Configure and start the server
 	start := func(f fs.Fs) (configmap.Simple, func()) {
-		opt := DefaultOpt
+		opt := Opt
 		opt.ListenAddr = testBindAddress
 		opt.User = testUser
 		opt.Pass = testPass
@@ -26,6 +26,7 @@ import (
 	"github.com/rclone/rclone/lib/http/serve"
 	"github.com/rclone/rclone/lib/systemd"
 	"github.com/rclone/rclone/vfs"
+	"github.com/rclone/rclone/vfs/vfscommon"
 	"github.com/rclone/rclone/vfs/vfsflags"
 	"github.com/spf13/cobra"
 	"golang.org/x/net/webdav"
@@ -193,7 +194,7 @@ func newWebDAV(ctx context.Context, f fs.Fs, opt *Options) (w *WebDAV, err error
 		// override auth
 		w.opt.Auth.CustomAuthFn = w.auth
 	} else {
-		w._vfs = vfs.New(f, &vfsflags.Opt)
+		w._vfs = vfs.New(f, &vfscommon.Opt)
 	}
 
 	w.Server, err = libhttp.NewServer(ctx,
@@ -365,7 +366,7 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str
 	// Make the entries for display
 	directory := serve.NewDirectory(dirRemote, w.Server.HTMLTemplate())
 	for _, node := range dirEntries {
-		if vfsflags.Opt.NoModTime {
+		if vfscommon.Opt.NoModTime {
 			directory.AddHTMLEntry(node.Path(), node.IsDir(), node.Size(), time.Time{})
 		} else {
 			directory.AddHTMLEntry(node.Path(), node.IsDir(), node.Size(), node.ModTime().UTC())
@@ -66,7 +66,7 @@ func TestEnvironmentVariables(t *testing.T) {
 		assert.NotContains(t, out, "fileAA1.txt") // depth 4
 	}
 
-	// Test of debug logging while initialising flags from environment (tests #5241 Enhance1)
+	// Test of debug logging while initialising flags from environment (tests #5341 Enhance1)
 	env = "RCLONE_STATS=173ms"
 	out, err = rcloneEnv(env, "version", "-vv")
 	if assert.NoError(t, err) {
@@ -865,3 +865,10 @@ put them back in again.` >}}
 * Michał Dzienisiewicz <michal.piotr.dz@gmail.com>
 * Florian Klink <flokli@flokli.de>
 * Bill Fraser <bill@wfraser.dev>
+* Thearas <thearas850@gmail.com>
+* Filipe Herculano <fifo_@live.com>
+* Russ Bubley <russ.bubley@googlemail.com>
+* Paul Collins <paul.collins@canonical.com>
+* Tomasz Melcer <liori@exroot.org>
+* itsHenry <2671230065@qq.com>
+* Ke Wang <me@ke.wang>
@@ -289,6 +289,13 @@ be explicitly specified using exactly one of the `msi_object_id`,
 If none of `msi_object_id`, `msi_client_id`, or `msi_mi_res_id` is
 set, this is equivalent to using `env_auth`.
 
+#### Anonymous {#anonymous}
+
+If you want to access resources with public anonymous access then set
+`account` only. You can do this without making an rclone config:
+
+    rclone lsf :azureblob,account=ACCOUNT:CONTAINER
+
 {{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/azureblob/azureblob.go then run make backenddocs" >}}
 ### Standard options
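For a persistent setup, the same anonymous access can be written as a configured remote instead of the connection-string form (a sketch; the remote name `anonblob` is illustrative):

    [anonblob]
    type = azureblob
    account = ACCOUNT

after which `rclone lsf anonblob:CONTAINER` behaves like the one-liner above.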
@@ -1,8 +1,6 @@
 ---
 title: "rclone"
 description: "Show help for rclone commands, flags and backends."
-slug: rclone
-url: /commands/rclone/
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/ and as part of making a release run "make commanddocs"
 ---
 ## rclone
@@ -125,7 +123,7 @@ rclone [flags]
       --box-token-url string                Token server url
       --box-upload-cutoff SizeSuffix        Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi)
       --buffer-size SizeSuffix              In memory buffer size when reading files for each --transfer (default 16Mi)
-      --bwlimit BwTimetable                 Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
+      --bwlimit BwTimetable                 Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable.
       --bwlimit-file BwTimetable            Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
       --ca-cert stringArray                 CA certificate used to verify servers
       --cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage (default 1m0s)

@@ -1,8 +1,6 @@
 ---
 title: "rclone about"
 description: "Get quota information from the remote."
-slug: rclone_about
-url: /commands/rclone_about/
 versionIntroduced: v1.41
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/about/ and as part of making a release run "make commanddocs"
 ---

@@ -1,8 +1,6 @@
 ---
 title: "rclone authorize"
 description: "Remote authorization."
-slug: rclone_authorize
-url: /commands/rclone_authorize/
 versionIntroduced: v1.27
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/authorize/ and as part of making a release run "make commanddocs"
 ---

@@ -1,9 +1,6 @@
 ---
 title: "rclone backend"
 description: "Run a backend-specific command."
-slug: rclone_backend
-url: /commands/rclone_backend/
 groups: Important
 versionIntroduced: v1.52
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/backend/ and as part of making a release run "make commanddocs"
 ---

@@ -1,9 +1,6 @@
 ---
 title: "rclone bisync"
 description: "Perform bidirectional synchronization between two paths."
-slug: rclone_bisync
-url: /commands/rclone_bisync/
 groups: Filter,Copy,Important
 status: Beta
 versionIntroduced: v1.58
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/bisync/ and as part of making a release run "make commanddocs"

@@ -1,9 +1,6 @@
 ---
 title: "rclone cat"
 description: "Concatenates any files and sends them to stdout."
-slug: rclone_cat
-url: /commands/rclone_cat/
 groups: Filter,Listing
 versionIntroduced: v1.33
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/cat/ and as part of making a release run "make commanddocs"
 ---

@@ -1,9 +1,6 @@
 ---
 title: "rclone check"
 description: "Checks the files in the source and destination match."
-slug: rclone_check
-url: /commands/rclone_check/
 groups: Filter,Listing,Check
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/check/ and as part of making a release run "make commanddocs"
 ---
 # rclone check

@@ -1,9 +1,6 @@
 ---
 title: "rclone checksum"
 description: "Checks the files in the destination against a SUM file."
-slug: rclone_checksum
-url: /commands/rclone_checksum/
 groups: Filter,Listing
 versionIntroduced: v1.56
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/checksum/ and as part of making a release run "make commanddocs"
 ---

@@ -1,9 +1,6 @@
 ---
 title: "rclone cleanup"
 description: "Clean up the remote if possible."
-slug: rclone_cleanup
-url: /commands/rclone_cleanup/
 groups: Important
 versionIntroduced: v1.31
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/cleanup/ and as part of making a release run "make commanddocs"
 ---

@@ -1,8 +1,6 @@
 ---
 title: "rclone completion"
 description: "Output completion script for a given shell."
-slug: rclone_completion
-url: /commands/rclone_completion/
 versionIntroduced: v1.33
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/completion/ and as part of making a release run "make commanddocs"
 ---

@@ -1,8 +1,6 @@
 ---
 title: "rclone completion bash"
 description: "Output bash completion script for rclone."
-slug: rclone_completion_bash
-url: /commands/rclone_completion_bash/
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/completion/bash/ and as part of making a release run "make commanddocs"
 ---
 # rclone completion bash

@@ -1,8 +1,6 @@
 ---
 title: "rclone completion fish"
 description: "Output fish completion script for rclone."
-slug: rclone_completion_fish
-url: /commands/rclone_completion_fish/
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/completion/fish/ and as part of making a release run "make commanddocs"
 ---
 # rclone completion fish

@@ -1,8 +1,6 @@
 ---
 title: "rclone completion powershell"
 description: "Output powershell completion script for rclone."
-slug: rclone_completion_powershell
-url: /commands/rclone_completion_powershell/
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/completion/powershell/ and as part of making a release run "make commanddocs"
 ---
 # rclone completion powershell

@@ -1,8 +1,6 @@
 ---
 title: "rclone completion zsh"
 description: "Output zsh completion script for rclone."
-slug: rclone_completion_zsh
-url: /commands/rclone_completion_zsh/
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/completion/zsh/ and as part of making a release run "make commanddocs"
 ---
 # rclone completion zsh

@@ -1,8 +1,6 @@
 ---
 title: "rclone config"
 description: "Enter an interactive configuration session."
-slug: rclone_config
-url: /commands/rclone_config/
 versionIntroduced: v1.39
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/ and as part of making a release run "make commanddocs"
 ---

@@ -1,8 +1,6 @@
 ---
 title: "rclone config create"
 description: "Create a new remote with name, type and options."
-slug: rclone_config_create
-url: /commands/rclone_config_create/
 versionIntroduced: v1.39
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/create/ and as part of making a release run "make commanddocs"
 ---

@@ -1,8 +1,6 @@
 ---
 title: "rclone config delete"
 description: "Delete an existing remote."
-slug: rclone_config_delete
-url: /commands/rclone_config_delete/
 versionIntroduced: v1.39
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/delete/ and as part of making a release run "make commanddocs"
 ---

@@ -1,8 +1,6 @@
 ---
 title: "rclone config disconnect"
 description: "Disconnects user from remote"
-slug: rclone_config_disconnect
-url: /commands/rclone_config_disconnect/
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/disconnect/ and as part of making a release run "make commanddocs"
 ---
 # rclone config disconnect

@@ -1,8 +1,6 @@
 ---
 title: "rclone config dump"
 description: "Dump the config file as JSON."
-slug: rclone_config_dump
-url: /commands/rclone_config_dump/
 versionIntroduced: v1.39
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/dump/ and as part of making a release run "make commanddocs"
 ---

@@ -1,8 +1,6 @@
 ---
 title: "rclone config edit"
 description: "Enter an interactive configuration session."
-slug: rclone_config_edit
-url: /commands/rclone_config_edit/
 versionIntroduced: v1.39
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/edit/ and as part of making a release run "make commanddocs"
 ---

@@ -1,8 +1,6 @@
 ---
 title: "rclone config file"
 description: "Show path of configuration file in use."
-slug: rclone_config_file
-url: /commands/rclone_config_file/
 versionIntroduced: v1.38
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/file/ and as part of making a release run "make commanddocs"
 ---

@@ -1,8 +1,6 @@
 ---
 title: "rclone config password"
 description: "Update password in an existing remote."
-slug: rclone_config_password
-url: /commands/rclone_config_password/
 versionIntroduced: v1.39
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/password/ and as part of making a release run "make commanddocs"
 ---

@@ -1,8 +1,6 @@
 ---
 title: "rclone config paths"
 description: "Show paths used for configuration, cache, temp etc."
-slug: rclone_config_paths
-url: /commands/rclone_config_paths/
 versionIntroduced: v1.57
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/paths/ and as part of making a release run "make commanddocs"
 ---

@@ -1,8 +1,6 @@
 ---
 title: "rclone config providers"
 description: "List in JSON format all the providers and options."
-slug: rclone_config_providers
-url: /commands/rclone_config_providers/
 versionIntroduced: v1.39
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/providers/ and as part of making a release run "make commanddocs"
 ---

@@ -1,8 +1,6 @@
 ---
 title: "rclone config reconnect"
 description: "Re-authenticates user with remote."
-slug: rclone_config_reconnect
-url: /commands/rclone_config_reconnect/
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/reconnect/ and as part of making a release run "make commanddocs"
 ---
 # rclone config reconnect

@@ -1,8 +1,6 @@
 ---
 title: "rclone config redacted"
 description: "Print redacted (decrypted) config file, or the redacted config for a single remote."
-slug: rclone_config_redacted
-url: /commands/rclone_config_redacted/
 versionIntroduced: v1.64
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/redacted/ and as part of making a release run "make commanddocs"
 ---

@@ -1,8 +1,6 @@
 ---
 title: "rclone config show"
 description: "Print (decrypted) config file, or the config for a single remote."
-slug: rclone_config_show
-url: /commands/rclone_config_show/
 versionIntroduced: v1.38
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/show/ and as part of making a release run "make commanddocs"
 ---

@@ -1,8 +1,6 @@
 ---
 title: "rclone config touch"
 description: "Ensure configuration file exists."
-slug: rclone_config_touch
-url: /commands/rclone_config_touch/
 versionIntroduced: v1.56
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/touch/ and as part of making a release run "make commanddocs"
 ---

@@ -1,8 +1,6 @@
 ---
 title: "rclone config update"
 description: "Update options in an existing remote."
-slug: rclone_config_update
-url: /commands/rclone_config_update/
 versionIntroduced: v1.39
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/update/ and as part of making a release run "make commanddocs"
 ---

@@ -1,8 +1,6 @@
 ---
 title: "rclone config userinfo"
 description: "Prints info about logged in user of remote."
-slug: rclone_config_userinfo
-url: /commands/rclone_config_userinfo/
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/userinfo/ and as part of making a release run "make commanddocs"
 ---
 # rclone config userinfo

@@ -1,9 +1,6 @@
 ---
 title: "rclone copy"
 description: "Copy files from source to dest, skipping identical files."
-slug: rclone_copy
-url: /commands/rclone_copy/
 groups: Copy,Filter,Listing,Important
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/copy/ and as part of making a release run "make commanddocs"
 ---
 # rclone copy

@@ -1,9 +1,6 @@
 ---
 title: "rclone copyto"
 description: "Copy files from source to dest, skipping identical files."
-slug: rclone_copyto
-url: /commands/rclone_copyto/
 groups: Copy,Filter,Listing,Important
 versionIntroduced: v1.35
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/copyto/ and as part of making a release run "make commanddocs"
 ---

@@ -1,9 +1,6 @@
 ---
 title: "rclone copyurl"
 description: "Copy the contents of the URL supplied content to dest:path."
-slug: rclone_copyurl
-url: /commands/rclone_copyurl/
 groups: Important
 versionIntroduced: v1.43
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/copyurl/ and as part of making a release run "make commanddocs"
 ---

@@ -1,9 +1,6 @@
 ---
 title: "rclone cryptcheck"
 description: "Cryptcheck checks the integrity of an encrypted remote."
-slug: rclone_cryptcheck
-url: /commands/rclone_cryptcheck/
 groups: Filter,Listing,Check
 versionIntroduced: v1.36
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/cryptcheck/ and as part of making a release run "make commanddocs"
 ---

@@ -1,8 +1,6 @@
 ---
 title: "rclone cryptdecode"
 description: "Cryptdecode returns unencrypted file names."
-slug: rclone_cryptdecode
-url: /commands/rclone_cryptdecode/
 versionIntroduced: v1.38
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/cryptdecode/ and as part of making a release run "make commanddocs"
 ---

@@ -1,9 +1,6 @@
 ---
 title: "rclone dedupe"
 description: "Interactively find duplicate filenames and delete/rename them."
-slug: rclone_dedupe
-url: /commands/rclone_dedupe/
 groups: Important
 versionIntroduced: v1.27
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/dedupe/ and as part of making a release run "make commanddocs"
 ---

@@ -1,9 +1,6 @@
 ---
 title: "rclone delete"
 description: "Remove the files in path."
-slug: rclone_delete
-url: /commands/rclone_delete/
 groups: Important,Filter,Listing
 versionIntroduced: v1.27
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/delete/ and as part of making a release run "make commanddocs"
 ---

@@ -1,9 +1,6 @@
 ---
 title: "rclone deletefile"
 description: "Remove a single file from remote."
-slug: rclone_deletefile
-url: /commands/rclone_deletefile/
 groups: Important
 versionIntroduced: v1.42
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/deletefile/ and as part of making a release run "make commanddocs"
 ---

@@ -1,8 +1,6 @@
 ---
 title: "rclone gendocs"
 description: "Output markdown docs for rclone to the directory supplied."
-slug: rclone_gendocs
-url: /commands/rclone_gendocs/
 versionIntroduced: v1.33
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/gendocs/ and as part of making a release run "make commanddocs"
 ---

@@ -1,8 +1,6 @@
 ---
 title: "rclone gitannex"
 description: "Speaks with git-annex over stdin/stdout."
-slug: rclone_gitannex
-url: /commands/rclone_gitannex/
 versionIntroduced: v1.67.0
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/gitannex/ and as part of making a release run "make commanddocs"
 ---

@@ -1,9 +1,6 @@
 ---
 title: "rclone hashsum"
 description: "Produces a hashsum file for all the objects in the path."
-slug: rclone_hashsum
-url: /commands/rclone_hashsum/
 groups: Filter,Listing
 versionIntroduced: v1.41
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/hashsum/ and as part of making a release run "make commanddocs"
 ---

@@ -1,8 +1,6 @@
 ---
 title: "rclone link"
 description: "Generate public link to file/folder."
-slug: rclone_link
-url: /commands/rclone_link/
 versionIntroduced: v1.41
 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/link/ and as part of making a release run "make commanddocs"
 ---
Some files were not shown because too many files have changed in this diff.